From 2e95805e2d474b85c837ec433e64947782039047 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?colin=20axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Thu, 17 Jun 2021 13:43:56 +0200 Subject: [PATCH] Add in-place and genesis migrations (#205) * add in-place migrations Prunes solomachines and expired tendermint consensus states via an x/upgrade * update migrations fix iteration bug remove solo machine connections remove solo machine channels * migrate solomachine from v1 to v2 during in place migration Regenerate v1 solo machine definition in 02-client legacy Migrate from v1 to v2 solo machine client state Prune v1 solo machine consensus states * fix build * add genesis migration * code cleanup * add store migration test for expired tendermint consensus states * finish adding in place migration store tests * add genesis test for solo machines * fix genesis migration bug, add tendermint tests * test fix, changelog, migration docs * Apply suggestions from code review * Update docs/migrations/ibc-migration-043.md * apply Aditya's review suggestions * fix tests * add genesis json unmarshal test Test that the legacy solo machines can be successfully unmarshalled. This requires registering an implementation for the legacy solo machine. An implemenation which panics has been added. This implementation should only be registered against a clientCtx during a migrate cli cmd. The implementation is only briefly used in order to decode the previous solo machine set in genesis. * add migration support for max expected time per block * fix docs * fix bug found by Aditya The genesis client metadata was being set independently for each unexpired height. It needed to be moved outside the unexpired for loop * remove unnecessary code * apply Aditya review suggestions, fix bug There was a bug in adding consensus metadata since it relied on the iteration key not yet set. 
This is fixed by using traditional iteration using the consensus state key, setting metadata for all consensus states, and then pruning expired consensus states. The store test has been updated to set create two tendermint clients Co-authored-by: Aditya --- CHANGELOG.md | 1 + docs/ibc/proto-docs.md | 340 +- docs/migrations/ibc-migration-043.md | 60 + go.mod | 1 + modules/core/02-client/keeper/migrations.go | 27 + modules/core/02-client/legacy/v100/genesis.go | 153 + .../02-client/legacy/v100/genesis_test.go | 311 ++ .../core/02-client/legacy/v100/solomachine.go | 208 + .../02-client/legacy/v100/solomachine.pb.go | 4121 +++++++++++++++++ modules/core/02-client/legacy/v100/store.go | 180 + .../core/02-client/legacy/v100/store_test.go | 231 + .../core/03-connection/types/connection.pb.go | 6 +- modules/core/03-connection/types/params.go | 2 +- modules/core/exported/client.go | 3 +- modules/core/keeper/migrations.go | 32 + modules/core/legacy/v100/genesis.go | 54 + modules/core/legacy/v100/genesis_test.go | 178 + modules/core/module.go | 6 +- .../07-tendermint/types/store.go | 36 + .../07-tendermint/types/tendermint.pb.go | 4 +- .../07-tendermint/types/update.go | 1 - .../07-tendermint/types/update_test.go | 12 + proto/ibc/core/connection/v1/connection.proto | 6 +- .../solomachine/v1/solomachine.proto | 189 + 24 files changed, 6148 insertions(+), 14 deletions(-) create mode 100644 modules/core/02-client/keeper/migrations.go create mode 100644 modules/core/02-client/legacy/v100/genesis.go create mode 100644 modules/core/02-client/legacy/v100/genesis_test.go create mode 100644 modules/core/02-client/legacy/v100/solomachine.go create mode 100644 modules/core/02-client/legacy/v100/solomachine.pb.go create mode 100644 modules/core/02-client/legacy/v100/store.go create mode 100644 modules/core/02-client/legacy/v100/store_test.go create mode 100644 modules/core/keeper/migrations.go create mode 100644 modules/core/legacy/v100/genesis.go create mode 100644 
modules/core/legacy/v100/genesis_test.go create mode 100644 proto/ibc/lightclients/solomachine/v1/solomachine.proto diff --git a/CHANGELOG.md b/CHANGELOG.md index b8bb91dd822..c4420fb805b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,6 +72,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ ### Improvements +* (core/02-client) [\#205](https://github.com/cosmos/ibc-go/pull/205) Add in-place and genesis migrations from SDK v0.42.0 to ibc-go v1.0.0. Solo machine protobuf defintions are migrated from v1 to v2. All solo machine consensus states are pruned. All expired tendermint consensus states are pruned. * (modules/core) [\#184](https://github.com/cosmos/ibc-go/pull/184) Improve error messages. Uses unique error codes to indicate already relayed packets. * (07-tendermint) [\#182](https://github.com/cosmos/ibc-go/pull/182) Remove duplicate checks in upgrade logic. * (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed. 
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md index 701d4684940..cae068cf68c 100644 --- a/docs/ibc/proto-docs.md +++ b/docs/ibc/proto-docs.md @@ -195,6 +195,26 @@ - [ibc/lightclients/localhost/v1/localhost.proto](#ibc/lightclients/localhost/v1/localhost.proto) - [ClientState](#ibc.lightclients.localhost.v1.ClientState) +- [ibc/lightclients/solomachine/v1/solomachine.proto](#ibc/lightclients/solomachine/v1/solomachine.proto) + - [ChannelStateData](#ibc.lightclients.solomachine.v1.ChannelStateData) + - [ClientState](#ibc.lightclients.solomachine.v1.ClientState) + - [ClientStateData](#ibc.lightclients.solomachine.v1.ClientStateData) + - [ConnectionStateData](#ibc.lightclients.solomachine.v1.ConnectionStateData) + - [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState) + - [ConsensusStateData](#ibc.lightclients.solomachine.v1.ConsensusStateData) + - [Header](#ibc.lightclients.solomachine.v1.Header) + - [HeaderData](#ibc.lightclients.solomachine.v1.HeaderData) + - [Misbehaviour](#ibc.lightclients.solomachine.v1.Misbehaviour) + - [NextSequenceRecvData](#ibc.lightclients.solomachine.v1.NextSequenceRecvData) + - [PacketAcknowledgementData](#ibc.lightclients.solomachine.v1.PacketAcknowledgementData) + - [PacketCommitmentData](#ibc.lightclients.solomachine.v1.PacketCommitmentData) + - [PacketReceiptAbsenceData](#ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData) + - [SignBytes](#ibc.lightclients.solomachine.v1.SignBytes) + - [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) + - [TimestampedSignatureData](#ibc.lightclients.solomachine.v1.TimestampedSignatureData) + + - [DataType](#ibc.lightclients.solomachine.v1.DataType) + - [ibc/lightclients/solomachine/v2/solomachine.proto](#ibc/lightclients/solomachine/v2/solomachine.proto) - [ChannelStateData](#ibc.lightclients.solomachine.v2.ChannelStateData) - [ClientState](#ibc.lightclients.solomachine.v2.ClientState) @@ -2405,7 +2425,7 @@ Params defines the set of Connection 
parameters. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| `max_expected_time_per_block` | [uint64](#uint64) | | maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of time that the chain might reasonably take to produce the next block under normal operating conditions. A safe choice is 3-5x the expected time per block. | +| `max_expected_time_per_block` | [uint64](#uint64) | | maximum expected time per block (in nanoseconds), used to enforce block delay. This parameter should reflect the largest amount of time that the chain might reasonably take to produce the next block under normal operating conditions. A safe choice is 3-5x the expected time per block. | @@ -2919,6 +2939,324 @@ access to keys outside the client prefix. + +

Top

+ +## ibc/lightclients/solomachine/v1/solomachine.proto + + + + + +### ChannelStateData +ChannelStateData returns the SignBytes data for channel state +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `channel` | [ibc.core.channel.v1.Channel](#ibc.core.channel.v1.Channel) | | | + + + + + + + + +### ClientState +ClientState defines a solo machine client that tracks the current consensus +state and if the client is frozen. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `sequence` | [uint64](#uint64) | | latest sequence of the client state | +| `frozen_sequence` | [uint64](#uint64) | | frozen sequence of the solo machine | +| `consensus_state` | [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState) | | | +| `allow_update_after_proposal` | [bool](#bool) | | when set to true, will allow governance to update a solo machine client. The client will be unfrozen if it is frozen. | + + + + + + + + +### ClientStateData +ClientStateData returns the SignBytes data for client state verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | | + + + + + + + + +### ConnectionStateData +ConnectionStateData returns the SignBytes data for connection state +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `connection` | [ibc.core.connection.v1.ConnectionEnd](#ibc.core.connection.v1.ConnectionEnd) | | | + + + + + + + + +### ConsensusState +ConsensusState defines a solo machine consensus state. The sequence of a +consensus state is contained in the "height" key used in storing the +consensus state. 
+ + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `public_key` | [google.protobuf.Any](#google.protobuf.Any) | | public key of the solo machine | +| `diversifier` | [string](#string) | | diversifier allows the same public key to be re-used across different solo machine clients (potentially on different chains) without being considered misbehaviour. | +| `timestamp` | [uint64](#uint64) | | | + + + + + + + + +### ConsensusStateData +ConsensusStateData returns the SignBytes data for consensus state +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | | + + + + + + + + +### Header +Header defines a solo machine consensus header + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `sequence` | [uint64](#uint64) | | sequence to update solo machine public key at | +| `timestamp` | [uint64](#uint64) | | | +| `signature` | [bytes](#bytes) | | | +| `new_public_key` | [google.protobuf.Any](#google.protobuf.Any) | | | +| `new_diversifier` | [string](#string) | | | + + + + + + + + +### HeaderData +HeaderData returns the SignBytes data for update verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `new_pub_key` | [google.protobuf.Any](#google.protobuf.Any) | | header public key | +| `new_diversifier` | [string](#string) | | header diversifier | + + + + + + + + +### Misbehaviour +Misbehaviour defines misbehaviour for a solo machine which consists +of a sequence and two signatures over different messages at that sequence. 
+ + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `client_id` | [string](#string) | | | +| `sequence` | [uint64](#uint64) | | | +| `signature_one` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | | +| `signature_two` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | | + + + + + + + + +### NextSequenceRecvData +NextSequenceRecvData returns the SignBytes data for verification of the next +sequence to be received. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `next_seq_recv` | [uint64](#uint64) | | | + + + + + + + + +### PacketAcknowledgementData +PacketAcknowledgementData returns the SignBytes data for acknowledgement +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `acknowledgement` | [bytes](#bytes) | | | + + + + + + + + +### PacketCommitmentData +PacketCommitmentData returns the SignBytes data for packet commitment +verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | +| `commitment` | [bytes](#bytes) | | | + + + + + + + + +### PacketReceiptAbsenceData +PacketReceiptAbsenceData returns the SignBytes data for +packet receipt absence verification. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `path` | [bytes](#bytes) | | | + + + + + + + + +### SignBytes +SignBytes defines the signed bytes used for signature verification. 
+ + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `sequence` | [uint64](#uint64) | | | +| `timestamp` | [uint64](#uint64) | | | +| `diversifier` | [string](#string) | | | +| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | type of the data used | +| `data` | [bytes](#bytes) | | marshaled data | + + + + + + + + +### SignatureAndData +SignatureAndData contains a signature and the data signed over to create that +signature. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `signature` | [bytes](#bytes) | | | +| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | | +| `data` | [bytes](#bytes) | | | +| `timestamp` | [uint64](#uint64) | | | + + + + + + + + +### TimestampedSignatureData +TimestampedSignatureData contains the signature data and the timestamp of the +signature. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `signature_data` | [bytes](#bytes) | | | +| `timestamp` | [uint64](#uint64) | | | + + + + + + + + + + +### DataType +DataType defines the type of solo machine proof being created. This is done +to preserve uniqueness of different data sign byte encodings. 
+ +| Name | Number | Description | +| ---- | ------ | ----------- | +| DATA_TYPE_UNINITIALIZED_UNSPECIFIED | 0 | Default State | +| DATA_TYPE_CLIENT_STATE | 1 | Data type for client state verification | +| DATA_TYPE_CONSENSUS_STATE | 2 | Data type for consensus state verification | +| DATA_TYPE_CONNECTION_STATE | 3 | Data type for connection state verification | +| DATA_TYPE_CHANNEL_STATE | 4 | Data type for channel state verification | +| DATA_TYPE_PACKET_COMMITMENT | 5 | Data type for packet commitment verification | +| DATA_TYPE_PACKET_ACKNOWLEDGEMENT | 6 | Data type for packet acknowledgement verification | +| DATA_TYPE_PACKET_RECEIPT_ABSENCE | 7 | Data type for packet receipt absence verification | +| DATA_TYPE_NEXT_SEQUENCE_RECV | 8 | Data type for next sequence recv verification | +| DATA_TYPE_HEADER | 9 | Data type for header verification | + + + + + + + + + +

Top

diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md index 239900c179b..82154645013 100644 --- a/docs/migrations/ibc-migration-043.md +++ b/docs/migrations/ibc-migration-043.md @@ -27,6 +27,66 @@ Feel free to use your own method for modifying import names. NOTE: Updating to the `v0.43.0` SDK release and then running `go mod tidy` will cause a downgrade to `v0.42.0` in order to support the old IBC import paths. Update the import paths before running `go mod tidy`. +## Chain Upgrades + +Chains may choose to upgrade via an upgrade proposal or genesis upgrades. Both in-place store migrations and genesis migrations are supported. + +**WARNING**: Please read at least the quick guide for [IBC client upgrades](../ibc/upgrades/README.md) before upgrading your chain. It is highly recommended you do not change the chain-ID during an upgrade, otherwise you must follow the IBC client upgrade instructions. + +Both in-place store migrations and genesis migrations will: +- migrate the solo machine client state from v1 to v2 protobuf definitions +- prune all solo machine consensus states +- prune all expired tendermint consensus states + +Chains must set a new connection parameter during either in place store migrations or genesis migration. The new parameter, max expected block time, is used to enforce packet processing delays on the receiving end of an IBC packet flow. Checkout the [docs](https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2) for more information. + +### In-Place Store Migrations + +The new chain binary will need to run migrations in the upgrade handler. The fromVM (previous module version) for the IBC module should be 1. This will allow migrations to be run for IBC updating the version from 1 to 2. 
+ +Ex: +```go +app.UpgradeKeeper.SetUpgradeHandler("my-upgrade-proposal", + func(ctx sdk.Context, _ upgradetypes.Plan, _ module.VersionMap) (module.VersionMap, error) { + // set max expected block time parameter. Replace the default with your expected value + // https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2 + app.IBCKeeper.ConnectionKeeper.SetParams(ctx, ibcconnectiontypes.DefaultParams()) + + fromVM := map[string]uint64{ + ... // other modules + "ibc": 1, + ... + } + return app.mm.RunMigrations(ctx, app.configurator, fromVM) + }) + +``` + +### Genesis Migrations + +To perform genesis migrations, the following code must be added to your existing migration code. + +```go +// add imports as necessary +import ( + ibcv100 "github.com/cosmos/ibc-go/modules/core/legacy/v100" + ibchost "github.com/cosmos/ibc-go/modules/core/24-host" +) + +... + +// add in migrate cmd function +// expectedTimePerBlock is a new connection parameter +// https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2 +newGenState, err = ibcv100.MigrateGenesis(newGenState, clientCtx, *genDoc, expectedTimePerBlock) +if err != nil { + return err +} +``` + +**NOTE:** The genesis chain-id, time and height MUST be updated before migrating IBC, otherwise the tendermint consensus state will not be pruned. + + ## IBC Keeper Changes The IBC Keeper now takes in the Upgrade Keeper. 
Please add the chains' Upgrade Keeper after the Staking Keeper: diff --git a/go.mod b/go.mod index 233f26b718e..f4100fcb9c5 100644 --- a/go.mod +++ b/go.mod @@ -22,4 +22,5 @@ require ( github.com/tendermint/tm-db v0.6.4 google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f google.golang.org/grpc v1.37.0 + google.golang.org/protobuf v1.26.0 ) diff --git a/modules/core/02-client/keeper/migrations.go b/modules/core/02-client/keeper/migrations.go new file mode 100644 index 00000000000..5f2088d6ccc --- /dev/null +++ b/modules/core/02-client/keeper/migrations.go @@ -0,0 +1,27 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + v100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. 
+// This migration +// - migrates solo machine client states from v1 to v2 protobuf definition +// - prunes solo machine consensus states +// - prunes expired tendermint consensus states +// - adds iteration and processed height keys for unexpired tendermint consensus states +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) +} diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go new file mode 100644 index 00000000000..65aa4210e13 --- /dev/null +++ b/modules/core/02-client/legacy/v100/genesis.go @@ -0,0 +1,153 @@ +package v100 + +import ( + "bytes" + "time" + + "github.com/cosmos/cosmos-sdk/codec" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/cosmos/ibc-go/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/exported" + ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" +) + +// MigrateGenesis accepts exported v1.0.0 IBC client genesis file and migrates it to: +// +// - Update solo machine client state protobuf definition (v1 to v2) +// - Remove all solo machine consensus states +// - Remove all expired tendermint consensus states +// - Adds ProcessedHeight and Iteration keys for unexpired tendermint consensus states +func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, genesisBlockTime time.Time, selfHeight exported.Height) (*types.GenesisState, error) { + // To prune the consensus states, we will create new clientsConsensus + // and clientsMetadata. These slices will be filled up with consensus states + // which should not be pruned. No solo machine consensus states should be added + // and only unexpired consensus states for tendermint clients will be added. 
+ // The metadata keys for unexpired consensus states will be added to clientsMetadata + var ( + clientsConsensus []types.ClientConsensusStates + clientsMetadata []types.IdentifiedGenesisMetadata + ) + + for i, client := range clientGenState.Clients { + clientType, _, err := types.ParseClientIdentifier(client.ClientId) + if err != nil { + return nil, err + } + + // update solo machine client state defintions + if clientType == exported.Solomachine { + clientState := &ClientState{} + if err := cdc.Unmarshal(client.ClientState.Value, clientState); err != nil { + return nil, sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state") + } + + updatedClientState := migrateSolomachine(clientState) + + any, err := types.PackClientState(updatedClientState) + if err != nil { + return nil, err + } + + clientGenState.Clients[i] = types.IdentifiedClientState{ + ClientId: client.ClientId, + ClientState: any, + } + } + + // iterate consensus states by client + for _, clientConsensusStates := range clientGenState.ClientsConsensus { + // look for consensus states for the current client + if clientConsensusStates.ClientId == client.ClientId { + switch clientType { + case exported.Solomachine: + // remove all consensus states for the solo machine + // do not add to new clientsConsensus + + case exported.Tendermint: + // only add non expired consensus states to new clientsConsensus + tmClientState, ok := client.ClientState.GetCachedValue().(*ibctmtypes.ClientState) + if !ok { + return nil, types.ErrInvalidClient + } + + // collect unexpired consensus states + var unexpiredConsensusStates []types.ConsensusStateWithHeight + for _, consState := range clientConsensusStates.ConsensusStates { + tmConsState := consState.ConsensusState.GetCachedValue().(*ibctmtypes.ConsensusState) + if !tmClientState.IsExpired(tmConsState.Timestamp, genesisBlockTime) { + unexpiredConsensusStates = append(unexpiredConsensusStates, consState) + } + } + + // if we found at least 
one unexpired consensus state, create a clientConsensusState + // and add it to clientsConsensus + if len(unexpiredConsensusStates) != 0 { + clientsConsensus = append(clientsConsensus, types.ClientConsensusStates{ + ClientId: client.ClientId, + ConsensusStates: unexpiredConsensusStates, + }) + } + + // collect metadata for unexpired consensus states + var clientMetadata []types.GenesisMetadata + + // remove all expired tendermint consensus state metadata by adding only + // unexpired consensus state metadata + for _, consState := range unexpiredConsensusStates { + for _, identifiedGenMetadata := range clientGenState.ClientsMetadata { + // look for metadata for current client + if identifiedGenMetadata.ClientId == client.ClientId { + + // obtain height for consensus state being pruned + height := consState.Height + + // iterate through metadata and find metadata for current unexpired height + // only unexpired consensus state metadata should be added + for _, metadata := range identifiedGenMetadata.ClientMetadata { + // the previous version of IBC only contained the processed time metadata + // if we find the processed time metadata for an unexpired height, add the + // iteration key and processed height keys. 
+ if bytes.Equal(metadata.Key, ibctmtypes.ProcessedTimeKey(height)) { + clientMetadata = append(clientMetadata, + // set the processed height using the current self height + // this is safe, it may cause delays in packet processing if there + // is a non zero connection delay time + types.GenesisMetadata{ + Key: ibctmtypes.ProcessedHeightKey(height), + Value: []byte(selfHeight.String()), + }, + metadata, // processed time + types.GenesisMetadata{ + Key: ibctmtypes.IterationKey(height), + Value: host.ConsensusStateKey(height), + }) + + } + } + + } + } + + } + + // if we have metadata for unexipred consensus states, add it to consensusMetadata + if len(clientMetadata) != 0 { + clientsMetadata = append(clientsMetadata, types.IdentifiedGenesisMetadata{ + ClientId: client.ClientId, + ClientMetadata: clientMetadata, + }) + } + + default: + break + } + } + } + } + + clientGenState.ClientsConsensus = clientsConsensus + clientGenState.ClientsMetadata = clientsMetadata + return clientGenState, nil +} diff --git a/modules/core/02-client/legacy/v100/genesis_test.go b/modules/core/02-client/legacy/v100/genesis_test.go new file mode 100644 index 00000000000..0c3235c6582 --- /dev/null +++ b/modules/core/02-client/legacy/v100/genesis_test.go @@ -0,0 +1,311 @@ +package v100_test + +import ( + "bytes" + "encoding/json" + "time" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + + ibcclient "github.com/cosmos/ibc-go/modules/core/02-client" + v100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + "github.com/cosmos/ibc-go/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/exported" + ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/ibc-go/testing" + 
"github.com/cosmos/ibc-go/testing/simapp" +) + +func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { + path := ibctesting.NewPath(suite.chainA, suite.chainB) + encodingConfig := simapp.MakeTestEncodingConfig() + clientCtx := client.Context{}. + WithInterfaceRegistry(encodingConfig.InterfaceRegistry). + WithTxConfig(encodingConfig.TxConfig). + WithJSONCodec(encodingConfig.Marshaler) + + // create multiple legacy solo machine clients + solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1) + solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4) + + // create tendermint clients + suite.coordinator.SetupClients(path) + err := path.EndpointA.UpdateClient() + suite.Require().NoError(err) + clientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + // manually generate old proto buf definitions and set in genesis + // NOTE: we cannot use 'ExportGenesis' for the solo machines since we are + // using client states and consensus states which do not implement the exported.ClientState + // and exported.ConsensusState interface + var clients []types.IdentifiedClientState + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + clientState := sm.ClientState() + + var seq uint64 + if clientState.IsFrozen { + seq = 1 + } + + // generate old client state proto defintion + legacyClientState := &v100.ClientState{ + Sequence: clientState.Sequence, + FrozenSequence: seq, + ConsensusState: &v100.ConsensusState{ + PublicKey: clientState.ConsensusState.PublicKey, + Diversifier: clientState.ConsensusState.Diversifier, + Timestamp: clientState.ConsensusState.Timestamp, + }, + AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, + } + + // set client state + any, err := codectypes.NewAnyWithValue(legacyClientState) + suite.Require().NoError(err) + 
suite.Require().NotNil(any) + client := types.IdentifiedClientState{ + ClientId: sm.ClientID, + ClientState: any, + } + clients = append(clients, client) + + // set in store for ease of determining expected genesis + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), sm.ClientID) + bz, err := path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState) + suite.Require().NoError(err) + clientStore.Set(host.ClientStateKey(), bz) + + // set some consensus states + height1 := types.NewHeight(0, 1) + height2 := types.NewHeight(1, 2) + height3 := types.NewHeight(0, 123) + + any, err = codectypes.NewAnyWithValue(legacyClientState.ConsensusState) + suite.Require().NoError(err) + suite.Require().NotNil(any) + consensusState1 := types.ConsensusStateWithHeight{ + Height: height1, + ConsensusState: any, + } + consensusState2 := types.ConsensusStateWithHeight{ + Height: height2, + ConsensusState: any, + } + consensusState3 := types.ConsensusStateWithHeight{ + Height: height3, + ConsensusState: any, + } + + clientConsensusState := types.ClientConsensusStates{ + ClientId: sm.ClientID, + ConsensusStates: []types.ConsensusStateWithHeight{consensusState1, consensusState2, consensusState3}, + } + + clientGenState.ClientsConsensus = append(clientGenState.ClientsConsensus, clientConsensusState) + + // set in store for ease of determining expected genesis + bz, err = path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState.ConsensusState) + suite.Require().NoError(err) + clientStore.Set(host.ConsensusStateKey(height1), bz) + clientStore.Set(host.ConsensusStateKey(height2), bz) + clientStore.Set(host.ConsensusStateKey(height3), bz) + } + // solo machine clients must come before tendermint in expected + clientGenState.Clients = append(clients, clientGenState.Clients...) 
+ + // migrate store get expected genesis + // store migration and genesis migration should produce identical results + err = v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + expectedClientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning + migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext())) + suite.Require().NoError(err) + + // 'ExportGenesis' order metadata keys by processedheight, processedtime for all heights, then it appends all iteration keys + // In order to match the genesis migration with export genesis (from store migrations) we must reorder the iteration keys to be last + // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version + // which provides no benefit except nicer testing + for i, clientMetadata := range migrated.ClientsMetadata { + var updatedMetadata []types.GenesisMetadata + var iterationKeys []types.GenesisMetadata + for _, metadata := range clientMetadata.ClientMetadata { + if bytes.HasPrefix(metadata.Key, []byte(ibctmtypes.KeyIterateConsensusStatePrefix)) { + iterationKeys = append(iterationKeys, metadata) + } else { + updatedMetadata = append(updatedMetadata, metadata) + } + } + updatedMetadata = append(updatedMetadata, iterationKeys...) + migrated.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{ + ClientId: clientMetadata.ClientId, + ClientMetadata: updatedMetadata, + } + } + + bz, err := clientCtx.JSONCodec.MarshalJSON(&expectedClientGenState) + suite.Require().NoError(err) + + // Indent the JSON bz correctly. 
+ var jsonObj map[string]interface{} + err = json.Unmarshal(bz, &jsonObj) + suite.Require().NoError(err) + expectedIndentedBz, err := json.MarshalIndent(jsonObj, "", "\t") + suite.Require().NoError(err) + + bz, err = clientCtx.JSONCodec.MarshalJSON(migrated) + suite.Require().NoError(err) + + // Indent the JSON bz correctly. + err = json.Unmarshal(bz, &jsonObj) + suite.Require().NoError(err) + indentedBz, err := json.MarshalIndent(jsonObj, "", "\t") + suite.Require().NoError(err) + + suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) +} + +func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() { + // create two paths and setup clients + path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewPath(suite.chainA, suite.chainB) + encodingConfig := simapp.MakeTestEncodingConfig() + clientCtx := client.Context{}. + WithInterfaceRegistry(encodingConfig.InterfaceRegistry). + WithTxConfig(encodingConfig.TxConfig). + WithJSONCodec(encodingConfig.Marshaler) + + suite.coordinator.SetupClients(path1) + suite.coordinator.SetupClients(path2) + + // collect all heights expected to be pruned + var path1PruneHeights, path2PruneHeights []exported.Height + path1PruneHeights = append(path1PruneHeights, path1.EndpointA.GetClientState().GetLatestHeight()) + path2PruneHeights = append(path2PruneHeights, path2.EndpointA.GetClientState().GetLatestHeight()) + + // these heights will be expired and also pruned + for i := 0; i < 3; i++ { + path1.EndpointA.UpdateClient() + path1PruneHeights = append(path1PruneHeights, path1.EndpointA.GetClientState().GetLatestHeight()) + } + for i := 0; i < 3; i++ { + path2.EndpointA.UpdateClient() + path2PruneHeights = append(path2PruneHeights, path2.EndpointA.GetClientState().GetLatestHeight()) + } + + // Increment the time by a week + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + + // create the consensus state that can be used as trusted height for next update + path1.EndpointA.UpdateClient() + 
path1.EndpointA.UpdateClient() + path2.EndpointA.UpdateClient() + path2.EndpointA.UpdateClient() + + clientGenState := ibcclient.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) + suite.Require().NotNil(clientGenState.Clients) + suite.Require().NotNil(clientGenState.ClientsConsensus) + suite.Require().NotNil(clientGenState.ClientsMetadata) + + // Increment the time by another week, then update the client. + // This will cause the consensus states created before the first time increment + // to be expired + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + + // migrate store get expected genesis + // store migration and genesis migration should produce identical results + err := v100.MigrateStore(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path1.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + expectedClientGenState := ibcclient.ExportGenesis(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext())) + suite.Require().NoError(err) + + // 'ExportGenesis' order metadata keys by processedheight, processedtime for all heights, then it appends all iteration keys + // In order to match the genesis migration with export genesis we must reorder the iteration keys to be last + // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version + // which provides no benefit except nicer testing + for i, clientMetadata := range migrated.ClientsMetadata { + var updatedMetadata []types.GenesisMetadata + var iterationKeys []types.GenesisMetadata + for _, metadata := range clientMetadata.ClientMetadata { + if bytes.HasPrefix(metadata.Key, 
[]byte(ibctmtypes.KeyIterateConsensusStatePrefix)) { + iterationKeys = append(iterationKeys, metadata) + } else { + updatedMetadata = append(updatedMetadata, metadata) + } + } + updatedMetadata = append(updatedMetadata, iterationKeys...) + migrated.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{ + ClientId: clientMetadata.ClientId, + ClientMetadata: updatedMetadata, + } + } + + // check path 1 client pruning + for _, height := range path1PruneHeights { + for _, client := range migrated.ClientsConsensus { + if client.ClientId == path1.EndpointA.ClientID { + for _, consensusState := range client.ConsensusStates { + suite.Require().NotEqual(height, consensusState.Height) + } + } + + } + for _, client := range migrated.ClientsMetadata { + if client.ClientId == path1.EndpointA.ClientID { + for _, metadata := range client.ClientMetadata { + suite.Require().NotEqual(ibctmtypes.ProcessedTimeKey(height), metadata.Key) + suite.Require().NotEqual(ibctmtypes.ProcessedHeightKey(height), metadata.Key) + suite.Require().NotEqual(ibctmtypes.IterationKey(height), metadata.Key) + } + } + } + } + + // check path 2 client pruning + for _, height := range path2PruneHeights { + for _, client := range migrated.ClientsConsensus { + if client.ClientId == path2.EndpointA.ClientID { + for _, consensusState := range client.ConsensusStates { + suite.Require().NotEqual(height, consensusState.Height) + } + } + + } + for _, client := range migrated.ClientsMetadata { + if client.ClientId == path2.EndpointA.ClientID { + for _, metadata := range client.ClientMetadata { + suite.Require().NotEqual(ibctmtypes.ProcessedTimeKey(height), metadata.Key) + suite.Require().NotEqual(ibctmtypes.ProcessedHeightKey(height), metadata.Key) + suite.Require().NotEqual(ibctmtypes.IterationKey(height), metadata.Key) + } + } + + } + } + bz, err := clientCtx.JSONCodec.MarshalJSON(&expectedClientGenState) + suite.Require().NoError(err) + + // Indent the JSON bz correctly. 
+ var jsonObj map[string]interface{} + err = json.Unmarshal(bz, &jsonObj) + suite.Require().NoError(err) + expectedIndentedBz, err := json.MarshalIndent(jsonObj, "", "\t") + suite.Require().NoError(err) + + bz, err = clientCtx.JSONCodec.MarshalJSON(migrated) + suite.Require().NoError(err) + + // Indent the JSON bz correctly. + err = json.Unmarshal(bz, &jsonObj) + suite.Require().NoError(err) + indentedBz, err := json.MarshalIndent(jsonObj, "", "\t") + suite.Require().NoError(err) + + suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) +} diff --git a/modules/core/02-client/legacy/v100/solomachine.go b/modules/core/02-client/legacy/v100/solomachine.go new file mode 100644 index 00000000000..80b062faff1 --- /dev/null +++ b/modules/core/02-client/legacy/v100/solomachine.go @@ -0,0 +1,208 @@ +package v100 + +import ( + ics23 "github.com/confio/ics23/go" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/modules/core/exported" +) + +// NOTE: this is a mock implmentation for exported.ClientState. This implementation +// should only be registered on the InterfaceRegistry during cli command genesis migration. +// This implementation is only used to successfully unmarshal the previous solo machine +// client state and consensus state and migrate them to the new implementations. When the proto +// codec unmarshals, it calls UnpackInterfaces() to create a cached value of the any. The +// UnpackInterfaces function for IdenitifiedClientState will attempt to unpack the any to +// exported.ClientState. If the solomachine v1 type is not registered against the exported.ClientState +// the unmarshal will fail. This implementation will panic on every interface function. +// The same is done for the ConsensusState. 
+ +// Interface implementation checks. +var ( + _, _ codectypes.UnpackInterfacesMessage = &ClientState{}, &ConsensusState{} + _ exported.ClientState = (*ClientState)(nil) + _ exported.ConsensusState = &ConsensusState{} +) + +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterImplementations( + (*exported.ClientState)(nil), + &ClientState{}, + ) + registry.RegisterImplementations( + (*exported.ConsensusState)(nil), + &ConsensusState{}, + ) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (cs ClientState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return cs.ConsensusState.UnpackInterfaces(unpacker) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (cs ConsensusState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(cs.PublicKey, new(cryptotypes.PubKey)) +} + +// ClientType panics! +func (cs ClientState) ClientType() string { + panic("legacy solo machine is deprecated!") +} + +// GetLatestHeight panics! +func (cs ClientState) GetLatestHeight() exported.Height { + panic("legacy solo machine is deprecated!") +} + +// Status panics! +func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryCodec) exported.Status { + panic("legacy solo machine is deprecated!") +} + +// Validate panics! +func (cs ClientState) Validate() error { + panic("legacy solo machine is deprecated!") +} + +// GetProofSpecs panics! +func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec { + panic("legacy solo machine is deprecated!") +} + +// ZeroCustomFields panics! +func (cs ClientState) ZeroCustomFields() exported.ClientState { + panic("legacy solo machine is deprecated!") +} + +// Initialize panics! +func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, consState exported.ConsensusState) error { + panic("legacy solo machine is deprecated!") +} + +// ExportMetadata panics! 
+func (cs ClientState) ExportMetadata(_ sdk.KVStore) []exported.GenesisMetadata { + panic("legacy solo machine is deprecated!") +} + +// CheckHeaderAndUpdateState panics! +func (cs *ClientState) CheckHeaderAndUpdateState( + _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, _ exported.Header, +) (exported.ClientState, exported.ConsensusState, error) { + panic("legacy solo machine is deprecated!") +} + +// CheckMisbehaviourAndUpdateState panics! +func (cs ClientState) CheckMisbehaviourAndUpdateState( + _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, _ exported.Misbehaviour, +) (exported.ClientState, error) { + panic("legacy solo machine is deprecated!") +} + +// CheckSubstituteAndUpdateState panics! +func (cs ClientState) CheckSubstituteAndUpdateState( + ctx sdk.Context, _ codec.BinaryCodec, _, _ sdk.KVStore, + _ exported.ClientState, +) (exported.ClientState, error) { + panic("legacy solo machine is deprecated!") +} + +// VerifyUpgradeAndUpdateState panics! +func (cs ClientState) VerifyUpgradeAndUpdateState( + _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, + _ exported.ClientState, _ exported.ConsensusState, _, _ []byte, +) (exported.ClientState, exported.ConsensusState, error) { + panic("legacy solo machine is deprecated!") +} + +// VerifyClientState panics! +func (cs ClientState) VerifyClientState( + store sdk.KVStore, cdc codec.BinaryCodec, + _ exported.Height, _ exported.Prefix, _ string, _ []byte, clientState exported.ClientState, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyClientConsensusState panics! +func (cs ClientState) VerifyClientConsensusState( + sdk.KVStore, codec.BinaryCodec, + exported.Height, string, exported.Height, exported.Prefix, + []byte, exported.ConsensusState, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyConnectionState panics! 
+func (cs ClientState) VerifyConnectionState( + sdk.KVStore, codec.BinaryCodec, exported.Height, + exported.Prefix, []byte, string, exported.ConnectionI, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyChannelState panics! +func (cs ClientState) VerifyChannelState( + sdk.KVStore, codec.BinaryCodec, exported.Height, exported.Prefix, + []byte, string, string, exported.ChannelI, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyPacketCommitment panics! +func (cs ClientState) VerifyPacketCommitment( + sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height, + uint64, uint64, exported.Prefix, []byte, + string, string, uint64, []byte, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyPacketAcknowledgement panics! +func (cs ClientState) VerifyPacketAcknowledgement( + sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height, + uint64, uint64, exported.Prefix, []byte, + string, string, uint64, []byte, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyPacketReceiptAbsence panics! +func (cs ClientState) VerifyPacketReceiptAbsence( + sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height, + uint64, uint64, exported.Prefix, []byte, + string, string, uint64, +) error { + panic("legacy solo machine is deprecated!") +} + +// VerifyNextSequenceRecv panics! +func (cs ClientState) VerifyNextSequenceRecv( + sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height, + uint64, uint64, exported.Prefix, []byte, + string, string, uint64, +) error { + panic("legacy solo machine is deprecated!") +} + +// ClientType panics! +func (ConsensusState) ClientType() string { + panic("legacy solo machine is deprecated!") +} + +// GetTimestamp panics! +func (cs ConsensusState) GetTimestamp() uint64 { + panic("legacy solo machine is deprecated!") +} + +// GetRoot panics! 
+func (cs ConsensusState) GetRoot() exported.Root { + panic("legacy solo machine is deprecated!") +} + +// ValidateBasic panics! +func (cs ConsensusState) ValidateBasic() error { + panic("legacy solo machine is deprecated!") +} diff --git a/modules/core/02-client/legacy/v100/solomachine.pb.go b/modules/core/02-client/legacy/v100/solomachine.pb.go new file mode 100644 index 00000000000..c35edaf8b39 --- /dev/null +++ b/modules/core/02-client/legacy/v100/solomachine.pb.go @@ -0,0 +1,4121 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/lightclients/solomachine/v1/solomachine.proto + +package v100 + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/codec/types" + types1 "github.com/cosmos/ibc-go/modules/core/03-connection/types" + types2 "github.com/cosmos/ibc-go/modules/core/04-channel/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// DataType defines the type of solo machine proof being created. This is done +// to preserve uniqueness of different data sign byte encodings. 
+type DataType int32 + +const ( + // Default State + UNSPECIFIED DataType = 0 + // Data type for client state verification + CLIENT DataType = 1 + // Data type for consensus state verification + CONSENSUS DataType = 2 + // Data type for connection state verification + CONNECTION DataType = 3 + // Data type for channel state verification + CHANNEL DataType = 4 + // Data type for packet commitment verification + PACKETCOMMITMENT DataType = 5 + // Data type for packet acknowledgement verification + PACKETACKNOWLEDGEMENT DataType = 6 + // Data type for packet receipt absence verification + PACKETRECEIPTABSENCE DataType = 7 + // Data type for next sequence recv verification + NEXTSEQUENCERECV DataType = 8 + // Data type for header verification + HEADER DataType = 9 +) + +var DataType_name = map[int32]string{ + 0: "DATA_TYPE_UNINITIALIZED_UNSPECIFIED", + 1: "DATA_TYPE_CLIENT_STATE", + 2: "DATA_TYPE_CONSENSUS_STATE", + 3: "DATA_TYPE_CONNECTION_STATE", + 4: "DATA_TYPE_CHANNEL_STATE", + 5: "DATA_TYPE_PACKET_COMMITMENT", + 6: "DATA_TYPE_PACKET_ACKNOWLEDGEMENT", + 7: "DATA_TYPE_PACKET_RECEIPT_ABSENCE", + 8: "DATA_TYPE_NEXT_SEQUENCE_RECV", + 9: "DATA_TYPE_HEADER", +} + +var DataType_value = map[string]int32{ + "DATA_TYPE_UNINITIALIZED_UNSPECIFIED": 0, + "DATA_TYPE_CLIENT_STATE": 1, + "DATA_TYPE_CONSENSUS_STATE": 2, + "DATA_TYPE_CONNECTION_STATE": 3, + "DATA_TYPE_CHANNEL_STATE": 4, + "DATA_TYPE_PACKET_COMMITMENT": 5, + "DATA_TYPE_PACKET_ACKNOWLEDGEMENT": 6, + "DATA_TYPE_PACKET_RECEIPT_ABSENCE": 7, + "DATA_TYPE_NEXT_SEQUENCE_RECV": 8, + "DATA_TYPE_HEADER": 9, +} + +func (x DataType) String() string { + return proto.EnumName(DataType_name, int32(x)) +} + +func (DataType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{0} +} + +// ClientState defines a solo machine client that tracks the current consensus +// state and if the client is frozen. 
+type ClientState struct { + // latest sequence of the client state + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + // frozen sequence of the solo machine + FrozenSequence uint64 `protobuf:"varint,2,opt,name=frozen_sequence,json=frozenSequence,proto3" json:"frozen_sequence,omitempty" yaml:"frozen_sequence"` + ConsensusState *ConsensusState `protobuf:"bytes,3,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` + // when set to true, will allow governance to update a solo machine client. + // The client will be unfrozen if it is frozen. + AllowUpdateAfterProposal bool `protobuf:"varint,4,opt,name=allow_update_after_proposal,json=allowUpdateAfterProposal,proto3" json:"allow_update_after_proposal,omitempty" yaml:"allow_update_after_proposal"` +} + +func (m *ClientState) Reset() { *m = ClientState{} } +func (m *ClientState) String() string { return proto.CompactTextString(m) } +func (*ClientState) ProtoMessage() {} +func (*ClientState) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{0} +} +func (m *ClientState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientState.Merge(m, src) +} +func (m *ClientState) XXX_Size() int { + return m.Size() +} +func (m *ClientState) XXX_DiscardUnknown() { + xxx_messageInfo_ClientState.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientState proto.InternalMessageInfo + +// ConsensusState defines a solo machine consensus state. 
The sequence of a +// consensus state is contained in the "height" key used in storing the +// consensus state. +type ConsensusState struct { + // public key of the solo machine + PublicKey *types.Any `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty" yaml:"public_key"` + // diversifier allows the same public key to be re-used across different solo + // machine clients (potentially on different chains) without being considered + // misbehaviour. + Diversifier string `protobuf:"bytes,2,opt,name=diversifier,proto3" json:"diversifier,omitempty"` + Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *ConsensusState) Reset() { *m = ConsensusState{} } +func (m *ConsensusState) String() string { return proto.CompactTextString(m) } +func (*ConsensusState) ProtoMessage() {} +func (*ConsensusState) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{1} +} +func (m *ConsensusState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusState.Merge(m, src) +} +func (m *ConsensusState) XXX_Size() int { + return m.Size() +} +func (m *ConsensusState) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusState.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusState proto.InternalMessageInfo + +// Header defines a solo machine consensus header +type Header struct { + // sequence to update solo machine public key at + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" 
json:"timestamp,omitempty"` + Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` + NewPublicKey *types.Any `protobuf:"bytes,4,opt,name=new_public_key,json=newPublicKey,proto3" json:"new_public_key,omitempty" yaml:"new_public_key"` + NewDiversifier string `protobuf:"bytes,5,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{2} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return m.Size() +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +// Misbehaviour defines misbehaviour for a solo machine which consists +// of a sequence and two signatures over different messages at that sequence. 
+type Misbehaviour struct { + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + Sequence uint64 `protobuf:"varint,2,opt,name=sequence,proto3" json:"sequence,omitempty"` + SignatureOne *SignatureAndData `protobuf:"bytes,3,opt,name=signature_one,json=signatureOne,proto3" json:"signature_one,omitempty" yaml:"signature_one"` + SignatureTwo *SignatureAndData `protobuf:"bytes,4,opt,name=signature_two,json=signatureTwo,proto3" json:"signature_two,omitempty" yaml:"signature_two"` +} + +func (m *Misbehaviour) Reset() { *m = Misbehaviour{} } +func (m *Misbehaviour) String() string { return proto.CompactTextString(m) } +func (*Misbehaviour) ProtoMessage() {} +func (*Misbehaviour) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{3} +} +func (m *Misbehaviour) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Misbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Misbehaviour.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Misbehaviour) XXX_Merge(src proto.Message) { + xxx_messageInfo_Misbehaviour.Merge(m, src) +} +func (m *Misbehaviour) XXX_Size() int { + return m.Size() +} +func (m *Misbehaviour) XXX_DiscardUnknown() { + xxx_messageInfo_Misbehaviour.DiscardUnknown(m) +} + +var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo + +// SignatureAndData contains a signature and the data signed over to create that +// signature. 
+type SignatureAndData struct { + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *SignatureAndData) Reset() { *m = SignatureAndData{} } +func (m *SignatureAndData) String() string { return proto.CompactTextString(m) } +func (*SignatureAndData) ProtoMessage() {} +func (*SignatureAndData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{4} +} +func (m *SignatureAndData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureAndData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignatureAndData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignatureAndData) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureAndData.Merge(m, src) +} +func (m *SignatureAndData) XXX_Size() int { + return m.Size() +} +func (m *SignatureAndData) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureAndData.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureAndData proto.InternalMessageInfo + +// TimestampedSignatureData contains the signature data and the timestamp of the +// signature. 
+type TimestampedSignatureData struct { + SignatureData []byte `protobuf:"bytes,1,opt,name=signature_data,json=signatureData,proto3" json:"signature_data,omitempty" yaml:"signature_data"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *TimestampedSignatureData) Reset() { *m = TimestampedSignatureData{} } +func (m *TimestampedSignatureData) String() string { return proto.CompactTextString(m) } +func (*TimestampedSignatureData) ProtoMessage() {} +func (*TimestampedSignatureData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{5} +} +func (m *TimestampedSignatureData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimestampedSignatureData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimestampedSignatureData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimestampedSignatureData) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimestampedSignatureData.Merge(m, src) +} +func (m *TimestampedSignatureData) XXX_Size() int { + return m.Size() +} +func (m *TimestampedSignatureData) XXX_DiscardUnknown() { + xxx_messageInfo_TimestampedSignatureData.DiscardUnknown(m) +} + +var xxx_messageInfo_TimestampedSignatureData proto.InternalMessageInfo + +// SignBytes defines the signed bytes used for signature verification. 
+type SignBytes struct { + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Diversifier string `protobuf:"bytes,3,opt,name=diversifier,proto3" json:"diversifier,omitempty"` + // type of the data used + DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"` + // marshaled data + Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *SignBytes) Reset() { *m = SignBytes{} } +func (m *SignBytes) String() string { return proto.CompactTextString(m) } +func (*SignBytes) ProtoMessage() {} +func (*SignBytes) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{6} +} +func (m *SignBytes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignBytes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignBytes) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignBytes.Merge(m, src) +} +func (m *SignBytes) XXX_Size() int { + return m.Size() +} +func (m *SignBytes) XXX_DiscardUnknown() { + xxx_messageInfo_SignBytes.DiscardUnknown(m) +} + +var xxx_messageInfo_SignBytes proto.InternalMessageInfo + +// HeaderData returns the SignBytes data for update verification. 
+type HeaderData struct { + // header public key + NewPubKey *types.Any `protobuf:"bytes,1,opt,name=new_pub_key,json=newPubKey,proto3" json:"new_pub_key,omitempty" yaml:"new_pub_key"` + // header diversifier + NewDiversifier string `protobuf:"bytes,2,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"` +} + +func (m *HeaderData) Reset() { *m = HeaderData{} } +func (m *HeaderData) String() string { return proto.CompactTextString(m) } +func (*HeaderData) ProtoMessage() {} +func (*HeaderData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{7} +} +func (m *HeaderData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HeaderData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HeaderData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HeaderData) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeaderData.Merge(m, src) +} +func (m *HeaderData) XXX_Size() int { + return m.Size() +} +func (m *HeaderData) XXX_DiscardUnknown() { + xxx_messageInfo_HeaderData.DiscardUnknown(m) +} + +var xxx_messageInfo_HeaderData proto.InternalMessageInfo + +// ClientStateData returns the SignBytes data for client state verification. 
+type ClientStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + ClientState *types.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"` +} + +func (m *ClientStateData) Reset() { *m = ClientStateData{} } +func (m *ClientStateData) String() string { return proto.CompactTextString(m) } +func (*ClientStateData) ProtoMessage() {} +func (*ClientStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{8} +} +func (m *ClientStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStateData.Merge(m, src) +} +func (m *ClientStateData) XXX_Size() int { + return m.Size() +} +func (m *ClientStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStateData proto.InternalMessageInfo + +// ConsensusStateData returns the SignBytes data for consensus state +// verification. 
+type ConsensusStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + ConsensusState *types.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` +} + +func (m *ConsensusStateData) Reset() { *m = ConsensusStateData{} } +func (m *ConsensusStateData) String() string { return proto.CompactTextString(m) } +func (*ConsensusStateData) ProtoMessage() {} +func (*ConsensusStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{9} +} +func (m *ConsensusStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusStateData.Merge(m, src) +} +func (m *ConsensusStateData) XXX_Size() int { + return m.Size() +} +func (m *ConsensusStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusStateData proto.InternalMessageInfo + +// ConnectionStateData returns the SignBytes data for connection state +// verification. 
+type ConnectionStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Connection *types1.ConnectionEnd `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` +} + +func (m *ConnectionStateData) Reset() { *m = ConnectionStateData{} } +func (m *ConnectionStateData) String() string { return proto.CompactTextString(m) } +func (*ConnectionStateData) ProtoMessage() {} +func (*ConnectionStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{10} +} +func (m *ConnectionStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionStateData.Merge(m, src) +} +func (m *ConnectionStateData) XXX_Size() int { + return m.Size() +} +func (m *ConnectionStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionStateData proto.InternalMessageInfo + +// ChannelStateData returns the SignBytes data for channel state +// verification. 
+type ChannelStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Channel *types2.Channel `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel,omitempty"` +} + +func (m *ChannelStateData) Reset() { *m = ChannelStateData{} } +func (m *ChannelStateData) String() string { return proto.CompactTextString(m) } +func (*ChannelStateData) ProtoMessage() {} +func (*ChannelStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{11} +} +func (m *ChannelStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChannelStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChannelStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChannelStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChannelStateData.Merge(m, src) +} +func (m *ChannelStateData) XXX_Size() int { + return m.Size() +} +func (m *ChannelStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ChannelStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ChannelStateData proto.InternalMessageInfo + +// PacketCommitmentData returns the SignBytes data for packet commitment +// verification. 
+type PacketCommitmentData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Commitment []byte `protobuf:"bytes,2,opt,name=commitment,proto3" json:"commitment,omitempty"` +} + +func (m *PacketCommitmentData) Reset() { *m = PacketCommitmentData{} } +func (m *PacketCommitmentData) String() string { return proto.CompactTextString(m) } +func (*PacketCommitmentData) ProtoMessage() {} +func (*PacketCommitmentData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{12} +} +func (m *PacketCommitmentData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketCommitmentData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketCommitmentData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketCommitmentData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketCommitmentData.Merge(m, src) +} +func (m *PacketCommitmentData) XXX_Size() int { + return m.Size() +} +func (m *PacketCommitmentData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketCommitmentData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketCommitmentData proto.InternalMessageInfo + +func (m *PacketCommitmentData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *PacketCommitmentData) GetCommitment() []byte { + if m != nil { + return m.Commitment + } + return nil +} + +// PacketAcknowledgementData returns the SignBytes data for acknowledgement +// verification. 
+type PacketAcknowledgementData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Acknowledgement []byte `protobuf:"bytes,2,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"` +} + +func (m *PacketAcknowledgementData) Reset() { *m = PacketAcknowledgementData{} } +func (m *PacketAcknowledgementData) String() string { return proto.CompactTextString(m) } +func (*PacketAcknowledgementData) ProtoMessage() {} +func (*PacketAcknowledgementData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{13} +} +func (m *PacketAcknowledgementData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketAcknowledgementData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketAcknowledgementData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketAcknowledgementData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketAcknowledgementData.Merge(m, src) +} +func (m *PacketAcknowledgementData) XXX_Size() int { + return m.Size() +} +func (m *PacketAcknowledgementData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketAcknowledgementData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketAcknowledgementData proto.InternalMessageInfo + +func (m *PacketAcknowledgementData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *PacketAcknowledgementData) GetAcknowledgement() []byte { + if m != nil { + return m.Acknowledgement + } + return nil +} + +// PacketReceiptAbsenceData returns the SignBytes data for +// packet receipt absence verification. 
+type PacketReceiptAbsenceData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (m *PacketReceiptAbsenceData) Reset() { *m = PacketReceiptAbsenceData{} } +func (m *PacketReceiptAbsenceData) String() string { return proto.CompactTextString(m) } +func (*PacketReceiptAbsenceData) ProtoMessage() {} +func (*PacketReceiptAbsenceData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{14} +} +func (m *PacketReceiptAbsenceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketReceiptAbsenceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketReceiptAbsenceData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketReceiptAbsenceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketReceiptAbsenceData.Merge(m, src) +} +func (m *PacketReceiptAbsenceData) XXX_Size() int { + return m.Size() +} +func (m *PacketReceiptAbsenceData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketReceiptAbsenceData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketReceiptAbsenceData proto.InternalMessageInfo + +func (m *PacketReceiptAbsenceData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +// NextSequenceRecvData returns the SignBytes data for verification of the next +// sequence to be received. 
+type NextSequenceRecvData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + NextSeqRecv uint64 `protobuf:"varint,2,opt,name=next_seq_recv,json=nextSeqRecv,proto3" json:"next_seq_recv,omitempty" yaml:"next_seq_recv"` +} + +func (m *NextSequenceRecvData) Reset() { *m = NextSequenceRecvData{} } +func (m *NextSequenceRecvData) String() string { return proto.CompactTextString(m) } +func (*NextSequenceRecvData) ProtoMessage() {} +func (*NextSequenceRecvData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{15} +} +func (m *NextSequenceRecvData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NextSequenceRecvData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NextSequenceRecvData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NextSequenceRecvData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextSequenceRecvData.Merge(m, src) +} +func (m *NextSequenceRecvData) XXX_Size() int { + return m.Size() +} +func (m *NextSequenceRecvData) XXX_DiscardUnknown() { + xxx_messageInfo_NextSequenceRecvData.DiscardUnknown(m) +} + +var xxx_messageInfo_NextSequenceRecvData proto.InternalMessageInfo + +func (m *NextSequenceRecvData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *NextSequenceRecvData) GetNextSeqRecv() uint64 { + if m != nil { + return m.NextSeqRecv + } + return 0 +} + +func init() { + proto.RegisterEnum("ibc.lightclients.solomachine.v1.DataType", DataType_name, DataType_value) + proto.RegisterType((*ClientState)(nil), "ibc.lightclients.solomachine.v1.ClientState") + proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.solomachine.v1.ConsensusState") + proto.RegisterType((*Header)(nil), "ibc.lightclients.solomachine.v1.Header") + 
proto.RegisterType((*Misbehaviour)(nil), "ibc.lightclients.solomachine.v1.Misbehaviour") + proto.RegisterType((*SignatureAndData)(nil), "ibc.lightclients.solomachine.v1.SignatureAndData") + proto.RegisterType((*TimestampedSignatureData)(nil), "ibc.lightclients.solomachine.v1.TimestampedSignatureData") + proto.RegisterType((*SignBytes)(nil), "ibc.lightclients.solomachine.v1.SignBytes") + proto.RegisterType((*HeaderData)(nil), "ibc.lightclients.solomachine.v1.HeaderData") + proto.RegisterType((*ClientStateData)(nil), "ibc.lightclients.solomachine.v1.ClientStateData") + proto.RegisterType((*ConsensusStateData)(nil), "ibc.lightclients.solomachine.v1.ConsensusStateData") + proto.RegisterType((*ConnectionStateData)(nil), "ibc.lightclients.solomachine.v1.ConnectionStateData") + proto.RegisterType((*ChannelStateData)(nil), "ibc.lightclients.solomachine.v1.ChannelStateData") + proto.RegisterType((*PacketCommitmentData)(nil), "ibc.lightclients.solomachine.v1.PacketCommitmentData") + proto.RegisterType((*PacketAcknowledgementData)(nil), "ibc.lightclients.solomachine.v1.PacketAcknowledgementData") + proto.RegisterType((*PacketReceiptAbsenceData)(nil), "ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData") + proto.RegisterType((*NextSequenceRecvData)(nil), "ibc.lightclients.solomachine.v1.NextSequenceRecvData") +} + +func init() { + proto.RegisterFile("ibc/lightclients/solomachine/v1/solomachine.proto", fileDescriptor_6cc2ee18f7f86d4e) +} + +var fileDescriptor_6cc2ee18f7f86d4e = []byte{ + // 1368 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdf, 0x8e, 0xdb, 0x54, + 0x13, 0x5f, 0xa7, 0xe9, 0x76, 0x33, 0xd9, 0xee, 0xe6, 0x73, 0xd3, 0x36, 0xeb, 0x56, 0x89, 0x3f, + 0x7f, 0xfa, 0xca, 0x82, 0x68, 0xd2, 0x5d, 0x44, 0x85, 0x0a, 0x02, 0x1c, 0xc7, 0xd0, 0xb4, 0xbb, + 0xde, 0xe0, 0x38, 0x40, 0x2b, 0x24, 0xcb, 0x71, 0xce, 0x26, 0x56, 0x13, 0x9f, 0x10, 0x3b, 0x49, + 0x83, 0x84, 0x84, 0xb8, 0x2a, 0x11, 0x17, 0xbc, 
0x40, 0x24, 0x04, 0xe2, 0x55, 0x80, 0xcb, 0x72, + 0xc7, 0x55, 0x40, 0xed, 0x1b, 0xe4, 0x09, 0x90, 0x7d, 0x4e, 0x62, 0x3b, 0xdb, 0xcd, 0x8a, 0x7f, + 0x77, 0xe7, 0xcc, 0xfc, 0xe6, 0x37, 0x73, 0x66, 0xc6, 0x73, 0x8e, 0x61, 0xcf, 0xaa, 0x9b, 0x85, + 0xb6, 0xd5, 0x6c, 0xb9, 0x66, 0xdb, 0x42, 0xb6, 0xeb, 0x14, 0x1c, 0xdc, 0xc6, 0x1d, 0xc3, 0x6c, + 0x59, 0x36, 0x2a, 0x0c, 0xf6, 0xc2, 0xdb, 0x7c, 0xb7, 0x87, 0x5d, 0xcc, 0xe6, 0xac, 0xba, 0x99, + 0x0f, 0x9b, 0xe4, 0xc3, 0x98, 0xc1, 0x1e, 0xf7, 0x92, 0xc7, 0x69, 0xe2, 0x1e, 0x2a, 0x98, 0xd8, + 0xb6, 0x91, 0xe9, 0x5a, 0xd8, 0xf6, 0xa8, 0x82, 0x1d, 0x61, 0xe2, 0xfe, 0x1b, 0x00, 0x5b, 0x86, + 0x6d, 0xa3, 0xb6, 0x8f, 0x22, 0x4b, 0x0a, 0x49, 0x37, 0x71, 0x13, 0xfb, 0xcb, 0x82, 0xb7, 0xa2, + 0xd2, 0x9d, 0x26, 0xc6, 0xcd, 0x36, 0x2a, 0xf8, 0xbb, 0x7a, 0xff, 0xb8, 0x60, 0xd8, 0x23, 0xa2, + 0x12, 0x7e, 0x89, 0x41, 0x52, 0xf2, 0xe3, 0xaa, 0xba, 0x86, 0x8b, 0x58, 0x0e, 0x36, 0x1c, 0xf4, + 0x69, 0x1f, 0xd9, 0x26, 0xca, 0x30, 0x3c, 0xb3, 0x1b, 0x57, 0x17, 0x7b, 0x56, 0x82, 0xed, 0xe3, + 0x1e, 0xfe, 0x0c, 0xd9, 0xfa, 0x02, 0x12, 0xf3, 0x20, 0x45, 0x6e, 0x36, 0xcd, 0x5d, 0x19, 0x19, + 0x9d, 0xf6, 0x1d, 0x61, 0x09, 0x20, 0xa8, 0x5b, 0x44, 0x52, 0x9d, 0x93, 0xb8, 0xb0, 0x6d, 0x62, + 0xdb, 0x41, 0xb6, 0xd3, 0x77, 0x74, 0xc7, 0xf3, 0x99, 0x39, 0xc7, 0x33, 0xbb, 0xc9, 0xfd, 0x42, + 0xfe, 0x8c, 0x44, 0xe5, 0xa5, 0xb9, 0x9d, 0x1f, 0x6a, 0xd8, 0xeb, 0x12, 0xa3, 0xa0, 0x6e, 0x99, + 0x11, 0x2c, 0x8b, 0xe0, 0x9a, 0xd1, 0x6e, 0xe3, 0xa1, 0xde, 0xef, 0x36, 0x0c, 0x17, 0xe9, 0xc6, + 0xb1, 0x8b, 0x7a, 0x7a, 0xb7, 0x87, 0xbb, 0xd8, 0x31, 0xda, 0x99, 0x38, 0xcf, 0xec, 0x6e, 0x14, + 0x6f, 0xcc, 0xa6, 0x39, 0x81, 0x10, 0xae, 0x00, 0x0b, 0x6a, 0xc6, 0xd7, 0xd6, 0x7c, 0xa5, 0xe8, + 0xe9, 0x2a, 0x54, 0x75, 0x27, 0xfe, 0xe4, 0xdb, 0xdc, 0x9a, 0xf0, 0x1d, 0x03, 0x5b, 0xd1, 0x58, + 0xd9, 0x7b, 0x00, 0xdd, 0x7e, 0xbd, 0x6d, 0x99, 0xfa, 0x23, 0x34, 0xf2, 0x13, 0x9b, 0xdc, 0x4f, + 0xe7, 0x49, 0x59, 0xf2, 0xf3, 0xb2, 0xe4, 0x45, 0x7b, 0x54, 0xbc, 0x3c, 0x9b, 0xe6, 
0xfe, 0x43, + 0x82, 0x08, 0x2c, 0x04, 0x35, 0x41, 0x36, 0xf7, 0xd1, 0x88, 0xe5, 0x21, 0xd9, 0xb0, 0x06, 0xa8, + 0xe7, 0x58, 0xc7, 0x16, 0xea, 0xf9, 0x25, 0x48, 0xa8, 0x61, 0x11, 0x7b, 0x1d, 0x12, 0xae, 0xd5, + 0x41, 0x8e, 0x6b, 0x74, 0xba, 0x7e, 0x76, 0xe3, 0x6a, 0x20, 0xa0, 0x41, 0x7e, 0x19, 0x83, 0xf5, + 0xbb, 0xc8, 0x68, 0xa0, 0xde, 0xca, 0x9a, 0x47, 0xa8, 0x62, 0x4b, 0x54, 0x9e, 0xd6, 0xb1, 0x9a, + 0xb6, 0xe1, 0xf6, 0x7b, 0xa4, 0x8c, 0x9b, 0x6a, 0x20, 0x60, 0x6b, 0xb0, 0x65, 0xa3, 0xa1, 0x1e, + 0x3a, 0x78, 0x7c, 0xc5, 0xc1, 0x77, 0x66, 0xd3, 0xdc, 0x65, 0x72, 0xf0, 0xa8, 0x95, 0xa0, 0x6e, + 0xda, 0x68, 0x58, 0x59, 0x9c, 0x5f, 0x82, 0x6d, 0x0f, 0x10, 0xce, 0xc1, 0x79, 0x2f, 0x07, 0xe1, + 0x86, 0x58, 0x02, 0x08, 0xaa, 0x17, 0x49, 0x29, 0x10, 0xd0, 0x24, 0xfc, 0x14, 0x83, 0xcd, 0x43, + 0xcb, 0xa9, 0xa3, 0x96, 0x31, 0xb0, 0x70, 0xbf, 0xc7, 0xee, 0x41, 0x82, 0x34, 0x9f, 0x6e, 0x35, + 0xfc, 0x5c, 0x24, 0x8a, 0xe9, 0xd9, 0x34, 0x97, 0xa2, 0x6d, 0x36, 0x57, 0x09, 0xea, 0x06, 0x59, + 0x97, 0x1b, 0x91, 0xec, 0xc5, 0x96, 0xb2, 0xd7, 0x85, 0x8b, 0x8b, 0x74, 0xe8, 0xd8, 0x9e, 0xb7, + 0xfa, 0xde, 0x99, 0xad, 0x5e, 0x9d, 0x5b, 0x89, 0x76, 0xa3, 0x64, 0xb8, 0x46, 0x31, 0x33, 0x9b, + 0xe6, 0xd2, 0x24, 0x8a, 0x08, 0xa3, 0xa0, 0x6e, 0x2e, 0xf6, 0x47, 0xf6, 0x92, 0x47, 0x77, 0x88, + 0x69, 0xca, 0xff, 0x29, 0x8f, 0xee, 0x10, 0x87, 0x3d, 0x6a, 0x43, 0x4c, 0x33, 0xf9, 0x23, 0x03, + 0xa9, 0x65, 0x8a, 0x68, 0x7b, 0x30, 0xcb, 0xed, 0xf1, 0x09, 0x24, 0x1a, 0x86, 0x6b, 0xe8, 0xee, + 0xa8, 0x4b, 0x32, 0xb7, 0xb5, 0xff, 0xf2, 0x99, 0x61, 0x7a, 0xbc, 0xda, 0xa8, 0x8b, 0xc2, 0x65, + 0x59, 0xb0, 0x08, 0xea, 0x46, 0x83, 0xea, 0x59, 0x16, 0xe2, 0xde, 0x9a, 0x76, 0xa5, 0xbf, 0x8e, + 0x36, 0x73, 0xfc, 0xc5, 0xdf, 0xc5, 0x17, 0x0c, 0x64, 0xb4, 0xb9, 0x0c, 0x35, 0x16, 0x67, 0xf2, + 0x0f, 0xf4, 0x2e, 0x6c, 0x05, 0xb9, 0xf0, 0xe9, 0xfd, 0x53, 0x85, 0x7b, 0x37, 0xaa, 0x17, 0xd4, + 0xa0, 0x1c, 0xa5, 0x13, 0x21, 0xc4, 0x5e, 0x1c, 0xc2, 0x6f, 0x0c, 0x24, 0x3c, 0xbf, 0xc5, 0x91, + 0x8b, 0x9c, 0xbf, 0xf1, 
0x75, 0x2e, 0x0d, 0x8a, 0x73, 0x27, 0x07, 0x45, 0xa4, 0x04, 0xf1, 0x7f, + 0xab, 0x04, 0xe7, 0x83, 0x12, 0xd0, 0x13, 0xfe, 0xc0, 0x00, 0x90, 0xe1, 0xe3, 0x27, 0xe5, 0x00, + 0x92, 0xf4, 0x93, 0x3f, 0x73, 0x3c, 0x5e, 0x99, 0x4d, 0x73, 0x6c, 0x64, 0x4a, 0xd0, 0xf9, 0x48, + 0x46, 0xc4, 0x29, 0xf3, 0x21, 0xf6, 0x17, 0xe7, 0xc3, 0xe7, 0xb0, 0x1d, 0xba, 0x1c, 0xfd, 0x58, + 0x59, 0x88, 0x77, 0x0d, 0xb7, 0x45, 0xdb, 0xd9, 0x5f, 0xb3, 0x15, 0xd8, 0xa4, 0xa3, 0x81, 0x5c, + 0x68, 0xb1, 0x15, 0x07, 0xb8, 0x3a, 0x9b, 0xe6, 0x2e, 0x45, 0xc6, 0x09, 0xbd, 0xb2, 0x92, 0x66, + 0xe0, 0x89, 0xba, 0xff, 0x8a, 0x01, 0x36, 0x7a, 0x91, 0x9c, 0x1a, 0xc2, 0x83, 0x93, 0xd7, 0xea, + 0xaa, 0x28, 0xfe, 0xc4, 0xdd, 0x49, 0x63, 0x19, 0xc0, 0x25, 0x69, 0xf1, 0x20, 0x59, 0x1d, 0x8b, + 0x0c, 0x10, 0xbc, 0x5d, 0x68, 0x18, 0xff, 0xf7, 0xdb, 0xca, 0x7b, 0xbc, 0xe4, 0x43, 0xef, 0x1a, + 0x72, 0xa9, 0xd3, 0x9d, 0x6c, 0x37, 0xd4, 0x90, 0x21, 0xf5, 0xdb, 0x80, 0x94, 0x44, 0x9e, 0x38, + 0xab, 0x9d, 0xde, 0x86, 0x0b, 0xf4, 0x29, 0x44, 0x3d, 0x5e, 0x0f, 0x79, 0xa4, 0x6f, 0x24, 0xcf, + 0x1d, 0x59, 0xaa, 0x73, 0x30, 0xf5, 0x72, 0x0f, 0xd2, 0x15, 0xc3, 0x7c, 0x84, 0x5c, 0x09, 0x77, + 0x3a, 0x96, 0xdb, 0x41, 0xb6, 0x7b, 0xaa, 0xa7, 0xac, 0x77, 0xbc, 0x39, 0xca, 0x77, 0xb6, 0xa9, + 0x86, 0x24, 0xc2, 0x03, 0xd8, 0x21, 0x5c, 0xa2, 0xf9, 0xc8, 0xc6, 0xc3, 0x36, 0x6a, 0x34, 0xd1, + 0x4a, 0xc2, 0x5d, 0xd8, 0x36, 0xa2, 0x50, 0xca, 0xba, 0x2c, 0x16, 0xf2, 0x90, 0x21, 0xd4, 0x2a, + 0x32, 0x91, 0xd5, 0x75, 0xc5, 0xba, 0xe3, 0xcd, 0x81, 0xd3, 0x98, 0x85, 0x16, 0xa4, 0x15, 0xf4, + 0xd8, 0x9d, 0x3f, 0xbe, 0x54, 0x64, 0x0e, 0x4e, 0x8d, 0xe2, 0x2d, 0xb8, 0x68, 0xa3, 0xc7, 0xae, + 0xf7, 0x74, 0xd3, 0x7b, 0xc8, 0x1c, 0xd0, 0xb7, 0x5d, 0xe8, 0x1a, 0x88, 0xa8, 0x05, 0x35, 0x69, + 0x13, 0x6a, 0x8f, 0xf5, 0x95, 0xaf, 0xe3, 0xb0, 0x31, 0x1f, 0x0c, 0xec, 0x1b, 0xf0, 0xbf, 0x92, + 0xa8, 0x89, 0xba, 0xf6, 0xa0, 0x22, 0xeb, 0x35, 0xa5, 0xac, 0x94, 0xb5, 0xb2, 0x78, 0x50, 0x7e, + 0x28, 0x97, 0xf4, 0x9a, 0x52, 0xad, 0xc8, 0x52, 0xf9, 0xbd, 
0xb2, 0x5c, 0x4a, 0xad, 0x71, 0xdb, + 0xe3, 0x09, 0x9f, 0x0c, 0x89, 0xd8, 0x1b, 0x70, 0x25, 0xb0, 0x94, 0x0e, 0xca, 0xb2, 0xa2, 0xe9, + 0x55, 0x4d, 0xd4, 0xe4, 0x14, 0xc3, 0xc1, 0x78, 0xc2, 0xaf, 0x13, 0x19, 0xfb, 0x2a, 0xec, 0x84, + 0x70, 0x47, 0x4a, 0x55, 0x56, 0xaa, 0xb5, 0x2a, 0x85, 0xc6, 0xb8, 0x8b, 0xe3, 0x09, 0x9f, 0x58, + 0x88, 0xd9, 0x3c, 0x70, 0x11, 0xb4, 0x22, 0x4b, 0x5a, 0xf9, 0x48, 0xa1, 0xf0, 0x73, 0xdc, 0xd6, + 0x78, 0xc2, 0x43, 0x20, 0x67, 0x77, 0xe1, 0x6a, 0x08, 0x7f, 0x57, 0x54, 0x14, 0xf9, 0x80, 0x82, + 0xe3, 0x5c, 0x72, 0x3c, 0xe1, 0x2f, 0x50, 0x21, 0xfb, 0x3a, 0x5c, 0x0b, 0x90, 0x15, 0x51, 0xba, + 0x2f, 0x6b, 0xba, 0x74, 0x74, 0x78, 0x58, 0xd6, 0x0e, 0x65, 0x45, 0x4b, 0x9d, 0xe7, 0xd2, 0xe3, + 0x09, 0x9f, 0x22, 0x8a, 0x40, 0xce, 0xbe, 0x03, 0xfc, 0x09, 0x33, 0x51, 0xba, 0xaf, 0x1c, 0x7d, + 0x74, 0x20, 0x97, 0xde, 0x97, 0x7d, 0xdb, 0x75, 0x6e, 0x67, 0x3c, 0xe1, 0x2f, 0x13, 0xed, 0x92, + 0x92, 0x7d, 0xfb, 0x05, 0x04, 0xaa, 0x2c, 0xc9, 0xe5, 0x8a, 0xa6, 0x8b, 0xc5, 0xaa, 0xac, 0x48, + 0x72, 0xea, 0x02, 0x97, 0x19, 0x4f, 0xf8, 0x34, 0xd1, 0x52, 0x25, 0xd5, 0xb1, 0xb7, 0xe1, 0x7a, + 0x60, 0xaf, 0xc8, 0x1f, 0x6b, 0x7a, 0x55, 0xfe, 0xa0, 0xe6, 0xa9, 0x3c, 0x9a, 0x0f, 0x53, 0x1b, + 0x24, 0x70, 0x4f, 0x33, 0x57, 0x78, 0x72, 0x96, 0x87, 0x54, 0x60, 0x77, 0x57, 0x16, 0x4b, 0xb2, + 0x9a, 0x4a, 0x90, 0xca, 0x90, 0x1d, 0x17, 0x7f, 0xf2, 0x7d, 0x76, 0xad, 0x58, 0xfb, 0xf9, 0x59, + 0x96, 0x79, 0xfa, 0x2c, 0xcb, 0xfc, 0xfe, 0x2c, 0xcb, 0x7c, 0xf3, 0x3c, 0xbb, 0xf6, 0xf4, 0x79, + 0x76, 0xed, 0xd7, 0xe7, 0xd9, 0xb5, 0x87, 0x6f, 0x36, 0x2d, 0xb7, 0xd5, 0xaf, 0xe7, 0x4d, 0xdc, + 0x29, 0x98, 0xd8, 0xe9, 0x60, 0xa7, 0x60, 0xd5, 0xcd, 0x9b, 0x4d, 0x5c, 0xe8, 0xe0, 0x46, 0xbf, + 0x8d, 0x1c, 0xf2, 0x87, 0x73, 0x6b, 0xff, 0x26, 0x99, 0x87, 0x85, 0x36, 0x6a, 0x1a, 0xe6, 0xa8, + 0x30, 0xd8, 0xbb, 0x75, 0xab, 0xbe, 0xee, 0x0f, 0xb1, 0xd7, 0xfe, 0x08, 0x00, 0x00, 0xff, 0xff, + 0xef, 0x59, 0x70, 0x2d, 0x87, 0x0d, 0x00, 0x00, +} + +func (m *ClientState) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AllowUpdateAfterProposal { + i-- + if m.AllowUpdateAfterProposal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.FrozenSequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.FrozenSequence)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ConsensusState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if len(m.Diversifier) > 0 { + i -= len(m.Diversifier) + copy(dAtA[i:], m.Diversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier))) + i-- + dAtA[i] = 0x12 + } + if m.PublicKey != nil { + { + size, err := m.PublicKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Header) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewDiversifier) > 0 { + i -= len(m.NewDiversifier) + copy(dAtA[i:], m.NewDiversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier))) + i-- + dAtA[i] = 0x2a + } + if m.NewPublicKey != nil { + { + size, err := m.NewPublicKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Misbehaviour) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Misbehaviour) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Misbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SignatureTwo != nil { + { + size, err := m.SignatureTwo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.SignatureOne != nil { + { + size, err := m.SignatureOne.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x10 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignatureAndData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureAndData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureAndData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x20 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + if m.DataType != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType)) + i-- + dAtA[i] = 0x10 + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TimestampedSignatureData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimestampedSignatureData) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimestampedSignatureData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if len(m.SignatureData) > 0 { + i -= len(m.SignatureData) + copy(dAtA[i:], m.SignatureData) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.SignatureData))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignBytes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignBytes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x2a + } + if m.DataType != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType)) + i-- + dAtA[i] = 0x20 + } + if len(m.Diversifier) > 0 { + i -= len(m.Diversifier) + copy(dAtA[i:], m.Diversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier))) + i-- + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HeaderData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderData) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HeaderData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewDiversifier) > 0 { + i -= len(m.NewDiversifier) + copy(dAtA[i:], m.NewDiversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier))) + i-- + dAtA[i] = 0x12 + } + if m.NewPubKey != nil { + { + size, err := m.NewPubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsensusStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectionStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Connection != nil { + { + size, err := m.Connection.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ChannelStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChannelStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChannelStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Channel != nil { + { + size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketCommitmentData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketCommitmentData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketCommitmentData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Commitment) > 0 { + i -= len(m.Commitment) + copy(dAtA[i:], m.Commitment) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Commitment))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketAcknowledgementData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketAcknowledgementData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketAcknowledgementData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Acknowledgement) > 0 { + i -= len(m.Acknowledgement) + copy(dAtA[i:], m.Acknowledgement) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Acknowledgement))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return 
len(dAtA) - i, nil +} + +func (m *PacketReceiptAbsenceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketReceiptAbsenceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketReceiptAbsenceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NextSequenceRecvData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NextSequenceRecvData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NextSequenceRecvData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NextSeqRecv != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.NextSeqRecv)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSolomachine(dAtA []byte, offset int, v uint64) int { + offset -= sovSolomachine(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClientState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.FrozenSequence != 0 { + n += 1 + 
sovSolomachine(uint64(m.FrozenSequence)) + } + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.AllowUpdateAfterProposal { + n += 2 + } + return n +} + +func (m *ConsensusState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PublicKey != nil { + l = m.PublicKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Diversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + return n +} + +func (m *Header) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.NewPublicKey != nil { + l = m.NewPublicKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.NewDiversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *Misbehaviour) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.SignatureOne != nil { + l = m.SignatureOne.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.SignatureTwo != nil { + l = m.SignatureTwo.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *SignatureAndData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.DataType != 0 { + n += 1 + sovSolomachine(uint64(m.DataType)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + return n +} + +func (m 
*TimestampedSignatureData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignatureData) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + return n +} + +func (m *SignBytes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + l = len(m.Diversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.DataType != 0 { + n += 1 + sovSolomachine(uint64(m.DataType)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *HeaderData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NewPubKey != nil { + l = m.NewPubKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.NewDiversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ClientStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ConsensusStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ConnectionStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Connection != nil { + l = m.Connection.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ChannelStateData) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Channel != nil { + l = m.Channel.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketCommitmentData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Commitment) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketAcknowledgementData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Acknowledgement) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketReceiptAbsenceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *NextSequenceRecvData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.NextSeqRecv != 0 { + n += 1 + sovSolomachine(uint64(m.NextSeqRecv)) + } + return n +} + +func sovSolomachine(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSolomachine(x uint64) (n int) { + return sovSolomachine(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClientState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FrozenSequence", wireType) + } + m.FrozenSequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FrozenSequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &ConsensusState{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowUpdateAfterProposal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + m.AllowUpdateAfterProposal = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PublicKey == nil { + m.PublicKey = &types.Any{} + } + if err := m.PublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Diversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPublicKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPublicKey == nil { + m.NewPublicKey = &types.Any{} + } + if err := m.NewPublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewDiversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Misbehaviour) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Misbehaviour: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Misbehaviour: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureOne", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignatureOne == nil { + m.SignatureOne = &SignatureAndData{} + } + if err := m.SignatureOne.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureTwo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignatureTwo == nil { + m.SignatureTwo = &SignatureAndData{} + } + if err := m.SignatureTwo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureAndData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: SignatureAndData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureAndData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType) + } + m.DataType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataType |= DataType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimestampedSignatureData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimestampedSignatureData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimestampedSignatureData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureData", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if 
postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignatureData = append(m.SignatureData[:0], dAtA[iNdEx:postIndex]...) + if m.SignatureData == nil { + m.SignatureData = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignBytes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignBytes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignBytes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Diversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType) + } + m.DataType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataType |= DataType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if 
postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeaderData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeaderData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeaderData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPubKey == nil { + m.NewPubKey = &types.Any{} + } + if err := m.NewPubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewDiversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectionStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectionStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Connection == nil { + m.Connection = &types1.ConnectionEnd{} + } + if err := m.Connection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChannelStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChannelStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChannelStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Channel == nil { + m.Channel = &types2.Channel{} + } + if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketCommitmentData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketCommitmentData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketCommitmentData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commitment", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commitment = append(m.Commitment[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Commitment == nil { + m.Commitment = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketAcknowledgementData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketAcknowledgementData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketAcknowledgementData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...) + if m.Acknowledgement == nil { + m.Acknowledgement = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketReceiptAbsenceData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketReceiptAbsenceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketReceiptAbsenceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + 
var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NextSequenceRecvData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NextSequenceRecvData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NextSequenceRecvData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextSeqRecv", wireType) + } + m.NextSeqRecv = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextSeqRecv |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSolomachine(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSolomachine + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSolomachine + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSolomachine + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSolomachine + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSolomachine + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSolomachine + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSolomachine = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSolomachine = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSolomachine = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go new file mode 100644 index 00000000000..842086f956b --- /dev/null +++ b/modules/core/02-client/legacy/v100/store.go @@ -0,0 +1,180 @@ +package v100 + +import ( + "fmt" + "strings" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/cosmos/ibc-go/modules/core/02-client/types" + clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/exported" + smtypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types" + ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" +) + +// MigrateStore performs in-place store migrations from SDK v0.40 of the IBC module to v1.0.0 of ibc-go. 
+// The migration includes: +// +// - Migrating solo machine client states from v1 to v2 protobuf definition +// - Pruning all solo machine consensus states +// - Pruning expired tendermint consensus states +// - Adds ProcessedHeight and Iteration keys for unexpired tendermint consensus states +func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) { + store := ctx.KVStore(storeKey) + iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix) + + var clients []string + + // collect all clients + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + if keySplit[len(keySplit)-1] != host.KeyClientState { + continue + } + + // key is clients/{clientid}/clientState + // Thus, keySplit[1] is clientID + clients = append(clients, keySplit[1]) + } + + for _, clientID := range clients { + clientType, _, err := types.ParseClientIdentifier(clientID) + if err != nil { + return err + } + + clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, clientID)) + clientStore := prefix.NewStore(ctx.KVStore(storeKey), clientPrefix) + + bz := clientStore.Get(host.ClientStateKey()) + if bz == nil { + return clienttypes.ErrClientNotFound + } + + switch clientType { + case exported.Solomachine: + any := &codectypes.Any{} + if err := cdc.Unmarshal(bz, any); err != nil { + return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state") + } + + clientState := &ClientState{} + if err := cdc.Unmarshal(any.Value, clientState); err != nil { + return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state") + } + + updatedClientState := migrateSolomachine(clientState) + + bz, err := clienttypes.MarshalClientState(cdc, updatedClientState) + if err != nil { + return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state") + } + + // update solomachine in 
store + clientStore.Set(host.ClientStateKey(), bz) + + pruneSolomachineConsensusStates(clientStore) + + case exported.Tendermint: + var clientState exported.ClientState + if err := cdc.UnmarshalInterface(bz, &clientState); err != nil { + return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into tendermint client state") + } + + tmClientState, ok := clientState.(*ibctmtypes.ClientState) + if !ok { + return sdkerrors.Wrap(types.ErrInvalidClient, "client state is not tendermint even though client id contains 07-tendermint") + } + + // add iteration keys so pruning will be successful + if err = addConsensusMetadata(ctx, clientStore, cdc, tmClientState); err != nil { + return err + } + + if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, tmClientState); err != nil { + return err + } + + default: + continue + } + } + + return nil +} + +// migrateSolomachine migrates the solomachine from v1 to v2 solo machine protobuf defintion. +func migrateSolomachine(clientState *ClientState) *smtypes.ClientState { + isFrozen := clientState.FrozenSequence != 0 + consensusState := &smtypes.ConsensusState{ + PublicKey: clientState.ConsensusState.PublicKey, + Diversifier: clientState.ConsensusState.Diversifier, + Timestamp: clientState.ConsensusState.Timestamp, + } + + return &smtypes.ClientState{ + Sequence: clientState.Sequence, + IsFrozen: isFrozen, + ConsensusState: consensusState, + AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, + } +} + +// pruneSolomachineConsensusStates removes all solomachine consensus states from the +// client store. 
+func pruneSolomachineConsensusStates(clientStore sdk.KVStore) { + iterator := sdk.KVStorePrefixIterator(clientStore, []byte(host.KeyConsensusStatePrefix)) + var heights []exported.Height + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + // key is in the format "consensusStates/" + if len(keySplit) != 2 || keySplit[0] != string(host.KeyConsensusStatePrefix) { + continue + } + + // collect consensus states to be pruned + heights = append(heights, types.MustParseHeight(keySplit[1])) + } + + // delete all consensus states + for _, height := range heights { + clientStore.Delete(host.ConsensusStateKey(height)) + } +} + +// addConsensusMetadata adds the iteration key and processed height for all tendermint consensus states +// These keys were not included in the previous release of the IBC module. Adding the iteration keys allows +// for pruning iteration. +func addConsensusMetadata(ctx sdk.Context, clientStore sdk.KVStore, cdc codec.BinaryCodec, clientState *ibctmtypes.ClientState) error { + var heights []exported.Height + iterator := sdk.KVStorePrefixIterator(clientStore, []byte(host.KeyConsensusStatePrefix)) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + // consensus key is in the format "consensusStates/" + if len(keySplit) != 2 { + continue + } + + heights = append(heights, types.MustParseHeight(keySplit[1])) + } + + for _, height := range heights { + // set the iteration key and processed height + // these keys were not included in the SDK v0.42.0 release + ibctmtypes.SetProcessedHeight(clientStore, height, clienttypes.GetSelfHeight(ctx)) + ibctmtypes.SetIterationKey(clientStore, height) + } + + return nil +} diff --git a/modules/core/02-client/legacy/v100/store_test.go b/modules/core/02-client/legacy/v100/store_test.go new file mode 100644 index 00000000000..1b9856da183 --- /dev/null +++ 
b/modules/core/02-client/legacy/v100/store_test.go @@ -0,0 +1,231 @@ +package v100_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + "github.com/cosmos/ibc-go/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/exported" + ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/ibc-go/testing" +) + +type LegacyTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +// TestLegacyTestSuite runs all the tests within this package. +func TestLegacyTestSuite(t *testing.T) { + suite.Run(t, new(LegacyTestSuite)) +} + +// SetupTest creates a coordinator with 2 test chains. +func (suite *LegacyTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) + suite.coordinator.CommitNBlocks(suite.chainA, 2) + suite.coordinator.CommitNBlocks(suite.chainB, 2) +} + +// only test migration for solo machines +// ensure all client states are migrated and all consensus states +// are removed +func (suite *LegacyTestSuite) TestMigrateStoreSolomachine() { + path := ibctesting.NewPath(suite.chainA, suite.chainB) + + // create multiple legacy solo machine clients + solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1) + solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4) + + // manually generate 
old proto buf definitions and set in store + // NOTE: we cannot use 'CreateClient' and 'UpdateClient' functions since we are + // using client states and consensus states which do not implement the exported.ClientState + // and exported.ConsensusState interface + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), sm.ClientID) + clientState := sm.ClientState() + + var seq uint64 + if clientState.IsFrozen { + seq = 1 + } + + // generate old client state proto defintion + legacyClientState := &v100.ClientState{ + Sequence: clientState.Sequence, + FrozenSequence: seq, + ConsensusState: &v100.ConsensusState{ + PublicKey: clientState.ConsensusState.PublicKey, + Diversifier: clientState.ConsensusState.Diversifier, + Timestamp: clientState.ConsensusState.Timestamp, + }, + AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, + } + + // set client state + bz, err := path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState) + suite.Require().NoError(err) + clientStore.Set(host.ClientStateKey(), bz) + + // set some consensus states + height1 := types.NewHeight(0, 1) + height2 := types.NewHeight(1, 2) + height3 := types.NewHeight(0, 123) + + bz, err = path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState.ConsensusState) + suite.Require().NoError(err) + clientStore.Set(host.ConsensusStateKey(height1), bz) + clientStore.Set(host.ConsensusStateKey(height2), bz) + clientStore.Set(host.ConsensusStateKey(height3), bz) + } + + // create tendermint clients + suite.coordinator.SetupClients(path) + + err := v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + + // verify client state has been migrated + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + 
clientState, ok := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.GetClientState(path.EndpointA.Chain.GetContext(), sm.ClientID) + suite.Require().True(ok) + suite.Require().Equal(sm.ClientState(), clientState) + } + + // verify consensus states have been removed + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + clientConsensusStates := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.GetAllConsensusStates(path.EndpointA.Chain.GetContext()) + for _, client := range clientConsensusStates { + // GetAllConsensusStates should not return consensus states for our solo machine clients + suite.Require().NotEqual(sm.ClientID, client.ClientId) + } + } +} + +// only test migration for tendermint clients +// ensure all expired consensus states are removed from tendermint client stores +func (suite *LegacyTestSuite) TestMigrateStoreTendermint() { + // create path and setup clients + path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(path1) + + path2 := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(path2) + pruneHeightMap := make(map[*ibctesting.Path][]exported.Height) + unexpiredHeightMap := make(map[*ibctesting.Path][]exported.Height) + + for _, path := range []*ibctesting.Path{path1, path2} { + // collect all heights expected to be pruned + var pruneHeights []exported.Height + pruneHeights = append(pruneHeights, path.EndpointA.GetClientState().GetLatestHeight()) + + // these heights will be expired and also pruned + for i := 0; i < 3; i++ { + path.EndpointA.UpdateClient() + pruneHeights = append(pruneHeights, path.EndpointA.GetClientState().GetLatestHeight()) + } + + // double chedck all information is currently stored + for _, pruneHeight := range pruneHeights { + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) + suite.Require().True(ok) + suite.Require().NotNil(consState) + + ctx := path.EndpointA.Chain.GetContext() 
+ clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + + processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, pruneHeight) + suite.Require().True(ok) + suite.Require().NotNil(processedTime) + + processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight) + suite.Require().True(ok) + suite.Require().NotNil(processedHeight) + + expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight) + suite.Require().NotNil(expectedConsKey) + } + pruneHeightMap[path] = pruneHeights + } + + // Increment the time by a week + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + + for _, path := range []*ibctesting.Path{path1, path2} { + // create the consensus state that can be used as trusted height for next update + var unexpiredHeights []exported.Height + path.EndpointA.UpdateClient() + unexpiredHeights = append(unexpiredHeights, path.EndpointA.GetClientState().GetLatestHeight()) + path.EndpointA.UpdateClient() + unexpiredHeights = append(unexpiredHeights, path.EndpointA.GetClientState().GetLatestHeight()) + + // remove processed height and iteration keys since these were missing from previous version of ibc module + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), path.EndpointA.ClientID) + for _, height := range unexpiredHeights { + clientStore.Delete(ibctmtypes.ProcessedHeightKey(height)) + clientStore.Delete(ibctmtypes.IterationKey(height)) + } + + unexpiredHeightMap[path] = unexpiredHeights + } + + // Increment the time by another week, then update the client. 
+ // This will cause the consensus states created before the first time increment + // to be expired + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + err := v100.MigrateStore(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path1.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + + for _, path := range []*ibctesting.Path{path1, path2} { + ctx := path.EndpointA.Chain.GetContext() + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + + // ensure everything has been pruned + for i, pruneHeight := range pruneHeightMap[path] { + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) + suite.Require().False(ok, i) + suite.Require().Nil(consState, i) + + processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, pruneHeight) + suite.Require().False(ok, i) + suite.Require().Equal(uint64(0), processedTime, i) + + processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight) + suite.Require().False(ok, i) + suite.Require().Nil(processedHeight, i) + + expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight) + suite.Require().Nil(expectedConsKey, i) + } + + // ensure metadata is set for unexpired consensus state + for _, height := range unexpiredHeightMap[path] { + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, height) + suite.Require().True(ok) + suite.Require().NotNil(consState) + + processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, height) + suite.Require().True(ok) + suite.Require().NotEqual(uint64(0), processedTime) + + processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, height) + suite.Require().True(ok) + suite.Require().Equal(types.GetSelfHeight(path.EndpointA.Chain.GetContext()), processedHeight) + + consKey := ibctmtypes.GetIterationKey(clientStore, height) + suite.Require().Equal(host.ConsensusStateKey(height), 
consKey) + } + } +} diff --git a/modules/core/03-connection/types/connection.pb.go b/modules/core/03-connection/types/connection.pb.go index 07577489103..6fbe5ba997f 100644 --- a/modules/core/03-connection/types/connection.pb.go +++ b/modules/core/03-connection/types/connection.pb.go @@ -356,9 +356,9 @@ var xxx_messageInfo_Version proto.InternalMessageInfo // Params defines the set of Connection parameters. type Params struct { - // maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of - // time that the chain might reasonably take to produce the next block under normal operating conditions. A safe - // choice is 3-5x the expected time per block. + // maximum expected time per block (in nanoseconds), used to enforce block delay. This parameter should reflect the + // largest amount of time that the chain might reasonably take to produce the next block under normal operating + // conditions. A safe choice is 3-5x the expected time per block. MaxExpectedTimePerBlock uint64 `protobuf:"varint,1,opt,name=max_expected_time_per_block,json=maxExpectedTimePerBlock,proto3" json:"max_expected_time_per_block,omitempty" yaml:"max_expected_time_per_block"` } diff --git a/modules/core/03-connection/types/params.go b/modules/core/03-connection/types/params.go index 904bde60415..35677062fdb 100644 --- a/modules/core/03-connection/types/params.go +++ b/modules/core/03-connection/types/params.go @@ -7,7 +7,7 @@ import ( paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" ) -// DefaultTimePerBlock is the default value for maximum expected time per block. +// DefaultTimePerBlock is the default value for maximum expected time per block (in nanoseconds). 
const DefaultTimePerBlock = 30 * time.Second // KeyMaxExpectedTimePerBlock is store's key for MaxExpectedTimePerBlock parameter diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go index 1578900af21..de4cbe48c8a 100644 --- a/modules/core/exported/client.go +++ b/modules/core/exported/client.go @@ -2,10 +2,9 @@ package exported import ( ics23 "github.com/confio/ics23/go" - proto "github.com/gogo/protobuf/proto" - "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" + proto "github.com/gogo/protobuf/proto" ) // Status represents the status of a client diff --git a/modules/core/keeper/migrations.go b/modules/core/keeper/migrations.go new file mode 100644 index 00000000000..c6691005bd3 --- /dev/null +++ b/modules/core/keeper/migrations.go @@ -0,0 +1,32 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper" +) + +// Migrator is a struct for handling in-place store migrations. +type Migrator struct { + keeper Keeper +} + +// NewMigrator returns a new Migrator. +func NewMigrator(keeper Keeper) Migrator { + return Migrator{keeper: keeper} +} + +// Migrate1to2 migrates from version 1 to 2. 
+// This migration: +// - migrates solo machine client state from protobuf definition v1 to v2 +// - prunes solo machine consensus states +// - prunes expired tendermint consensus states +// - adds ProcessedHeight and Iteration keys for unexpired tendermint consensus states +func (m Migrator) Migrate1to2(ctx sdk.Context) error { + clientMigrator := clientkeeper.NewMigrator(m.keeper.ClientKeeper) + if err := clientMigrator.Migrate1to2(ctx); err != nil { + return err + } + + return nil +} diff --git a/modules/core/legacy/v100/genesis.go b/modules/core/legacy/v100/genesis.go new file mode 100644 index 00000000000..42932613499 --- /dev/null +++ b/modules/core/legacy/v100/genesis.go @@ -0,0 +1,54 @@ +package v100 + +import ( + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + tmtypes "github.com/tendermint/tendermint/types" + + clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" + connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types" + host "github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/types" +) + +// MigrateGenesis accepts exported v1.0.0 IBC client genesis file and migrates it to: +// +// - Update solo machine client state protobuf definition (v1 to v2) +// - Remove all solo machine consensus states +// - Remove all expired tendermint consensus states +func MigrateGenesis(appState genutiltypes.AppMap, clientCtx client.Context, genDoc tmtypes.GenesisDoc, maxExpectedTimePerBlock uint64) (genutiltypes.AppMap, error) { + if appState[host.ModuleName] != nil { + // ensure legacy solo machines are registered + clientv100.RegisterInterfaces(clientCtx.InterfaceRegistry) + + // unmarshal relative source genesis application state + ibcGenState := 
&types.GenesisState{} + clientCtx.JSONCodec.MustUnmarshalJSON(appState[host.ModuleName], ibcGenState) + + clientGenState, err := clientv100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &ibcGenState.ClientGenesis, genDoc.GenesisTime, clienttypes.NewHeight(clienttypes.ParseChainID(genDoc.ChainID), uint64(genDoc.InitialHeight))) + if err != nil { + return nil, err + } + + ibcGenState.ClientGenesis = *clientGenState + + // set max expected time per block + connectionGenesis := connectiontypes.GenesisState{ + Connections: ibcGenState.ConnectionGenesis.Connections, + ClientConnectionPaths: ibcGenState.ConnectionGenesis.ClientConnectionPaths, + NextConnectionSequence: ibcGenState.ConnectionGenesis.NextConnectionSequence, + Params: connectiontypes.NewParams(maxExpectedTimePerBlock), + } + + ibcGenState.ConnectionGenesis = connectionGenesis + + // delete old genesis state + delete(appState, host.ModuleName) + + // set new ibc genesis state + appState[host.ModuleName] = clientCtx.JSONCodec.MustMarshalJSON(ibcGenState) + } + return appState, nil +} diff --git a/modules/core/legacy/v100/genesis_test.go b/modules/core/legacy/v100/genesis_test.go new file mode 100644 index 00000000000..d4e53d9fba2 --- /dev/null +++ b/modules/core/legacy/v100/genesis_test.go @@ -0,0 +1,178 @@ +package v100_test + +import ( + "testing" + + "github.com/cosmos/cosmos-sdk/client" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + "github.com/stretchr/testify/suite" + tmtypes "github.com/tendermint/tendermint/types" + + ibcclient "github.com/cosmos/ibc-go/modules/core/02-client" + clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100" + clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" + connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types" + host 
"github.com/cosmos/ibc-go/modules/core/24-host" + "github.com/cosmos/ibc-go/modules/core/legacy/v100" + "github.com/cosmos/ibc-go/modules/core/types" + ibctesting "github.com/cosmos/ibc-go/testing" + "github.com/cosmos/ibc-go/testing/simapp" +) + +type LegacyTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +// TestLegacyTestSuite runs all the tests within this package. +func TestLegacyTestSuite(t *testing.T) { + suite.Run(t, new(LegacyTestSuite)) +} + +// SetupTest creates a coordinator with 2 test chains. +func (suite *LegacyTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) + suite.coordinator.CommitNBlocks(suite.chainA, 2) + suite.coordinator.CommitNBlocks(suite.chainB, 2) +} + +// NOTE: this test is mainly copied from 02-client/legacy/v100 +func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() { + path := ibctesting.NewPath(suite.chainA, suite.chainB) + encodingConfig := simapp.MakeTestEncodingConfig() + clientCtx := client.Context{}. + WithInterfaceRegistry(encodingConfig.InterfaceRegistry). + WithTxConfig(encodingConfig.TxConfig). 
+ WithJSONCodec(encodingConfig.Marshaler) + + // create multiple legacy solo machine clients + solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1) + solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4) + + // create tendermint clients + // NOTE: only 1 set of metadata is created, we aren't testing ordering + // The purpose of this test is to ensure the genesis states can be marshalled/unmarshalled + suite.coordinator.SetupClients(path) + clientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + // manually generate old proto buf definitions and set in genesis + // NOTE: we cannot use 'ExportGenesis' for the solo machines since we are + // using client states and consensus states which do not implement the exported.ClientState + // and exported.ConsensusState interface + var clients []clienttypes.IdentifiedClientState + for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} { + clientState := sm.ClientState() + + var seq uint64 + if clientState.IsFrozen { + seq = 1 + } + + // generate old client state proto definition + legacyClientState := &clientv100.ClientState{ + Sequence: clientState.Sequence, + FrozenSequence: seq, + ConsensusState: &clientv100.ConsensusState{ + PublicKey: clientState.ConsensusState.PublicKey, + Diversifier: clientState.ConsensusState.Diversifier, + Timestamp: clientState.ConsensusState.Timestamp, + }, + AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal, + } + + // set client state + any, err := codectypes.NewAnyWithValue(legacyClientState) + suite.Require().NoError(err) + suite.Require().NotNil(any) + client := clienttypes.IdentifiedClientState{ + ClientId: sm.ClientID, + ClientState: any, + } + clients = append(clients, client) + + // set in store for ease of determining expected genesis + clientStore := 
path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), sm.ClientID) + bz, err := path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState) + suite.Require().NoError(err) + clientStore.Set(host.ClientStateKey(), bz) + + // set some consensus states + height1 := clienttypes.NewHeight(0, 1) + height2 := clienttypes.NewHeight(1, 2) + height3 := clienttypes.NewHeight(0, 123) + + any, err = codectypes.NewAnyWithValue(legacyClientState.ConsensusState) + suite.Require().NoError(err) + suite.Require().NotNil(any) + consensusState1 := clienttypes.ConsensusStateWithHeight{ + Height: height1, + ConsensusState: any, + } + consensusState2 := clienttypes.ConsensusStateWithHeight{ + Height: height2, + ConsensusState: any, + } + consensusState3 := clienttypes.ConsensusStateWithHeight{ + Height: height3, + ConsensusState: any, + } + + clientConsensusState := clienttypes.ClientConsensusStates{ + ClientId: sm.ClientID, + ConsensusStates: []clienttypes.ConsensusStateWithHeight{consensusState1, consensusState2, consensusState3}, + } + + clientGenState.ClientsConsensus = append(clientGenState.ClientsConsensus, clientConsensusState) + + // set in store for ease of determining expected genesis + bz, err = path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState.ConsensusState) + suite.Require().NoError(err) + clientStore.Set(host.ConsensusStateKey(height1), bz) + clientStore.Set(host.ConsensusStateKey(height2), bz) + clientStore.Set(host.ConsensusStateKey(height3), bz) + } + // solo machine clients must come before tendermint in expected + clientGenState.Clients = append(clients, clientGenState.Clients...) 
+ + // migrate store get expected genesis + // store migration and genesis migration should produce identical results + err := clientv100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec()) + suite.Require().NoError(err) + expectedClientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper) + + // NOTE: these lines are added in comparison to 02-client/legacy/v100 + // generate appState with old ibc genesis state + appState := genutiltypes.AppMap{} + ibcGenState := types.DefaultGenesisState() + ibcGenState.ClientGenesis = clientGenState + clientv100.RegisterInterfaces(clientCtx.InterfaceRegistry) + appState[host.ModuleName] = clientCtx.JSONCodec.MustMarshalJSON(ibcGenState) + genDoc := tmtypes.GenesisDoc{ + ChainID: suite.chainA.ChainID, + GenesisTime: suite.coordinator.CurrentTime, + InitialHeight: suite.chainA.GetContext().BlockHeight(), + } + + // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning + migrated, err := v100.MigrateGenesis(appState, clientCtx, genDoc, uint64(connectiontypes.DefaultTimePerBlock)) + suite.Require().NoError(err) + + expectedAppState := genutiltypes.AppMap{} + expectedIBCGenState := types.DefaultGenesisState() + expectedIBCGenState.ClientGenesis = expectedClientGenState + + bz, err := clientCtx.JSONCodec.MarshalJSON(expectedIBCGenState) + suite.Require().NoError(err) + expectedAppState[host.ModuleName] = bz + + suite.Require().Equal(expectedAppState, migrated) +} diff --git a/modules/core/module.go b/modules/core/module.go index 1d338dcb69e..db7aaba3e33 100644 --- a/modules/core/module.go +++ b/modules/core/module.go @@ -19,6 +19,7 @@ import ( "github.com/cosmos/cosmos-sdk/types/module" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" ibcclient "github.com/cosmos/ibc-go/modules/core/02-client" + 
clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper" clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types" channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types" @@ -136,6 +137,9 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { connectiontypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) channeltypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) types.RegisterQueryService(cfg.QueryServer(), am.keeper) + + m := clientkeeper.NewMigrator(am.keeper.ClientKeeper) + cfg.RegisterMigration(host.ModuleName, 1, m.Migrate1to2) } // InitGenesis performs genesis initialization for the ibc module. It returns @@ -157,7 +161,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw } // ConsensusVersion implements AppModule/ConsensusVersion. -func (AppModule) ConsensusVersion() uint64 { return 1 } +func (AppModule) ConsensusVersion() uint64 { return 2 } // BeginBlock returns the begin blocker for the ibc module. func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go index 6e1d63ec65c..dbb3fede83e 100644 --- a/modules/light-clients/07-tendermint/types/store.go +++ b/modules/light-clients/07-tendermint/types/store.go @@ -281,6 +281,42 @@ func GetPreviousConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, h return getTmConsensusState(clientStore, cdc, csKey) } +// PruneAllExpiredConsensusStates iterates over all consensus states for a given +// client store. If a consensus state is expired, it is deleted and its metadata +// is deleted. 
+func PruneAllExpiredConsensusStates( + ctx sdk.Context, clientStore sdk.KVStore, + cdc codec.BinaryCodec, clientState *ClientState, +) (err error) { + var heights []exported.Height + + pruneCb := func(height exported.Height) bool { + consState, err := GetConsensusState(clientStore, cdc, height) + // this error should never occur + if err != nil { + return true + } + + if clientState.IsExpired(consState.Timestamp, ctx.BlockTime()) { + heights = append(heights, height) + } + + return false + } + + IterateConsensusStateAscending(clientStore, pruneCb) + if err != nil { + return err + } + + for _, height := range heights { + deleteConsensusState(clientStore, height) + deleteConsensusMetadata(clientStore, height) + } + + return nil +} + // Helper function for GetNextConsensusState and GetPreviousConsensusState func getTmConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, key []byte) (*ConsensusState, bool) { bz := clientStore.Get(key) diff --git a/modules/light-clients/07-tendermint/types/tendermint.pb.go b/modules/light-clients/07-tendermint/types/tendermint.pb.go index 84a79b66668..9a0645a4462 100644 --- a/modules/light-clients/07-tendermint/types/tendermint.pb.go +++ b/modules/light-clients/07-tendermint/types/tendermint.pb.go @@ -11,10 +11,10 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - _ "github.com/golang/protobuf/ptypes/duration" - _ "github.com/golang/protobuf/ptypes/timestamp" github_com_tendermint_tendermint_libs_bytes "github.com/tendermint/tendermint/libs/bytes" types2 "github.com/tendermint/tendermint/proto/tendermint/types" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/timestamppb" io "io" math "math" math_bits "math/bits" diff --git a/modules/light-clients/07-tendermint/types/update.go 
b/modules/light-clients/07-tendermint/types/update.go index c70746b4f74..c2e6788fed1 100644 --- a/modules/light-clients/07-tendermint/types/update.go +++ b/modules/light-clients/07-tendermint/types/update.go @@ -134,7 +134,6 @@ func (cs ClientState) CheckHeaderAndUpdateState( } // if pruneHeight is set, delete consensus state and metadata if pruneHeight != nil { - deleteConsensusState(clientStore, pruneHeight) deleteConsensusMetadata(clientStore, pruneHeight) } diff --git a/modules/light-clients/07-tendermint/types/update_test.go b/modules/light-clients/07-tendermint/types/update_test.go index b93168b5166..db074eee5d2 100644 --- a/modules/light-clients/07-tendermint/types/update_test.go +++ b/modules/light-clients/07-tendermint/types/update_test.go @@ -400,6 +400,8 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() { clientStore = path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) expectedProcessTime, ok := types.GetProcessedTime(clientStore, expiredHeight) suite.Require().True(ok) + expectedProcessHeight, ok := types.GetProcessedHeight(clientStore, expiredHeight) + suite.Require().True(ok) expectedConsKey := types.GetIterationKey(clientStore, expiredHeight) suite.Require().NotNil(expectedConsKey) @@ -425,6 +427,10 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() { processTime, ok := types.GetProcessedTime(clientStore, pruneHeight) suite.Require().Equal(uint64(0), processTime, "processed time metadata not pruned") suite.Require().False(ok) + processHeight, ok := types.GetProcessedHeight(clientStore, pruneHeight) + suite.Require().Nil(processHeight, "processed height metadata not pruned") + suite.Require().False(ok) + // check iteration key metadata is pruned consKey := types.GetIterationKey(clientStore, pruneHeight) suite.Require().Nil(consKey, "iteration key not pruned") @@ -438,6 +444,12 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() { processTime, ok = 
types.GetProcessedTime(clientStore, expiredHeight) suite.Require().Equal(expectedProcessTime, processTime, "processed time metadata incorrectly pruned") suite.Require().True(ok) + + // check processed height metadata is not pruned + processHeight, ok = types.GetProcessedHeight(clientStore, expiredHeight) + suite.Require().Equal(expectedProcessHeight, processHeight, "processed height metadata incorrectly pruned") + suite.Require().True(ok) + // check iteration key metadata is not pruned consKey = types.GetIterationKey(clientStore, expiredHeight) suite.Require().Equal(expectedConsKey, consKey, "iteration key incorrectly pruned") diff --git a/proto/ibc/core/connection/v1/connection.proto b/proto/ibc/core/connection/v1/connection.proto index e09f1529d92..72c0ff7daa0 100644 --- a/proto/ibc/core/connection/v1/connection.proto +++ b/proto/ibc/core/connection/v1/connection.proto @@ -107,8 +107,8 @@ message Version { // Params defines the set of Connection parameters. message Params { - // maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of - // time that the chain might reasonably take to produce the next block under normal operating conditions. A safe - // choice is 3-5x the expected time per block. + // maximum expected time per block (in nanoseconds), used to enforce block delay. This parameter should reflect the + // largest amount of time that the chain might reasonably take to produce the next block under normal operating + // conditions. A safe choice is 3-5x the expected time per block. 
uint64 max_expected_time_per_block = 1 [(gogoproto.moretags) = "yaml:\"max_expected_time_per_block\""]; } diff --git a/proto/ibc/lightclients/solomachine/v1/solomachine.proto b/proto/ibc/lightclients/solomachine/v1/solomachine.proto new file mode 100644 index 00000000000..4ba0da259a7 --- /dev/null +++ b/proto/ibc/lightclients/solomachine/v1/solomachine.proto @@ -0,0 +1,189 @@ +syntax = "proto3"; + +package ibc.lightclients.solomachine.v1; + +option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100"; + +import "ibc/core/connection/v1/connection.proto"; +import "ibc/core/channel/v1/channel.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; + +// ClientState defines a solo machine client that tracks the current consensus +// state and if the client is frozen. +message ClientState { + option (gogoproto.goproto_getters) = false; + // latest sequence of the client state + uint64 sequence = 1; + // frozen sequence of the solo machine + uint64 frozen_sequence = 2 [(gogoproto.moretags) = "yaml:\"frozen_sequence\""]; + ConsensusState consensus_state = 3 [(gogoproto.moretags) = "yaml:\"consensus_state\""]; + // when set to true, will allow governance to update a solo machine client. + // The client will be unfrozen if it is frozen. + bool allow_update_after_proposal = 4 [(gogoproto.moretags) = "yaml:\"allow_update_after_proposal\""]; +} + +// ConsensusState defines a solo machine consensus state. The sequence of a +// consensus state is contained in the "height" key used in storing the +// consensus state. +message ConsensusState { + option (gogoproto.goproto_getters) = false; + // public key of the solo machine + google.protobuf.Any public_key = 1 [(gogoproto.moretags) = "yaml:\"public_key\""]; + // diversifier allows the same public key to be re-used across different solo + // machine clients (potentially on different chains) without being considered + // misbehaviour. 
+ string diversifier = 2; + uint64 timestamp = 3; +} + +// Header defines a solo machine consensus header +message Header { + option (gogoproto.goproto_getters) = false; + // sequence to update solo machine public key at + uint64 sequence = 1; + uint64 timestamp = 2; + bytes signature = 3; + google.protobuf.Any new_public_key = 4 [(gogoproto.moretags) = "yaml:\"new_public_key\""]; + string new_diversifier = 5 [(gogoproto.moretags) = "yaml:\"new_diversifier\""]; +} + +// Misbehaviour defines misbehaviour for a solo machine which consists +// of a sequence and two signatures over different messages at that sequence. +message Misbehaviour { + option (gogoproto.goproto_getters) = false; + string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""]; + uint64 sequence = 2; + SignatureAndData signature_one = 3 [(gogoproto.moretags) = "yaml:\"signature_one\""]; + SignatureAndData signature_two = 4 [(gogoproto.moretags) = "yaml:\"signature_two\""]; +} + +// SignatureAndData contains a signature and the data signed over to create that +// signature. +message SignatureAndData { + option (gogoproto.goproto_getters) = false; + bytes signature = 1; + DataType data_type = 2 [(gogoproto.moretags) = "yaml:\"data_type\""]; + bytes data = 3; + uint64 timestamp = 4; +} + +// TimestampedSignatureData contains the signature data and the timestamp of the +// signature. +message TimestampedSignatureData { + option (gogoproto.goproto_getters) = false; + bytes signature_data = 1 [(gogoproto.moretags) = "yaml:\"signature_data\""]; + uint64 timestamp = 2; +} + +// SignBytes defines the signed bytes used for signature verification. +message SignBytes { + option (gogoproto.goproto_getters) = false; + + uint64 sequence = 1; + uint64 timestamp = 2; + string diversifier = 3; + // type of the data used + DataType data_type = 4 [(gogoproto.moretags) = "yaml:\"data_type\""]; + // marshaled data + bytes data = 5; +} + +// DataType defines the type of solo machine proof being created. 
This is done +// to preserve uniqueness of different data sign byte encodings. +enum DataType { + option (gogoproto.goproto_enum_prefix) = false; + + // Default State + DATA_TYPE_UNINITIALIZED_UNSPECIFIED = 0 [(gogoproto.enumvalue_customname) = "UNSPECIFIED"]; + // Data type for client state verification + DATA_TYPE_CLIENT_STATE = 1 [(gogoproto.enumvalue_customname) = "CLIENT"]; + // Data type for consensus state verification + DATA_TYPE_CONSENSUS_STATE = 2 [(gogoproto.enumvalue_customname) = "CONSENSUS"]; + // Data type for connection state verification + DATA_TYPE_CONNECTION_STATE = 3 [(gogoproto.enumvalue_customname) = "CONNECTION"]; + // Data type for channel state verification + DATA_TYPE_CHANNEL_STATE = 4 [(gogoproto.enumvalue_customname) = "CHANNEL"]; + // Data type for packet commitment verification + DATA_TYPE_PACKET_COMMITMENT = 5 [(gogoproto.enumvalue_customname) = "PACKETCOMMITMENT"]; + // Data type for packet acknowledgement verification + DATA_TYPE_PACKET_ACKNOWLEDGEMENT = 6 [(gogoproto.enumvalue_customname) = "PACKETACKNOWLEDGEMENT"]; + // Data type for packet receipt absence verification + DATA_TYPE_PACKET_RECEIPT_ABSENCE = 7 [(gogoproto.enumvalue_customname) = "PACKETRECEIPTABSENCE"]; + // Data type for next sequence recv verification + DATA_TYPE_NEXT_SEQUENCE_RECV = 8 [(gogoproto.enumvalue_customname) = "NEXTSEQUENCERECV"]; + // Data type for header verification + DATA_TYPE_HEADER = 9 [(gogoproto.enumvalue_customname) = "HEADER"]; +} + +// HeaderData returns the SignBytes data for update verification. +message HeaderData { + option (gogoproto.goproto_getters) = false; + + // header public key + google.protobuf.Any new_pub_key = 1 [(gogoproto.moretags) = "yaml:\"new_pub_key\""]; + // header diversifier + string new_diversifier = 2 [(gogoproto.moretags) = "yaml:\"new_diversifier\""]; +} + +// ClientStateData returns the SignBytes data for client state verification. 
+message ClientStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + google.protobuf.Any client_state = 2 [(gogoproto.moretags) = "yaml:\"client_state\""]; +} + +// ConsensusStateData returns the SignBytes data for consensus state +// verification. +message ConsensusStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + google.protobuf.Any consensus_state = 2 [(gogoproto.moretags) = "yaml:\"consensus_state\""]; +} + +// ConnectionStateData returns the SignBytes data for connection state +// verification. +message ConnectionStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + ibc.core.connection.v1.ConnectionEnd connection = 2; +} + +// ChannelStateData returns the SignBytes data for channel state +// verification. +message ChannelStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + ibc.core.channel.v1.Channel channel = 2; +} + +// PacketCommitmentData returns the SignBytes data for packet commitment +// verification. +message PacketCommitmentData { + bytes path = 1; + bytes commitment = 2; +} + +// PacketAcknowledgementData returns the SignBytes data for acknowledgement +// verification. +message PacketAcknowledgementData { + bytes path = 1; + bytes acknowledgement = 2; +} + +// PacketReceiptAbsenceData returns the SignBytes data for +// packet receipt absence verification. +message PacketReceiptAbsenceData { + bytes path = 1; +} + +// NextSequenceRecvData returns the SignBytes data for verification of the next +// sequence to be received. +message NextSequenceRecvData { + bytes path = 1; + uint64 next_seq_recv = 2 [(gogoproto.moretags) = "yaml:\"next_seq_recv\""]; +}