From 138ce145dcd0144597dee5d7e22593d4f79cf1dc Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 15 May 2024 14:51:20 -0400 Subject: [PATCH 001/102] Fix pebbledb memory corruption (#3020) --- database/pebble/batch.go | 24 ++++++++---------------- database/pebble/batch_test.go | 2 +- database/pebble/db.go | 22 ++++++---------------- database/pebble/db_test.go | 2 +- database/pebble/iterator.go | 17 +++++++++++------ 5 files changed, 27 insertions(+), 40 deletions(-) diff --git a/database/pebble/batch.go b/database/pebble/batch.go index a53b962dc7be..8778a9473960 100644 --- a/database/pebble/batch.go +++ b/database/pebble/batch.go @@ -56,26 +56,18 @@ func (b *batch) Write() error { return database.ErrClosed } - if !b.written { - // This batch has not been written to the database yet. - if err := updateError(b.batch.Commit(pebble.Sync)); err != nil { + if b.written { + // pebble doesn't support writing a batch twice so we have to clone the + // batch before writing it. + newBatch := b.db.pebbleDB.NewBatch() + if err := newBatch.Apply(b.batch, nil); err != nil { return err } - b.written = true - return nil + b.batch = newBatch } - // pebble doesn't support writing a batch twice so we have to clone - // [b] and commit the clone. - batchClone := b.db.pebbleDB.NewBatch() - - // Copy the batch. - if err := batchClone.Apply(b.batch, nil); err != nil { - return err - } - - // Commit the new batch. 
- return updateError(batchClone.Commit(pebble.Sync)) + b.written = true + return updateError(b.batch.Commit(pebble.Sync)) } func (b *batch) Reset() { diff --git a/database/pebble/batch_test.go b/database/pebble/batch_test.go index 3d657a874fd3..4fcc537d1e84 100644 --- a/database/pebble/batch_test.go +++ b/database/pebble/batch_test.go @@ -17,7 +17,7 @@ func TestBatch(t *testing.T) { require := require.New(t) dirName := t.TempDir() - db, err := New(dirName, DefaultConfigBytes, logging.NoLog{}, "", prometheus.NewRegistry()) + db, err := New(dirName, nil, logging.NoLog{}, "", prometheus.NewRegistry()) require.NoError(err) batchIntf := db.NewBatch() diff --git a/database/pebble/db.go b/database/pebble/db.go index 77259a217d87..8e99e0690b64 100644 --- a/database/pebble/db.go +++ b/database/pebble/db.go @@ -4,7 +4,6 @@ package pebble import ( - "bytes" "context" "encoding/json" "errors" @@ -44,18 +43,8 @@ var ( MaxOpenFiles: 4096, MaxConcurrentCompactions: 1, } - - DefaultConfigBytes []byte ) -func init() { - var err error - DefaultConfigBytes, err = json.Marshal(DefaultConfig) - if err != nil { - panic(err) - } -} - type Database struct { lock sync.RWMutex pebbleDB *pebble.DB @@ -200,9 +189,10 @@ func (db *Database) Compact(start []byte, end []byte) error { } if end == nil { - // The database.Database spec treats a nil [limit] as a key after all keys - // but pebble treats a nil [limit] as a key before all keys in Compact. - // Use the greatest key in the database as the [limit] to get the desired behavior. + // The database.Database spec treats a nil [limit] as a key after all + // keys but pebble treats a nil [limit] as a key before all keys in + // Compact. Use the greatest key in the database as the [limit] to get + // the desired behavior. 
it := db.pebbleDB.NewIter(&pebble.IterOptions{}) if !it.Last() { @@ -210,7 +200,7 @@ func (db *Database) Compact(start []byte, end []byte) error { return it.Close() } - end = it.Key() + end = slices.Clone(it.Key()) if err := it.Close(); err != nil { return err } @@ -273,7 +263,7 @@ func keyRange(start, prefix []byte) *pebble.IterOptions { LowerBound: prefix, UpperBound: prefixToUpperBound(prefix), } - if bytes.Compare(start, prefix) == 1 { + if pebble.DefaultComparer.Compare(start, prefix) == 1 { opt.LowerBound = start } return opt diff --git a/database/pebble/db_test.go b/database/pebble/db_test.go index ec6dd3e0fa2d..3b37d9362d92 100644 --- a/database/pebble/db_test.go +++ b/database/pebble/db_test.go @@ -16,7 +16,7 @@ import ( func newDB(t testing.TB) *Database { folder := t.TempDir() - db, err := New(folder, DefaultConfigBytes, logging.NoLog{}, "pebble", prometheus.NewRegistry()) + db, err := New(folder, nil, logging.NoLog{}, "pebble", prometheus.NewRegistry()) require.NoError(t, err) return db.(*Database) } diff --git a/database/pebble/iterator.go b/database/pebble/iterator.go index ab7d8aad11a3..40654dc41d98 100644 --- a/database/pebble/iterator.go +++ b/database/pebble/iterator.go @@ -17,7 +17,7 @@ import ( var ( _ database.Iterator = (*iter)(nil) - errCouldntGetValue = errors.New("couldnt get iterator value") + errCouldNotGetValue = errors.New("could not get iterator value") ) type iter struct { @@ -63,16 +63,16 @@ func (it *iter) Next() bool { return false } - it.nextKey = it.iter.Key() - - var err error - it.nextVal, err = it.iter.ValueAndErr() + key := it.iter.Key() + value, err := it.iter.ValueAndErr() if err != nil { it.hasNext = false - it.err = fmt.Errorf("%w: %w", errCouldntGetValue, err) + it.err = fmt.Errorf("%w: %w", errCouldNotGetValue, err) return false } + it.nextKey = key + it.nextVal = value return true } @@ -122,6 +122,11 @@ func (it *iter) release() { return } + // Cloning these values ensures that calling it.Key() or it.Value() after + // 
releasing the iterator will not segfault. + it.nextKey = slices.Clone(it.nextKey) + it.nextVal = slices.Clone(it.nextVal) + // Remove the iterator from the list of open iterators. it.db.openIterators.Remove(it) From 0928176698a743af837ec040b011231872821acc Mon Sep 17 00:00:00 2001 From: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> Date: Thu, 16 May 2024 15:12:24 -0400 Subject: [PATCH 002/102] [vms/avm] fix linter error in benchmark : Use of weak random number generator (#3023) --- config/config_test.go | 8 ++- config/keys.go | 110 +++++++++++++++++---------------- vms/avm/vm_benchmark_test.go | 10 +-- vms/platformvm/service_test.go | 5 +- 4 files changed, 70 insertions(+), 63 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 820796714b72..68847ca4f6d5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -22,6 +22,8 @@ import ( "github.com/ava-labs/avalanchego/subnets" ) +const chainConfigFilenameExtention = ".ex" + func TestGetChainConfigsFromFiles(t *testing.T) { tests := map[string]struct { configs map[string]string @@ -72,11 +74,11 @@ func TestGetChainConfigsFromFiles(t *testing.T) { // Create custom configs for key, value := range test.configs { chainDir := filepath.Join(chainsDir, key) - setupFile(t, chainDir, chainConfigFileName+".ex", value) //nolint:goconst + setupFile(t, chainDir, chainConfigFileName+chainConfigFilenameExtention, value) } for key, value := range test.upgrades { chainDir := filepath.Join(chainsDir, key) - setupFile(t, chainDir, chainUpgradeFileName+".ex", value) + setupFile(t, chainDir, chainUpgradeFileName+chainConfigFilenameExtention, value) } v := setupViper(configFile) @@ -161,7 +163,7 @@ func TestSetChainConfigDefaultDir(t *testing.T) { require.Equal(defaultChainConfigDir, v.GetString(ChainConfigDirKey)) chainsDir := filepath.Join(defaultChainConfigDir, "C") - setupFile(t, chainsDir, chainConfigFileName+".ex", "helloworld") + setupFile(t, chainsDir, 
chainConfigFileName+chainConfigFilenameExtention, "helloworld") chainConfigs, err := getChainConfigs(v) require.NoError(err) expected := map[string]chains.ChainConfig{"C": {Config: []byte("helloworld"), Upgrade: []byte(nil)}} diff --git a/config/keys.go b/config/keys.go index 560b08774fa7..25348ae54c8d 100644 --- a/config/keys.go +++ b/config/keys.go @@ -3,60 +3,64 @@ package config -// #nosec G101 +// the HTTPWriteTimeoutKey was moved here so that it would not generate the +// false-positive linter error "G101: Potential hardcoded credentials" when running golangci-lint. +const HTTPWriteTimeoutKey = "http-write-timeout" // #nosec G101 + const ( - DataDirKey = "data-dir" - ConfigFileKey = "config-file" - ConfigContentKey = "config-file-content" - ConfigContentTypeKey = "config-file-content-type" - VersionKey = "version" - GenesisFileKey = "genesis-file" - GenesisFileContentKey = "genesis-file-content" - NetworkNameKey = "network-id" - ACPSupportKey = "acp-support" - ACPObjectKey = "acp-object" - TxFeeKey = "tx-fee" - CreateAssetTxFeeKey = "create-asset-tx-fee" - CreateSubnetTxFeeKey = "create-subnet-tx-fee" - TransformSubnetTxFeeKey = "transform-subnet-tx-fee" - CreateBlockchainTxFeeKey = "create-blockchain-tx-fee" - AddPrimaryNetworkValidatorFeeKey = "add-primary-network-validator-fee" - AddPrimaryNetworkDelegatorFeeKey = "add-primary-network-delegator-fee" - AddSubnetValidatorFeeKey = "add-subnet-validator-fee" - AddSubnetDelegatorFeeKey = "add-subnet-delegator-fee" - UptimeRequirementKey = "uptime-requirement" - MinValidatorStakeKey = "min-validator-stake" - MaxValidatorStakeKey = "max-validator-stake" - MinDelegatorStakeKey = "min-delegator-stake" - MinDelegatorFeeKey = "min-delegation-fee" - MinStakeDurationKey = "min-stake-duration" - MaxStakeDurationKey = "max-stake-duration" - StakeMaxConsumptionRateKey = "stake-max-consumption-rate" - StakeMinConsumptionRateKey = "stake-min-consumption-rate" - StakeMintingPeriodKey = "stake-minting-period" - 
StakeSupplyCapKey = "stake-supply-cap" - DBTypeKey = "db-type" - DBReadOnlyKey = "db-read-only" - DBPathKey = "db-dir" - DBConfigFileKey = "db-config-file" - DBConfigContentKey = "db-config-file-content" - PublicIPKey = "public-ip" - PublicIPResolutionFreqKey = "public-ip-resolution-frequency" - PublicIPResolutionServiceKey = "public-ip-resolution-service" - HTTPHostKey = "http-host" - HTTPPortKey = "http-port" - HTTPSEnabledKey = "http-tls-enabled" - HTTPSKeyFileKey = "http-tls-key-file" - HTTPSKeyContentKey = "http-tls-key-file-content" - HTTPSCertFileKey = "http-tls-cert-file" - HTTPSCertContentKey = "http-tls-cert-file-content" - HTTPAllowedOrigins = "http-allowed-origins" - HTTPAllowedHostsKey = "http-allowed-hosts" - HTTPShutdownTimeoutKey = "http-shutdown-timeout" - HTTPShutdownWaitKey = "http-shutdown-wait" - HTTPReadTimeoutKey = "http-read-timeout" - HTTPReadHeaderTimeoutKey = "http-read-header-timeout" - HTTPWriteTimeoutKey = "http-write-timeout" + DataDirKey = "data-dir" + ConfigFileKey = "config-file" + ConfigContentKey = "config-file-content" + ConfigContentTypeKey = "config-file-content-type" + VersionKey = "version" + GenesisFileKey = "genesis-file" + GenesisFileContentKey = "genesis-file-content" + NetworkNameKey = "network-id" + ACPSupportKey = "acp-support" + ACPObjectKey = "acp-object" + TxFeeKey = "tx-fee" + CreateAssetTxFeeKey = "create-asset-tx-fee" + CreateSubnetTxFeeKey = "create-subnet-tx-fee" + TransformSubnetTxFeeKey = "transform-subnet-tx-fee" + CreateBlockchainTxFeeKey = "create-blockchain-tx-fee" + AddPrimaryNetworkValidatorFeeKey = "add-primary-network-validator-fee" + AddPrimaryNetworkDelegatorFeeKey = "add-primary-network-delegator-fee" + AddSubnetValidatorFeeKey = "add-subnet-validator-fee" + AddSubnetDelegatorFeeKey = "add-subnet-delegator-fee" + UptimeRequirementKey = "uptime-requirement" + MinValidatorStakeKey = "min-validator-stake" + MaxValidatorStakeKey = "max-validator-stake" + MinDelegatorStakeKey = "min-delegator-stake" + 
MinDelegatorFeeKey = "min-delegation-fee" + MinStakeDurationKey = "min-stake-duration" + MaxStakeDurationKey = "max-stake-duration" + StakeMaxConsumptionRateKey = "stake-max-consumption-rate" + StakeMinConsumptionRateKey = "stake-min-consumption-rate" + StakeMintingPeriodKey = "stake-minting-period" + StakeSupplyCapKey = "stake-supply-cap" + DBTypeKey = "db-type" + DBReadOnlyKey = "db-read-only" + DBPathKey = "db-dir" + DBConfigFileKey = "db-config-file" + DBConfigContentKey = "db-config-file-content" + PublicIPKey = "public-ip" + PublicIPResolutionFreqKey = "public-ip-resolution-frequency" + PublicIPResolutionServiceKey = "public-ip-resolution-service" + HTTPHostKey = "http-host" + HTTPPortKey = "http-port" + HTTPSEnabledKey = "http-tls-enabled" + HTTPSKeyFileKey = "http-tls-key-file" + HTTPSKeyContentKey = "http-tls-key-file-content" + HTTPSCertFileKey = "http-tls-cert-file" + HTTPSCertContentKey = "http-tls-cert-file-content" + + HTTPAllowedOrigins = "http-allowed-origins" + HTTPAllowedHostsKey = "http-allowed-hosts" + HTTPShutdownTimeoutKey = "http-shutdown-timeout" + HTTPShutdownWaitKey = "http-shutdown-wait" + HTTPReadTimeoutKey = "http-read-timeout" + HTTPReadHeaderTimeoutKey = "http-read-header-timeout" + HTTPIdleTimeoutKey = "http-idle-timeout" StateSyncIPsKey = "state-sync-ips" StateSyncIDsKey = "state-sync-ids" diff --git a/vms/avm/vm_benchmark_test.go b/vms/avm/vm_benchmark_test.go index 5befa7062dda..e0bb3080c4e1 100644 --- a/vms/avm/vm_benchmark_test.go +++ b/vms/avm/vm_benchmark_test.go @@ -65,7 +65,7 @@ func BenchmarkLoadUser(b *testing.B) { } // GetAllUTXOsBenchmark is a helper func to benchmark the GetAllUTXOs depending on the size -func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { +func getAllUTXOsBenchmark(b *testing.B, utxoCount int, randSrc rand.Source) { require := require.New(b) env := setup(b, &envConfig{fork: latest}) @@ -76,12 +76,11 @@ func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { addr := ids.GenerateTestShortID() - // 
#nosec G404 for i := 0; i < utxoCount; i++ { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), - OutputIndex: rand.Uint32(), + OutputIndex: uint32(randSrc.Int63()), }, Asset: avax.Asset{ID: env.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ @@ -129,9 +128,10 @@ func BenchmarkGetUTXOs(b *testing.B) { }, } - for _, count := range tests { + for testIdx, count := range tests { + randSrc := rand.NewSource(int64(testIdx)) b.Run(count.name, func(b *testing.B) { - GetAllUTXOsBenchmark(b, count.utxoCount) + getAllUTXOsBenchmark(b, count.utxoCount, randSrc) }) } } diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 8f39770548b8..e44e603ce53d 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -130,11 +130,12 @@ func TestGetTxStatus(t *testing.T) { sm := m.NewSharedMemory(service.vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(service.vm.ctx.XChainID) - // #nosec G404 + randSrc := rand.NewSource(0) + utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), - OutputIndex: rand.Uint32(), + OutputIndex: uint32(randSrc.Int63()), }, Asset: avax.Asset{ID: service.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ From ddd6d25379e207c81832eae0778d1946a8a1d66c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 17 May 2024 19:14:49 -0400 Subject: [PATCH 003/102] Simplify sampler interface (#3026) --- network/ip_tracker.go | 4 +-- network/p2p/validators.go | 4 +-- network/peer/set.go | 4 +-- .../consensus/snowman/bootstrapper/sampler.go | 10 ++++-- snow/validators/manager_test.go | 3 +- snow/validators/set.go | 7 ++-- snow/validators/set_test.go | 3 +- utils/sampler/uniform.go | 6 ++-- utils/sampler/uniform_best.go | 2 +- utils/sampler/uniform_replacer.go | 16 +++++----- utils/sampler/uniform_resample.go | 16 +++++----- utils/sampler/uniform_test.go | 32 +++++++++---------- utils/sampler/weighted.go | 6 +--- utils/sampler/weighted_array.go | 6 ++-- 
utils/sampler/weighted_best.go | 2 +- utils/sampler/weighted_heap.go | 6 ++-- utils/sampler/weighted_linear.go | 6 ++-- utils/sampler/weighted_test.go | 16 +++++----- utils/sampler/weighted_uniform.go | 6 ++-- utils/sampler/weighted_without_replacement.go | 2 +- .../weighted_without_replacement_generic.go | 17 +++++----- .../weighted_without_replacement_test.go | 24 +++++++------- vms/avm/environment_test.go | 4 +-- vms/proposervm/proposer/windower.go | 18 +++++------ 24 files changed, 110 insertions(+), 110 deletions(-) diff --git a/network/ip_tracker.go b/network/ip_tracker.go index 8bca76d7d755..03040b15337e 100644 --- a/network/ip_tracker.go +++ b/network/ip_tracker.go @@ -400,8 +400,8 @@ func (i *ipTracker) GetGossipableIPs( uniform.Initialize(uint64(len(i.gossipableIPs))) for len(ips) < maxNumIPs { - index, err := uniform.Next() - if err != nil { + index, hasNext := uniform.Next() + if !hasNext { return ips } diff --git a/network/p2p/validators.go b/network/p2p/validators.go index 2dee314feb47..161d84d88372 100644 --- a/network/p2p/validators.go +++ b/network/p2p/validators.go @@ -125,8 +125,8 @@ func (v *Validators) Sample(ctx context.Context, limit int) []ids.NodeID { uniform.Initialize(uint64(len(v.validatorList))) for len(sampled) < limit { - i, err := uniform.Next() - if err != nil { + i, hasNext := uniform.Next() + if !hasNext { break } diff --git a/network/peer/set.go b/network/peer/set.go index cbb9675ec305..a90ffc4e56a9 100644 --- a/network/peer/set.go +++ b/network/peer/set.go @@ -124,8 +124,8 @@ func (s *peerSet) Sample(n int, precondition func(Peer) bool) []Peer { peers := make([]Peer, 0, n) for len(peers) < n { - index, err := sampler.Next() - if err != nil { + index, hasNext := sampler.Next() + if !hasNext { // We have run out of peers to attempt to sample. 
break } diff --git a/snow/consensus/snowman/bootstrapper/sampler.go b/snow/consensus/snowman/bootstrapper/sampler.go index b43f6d915745..56b27d3076ff 100644 --- a/snow/consensus/snowman/bootstrapper/sampler.go +++ b/snow/consensus/snowman/bootstrapper/sampler.go @@ -4,11 +4,15 @@ package bootstrapper import ( + "errors" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" ) +var errUnexpectedSamplerFailure = errors.New("unexpected sampler failure") + // Sample keys from [elements] uniformly by weight without replacement. The // returned set will have size less than or equal to [maxSize]. This function // will error if the sum of all weights overflows. @@ -36,9 +40,9 @@ func Sample[T comparable](elements map[T]uint64, maxSize int) (set.Set[T], error } maxSize = int(min(uint64(maxSize), totalWeight)) - indices, err := sampler.Sample(maxSize) - if err != nil { - return nil, err + indices, ok := sampler.Sample(maxSize) + if !ok { + return nil, errUnexpectedSamplerFailure } sampledElements := set.NewSet[T](maxSize) diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go index cf23d49d39be..365d7ffdf7d7 100644 --- a/snow/validators/manager_test.go +++ b/snow/validators/manager_test.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" safemath "github.com/ava-labs/avalanchego/utils/math" @@ -396,7 +395,7 @@ func TestSample(t *testing.T) { require.Equal([]ids.NodeID{nodeID0}, sampled) _, err = m.Sample(subnetID, 2) - require.ErrorIs(err, sampler.ErrOutOfRange) + require.ErrorIs(err, errInsufficientWeight) nodeID1 := ids.GenerateTestNodeID() require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, math.MaxInt64-1)) diff --git 
a/snow/validators/set.go b/snow/validators/set.go index e9bb235f995b..b0c7e5de9ba6 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -23,6 +23,7 @@ var ( errDuplicateValidator = errors.New("duplicate validator") errMissingValidator = errors.New("missing validator") errTotalWeightNotUint64 = errors.New("total weight is not a uint64") + errInsufficientWeight = errors.New("insufficient weight") ) // newSet returns a new, empty set of validators. @@ -257,9 +258,9 @@ func (s *vdrSet) sample(size int) ([]ids.NodeID, error) { s.samplerInitialized = true } - indices, err := s.sampler.Sample(size) - if err != nil { - return nil, err + indices, ok := s.sampler.Sample(size) + if !ok { + return nil, errInsufficientWeight } list := make([]ids.NodeID, size) diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index 480f9dba4f8e..086e5c0b654a 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" safemath "github.com/ava-labs/avalanchego/utils/math" @@ -343,7 +342,7 @@ func TestSetSample(t *testing.T) { require.Equal([]ids.NodeID{nodeID0}, sampled) _, err = s.Sample(2) - require.ErrorIs(err, sampler.ErrOutOfRange) + require.ErrorIs(err, errInsufficientWeight) nodeID1 := ids.GenerateTestNodeID() require.NoError(s.Add(nodeID1, nil, ids.Empty, math.MaxInt64-1)) diff --git a/utils/sampler/uniform.go b/utils/sampler/uniform.go index 5ae9a21d8822..5cdf20bc5125 100644 --- a/utils/sampler/uniform.go +++ b/utils/sampler/uniform.go @@ -7,12 +7,12 @@ package sampler type Uniform interface { Initialize(sampleRange uint64) // Sample returns length numbers in the range [0,sampleRange). If there - // aren't enough numbers in the range, an error is returned. 
If length is + // aren't enough numbers in the range, false is returned. If length is // negative the implementation may panic. - Sample(length int) ([]uint64, error) + Sample(length int) ([]uint64, bool) + Next() (uint64, bool) Reset() - Next() (uint64, error) } // NewUniform returns a new sampler diff --git a/utils/sampler/uniform_best.go b/utils/sampler/uniform_best.go index 21f7870d5bdc..fda2579558f6 100644 --- a/utils/sampler/uniform_best.go +++ b/utils/sampler/uniform_best.go @@ -56,7 +56,7 @@ samplerLoop: start := s.clock.Time() for i := 0; i < s.benchmarkIterations; i++ { - if _, err := sampler.Sample(sampleSize); err != nil { + if _, ok := sampler.Sample(sampleSize); !ok { continue samplerLoop } } diff --git a/utils/sampler/uniform_replacer.go b/utils/sampler/uniform_replacer.go index 80666a238343..9d6f47c2d4d3 100644 --- a/utils/sampler/uniform_replacer.go +++ b/utils/sampler/uniform_replacer.go @@ -36,18 +36,18 @@ func (s *uniformReplacer) Initialize(length uint64) { s.drawsCount = 0 } -func (s *uniformReplacer) Sample(count int) ([]uint64, error) { +func (s *uniformReplacer) Sample(count int) ([]uint64, bool) { s.Reset() results := make([]uint64, count) for i := 0; i < count; i++ { - ret, err := s.Next() - if err != nil { - return nil, err + ret, hasNext := s.Next() + if !hasNext { + return nil, false } results[i] = ret } - return results, nil + return results, true } func (s *uniformReplacer) Reset() { @@ -55,9 +55,9 @@ func (s *uniformReplacer) Reset() { s.drawsCount = 0 } -func (s *uniformReplacer) Next() (uint64, error) { +func (s *uniformReplacer) Next() (uint64, bool) { if s.drawsCount >= s.length { - return 0, ErrOutOfRange + return 0, false } draw := s.rng.Uint64Inclusive(s.length-1-s.drawsCount) + s.drawsCount @@ -65,5 +65,5 @@ func (s *uniformReplacer) Next() (uint64, error) { s.drawn[draw] = s.drawn.get(s.drawsCount, s.drawsCount) s.drawsCount++ - return ret, nil + return ret, true } diff --git a/utils/sampler/uniform_resample.go 
b/utils/sampler/uniform_resample.go index b05ce62fe886..4325d759b1c0 100644 --- a/utils/sampler/uniform_resample.go +++ b/utils/sampler/uniform_resample.go @@ -23,28 +23,28 @@ func (s *uniformResample) Initialize(length uint64) { s.drawn = make(map[uint64]struct{}) } -func (s *uniformResample) Sample(count int) ([]uint64, error) { +func (s *uniformResample) Sample(count int) ([]uint64, bool) { s.Reset() results := make([]uint64, count) for i := 0; i < count; i++ { - ret, err := s.Next() - if err != nil { - return nil, err + ret, hasNext := s.Next() + if !hasNext { + return nil, false } results[i] = ret } - return results, nil + return results, true } func (s *uniformResample) Reset() { clear(s.drawn) } -func (s *uniformResample) Next() (uint64, error) { +func (s *uniformResample) Next() (uint64, bool) { i := uint64(len(s.drawn)) if i >= s.length { - return 0, ErrOutOfRange + return 0, false } for { @@ -53,6 +53,6 @@ func (s *uniformResample) Next() (uint64, error) { continue } s.drawn[draw] = struct{}{} - return draw, nil + return draw, true } } diff --git a/utils/sampler/uniform_test.go b/utils/sampler/uniform_test.go index eb9862e7656c..99334464b2d5 100644 --- a/utils/sampler/uniform_test.go +++ b/utils/sampler/uniform_test.go @@ -83,8 +83,8 @@ func UniformInitializeMaxUint64Test(t *testing.T, s Uniform) { s.Initialize(math.MaxUint64) for { - val, err := s.Next() - require.NoError(t, err) + val, hasNext := s.Next() + require.True(t, hasNext) if val > math.MaxInt64 { break @@ -95,8 +95,8 @@ func UniformInitializeMaxUint64Test(t *testing.T, s Uniform) { func UniformOutOfRangeTest(t *testing.T, s Uniform) { s.Initialize(0) - _, err := s.Sample(1) - require.ErrorIs(t, err, ErrOutOfRange) + _, ok := s.Sample(1) + require.False(t, ok) } func UniformEmptyTest(t *testing.T, s Uniform) { @@ -104,8 +104,8 @@ func UniformEmptyTest(t *testing.T, s Uniform) { s.Initialize(1) - val, err := s.Sample(0) - require.NoError(err) + val, ok := s.Sample(0) + require.True(ok) 
require.Empty(val) } @@ -114,8 +114,8 @@ func UniformSingletonTest(t *testing.T, s Uniform) { s.Initialize(1) - val, err := s.Sample(1) - require.NoError(err) + val, ok := s.Sample(1) + require.True(ok) require.Equal([]uint64{0}, val) } @@ -124,8 +124,8 @@ func UniformDistributionTest(t *testing.T, s Uniform) { s.Initialize(3) - val, err := s.Sample(3) - require.NoError(err) + val, ok := s.Sample(3) + require.True(ok) slices.Sort(val) require.Equal([]uint64{0, 1, 2}, val) @@ -134,8 +134,8 @@ func UniformDistributionTest(t *testing.T, s Uniform) { func UniformOverSampleTest(t *testing.T, s Uniform) { s.Initialize(3) - _, err := s.Sample(4) - require.ErrorIs(t, err, ErrOutOfRange) + _, ok := s.Sample(4) + require.False(t, ok) } func UniformLazilySample(t *testing.T, s Uniform) { @@ -146,15 +146,15 @@ func UniformLazilySample(t *testing.T, s Uniform) { for j := 0; j < 2; j++ { sampled := map[uint64]bool{} for i := 0; i < 3; i++ { - val, err := s.Next() - require.NoError(err) + val, hasNext := s.Next() + require.True(hasNext) require.False(sampled[val]) sampled[val] = true } - _, err := s.Next() - require.ErrorIs(err, ErrOutOfRange) + _, hasNext := s.Next() + require.False(hasNext) s.Reset() } diff --git a/utils/sampler/weighted.go b/utils/sampler/weighted.go index 2296da08e97a..64a6493ff860 100644 --- a/utils/sampler/weighted.go +++ b/utils/sampler/weighted.go @@ -3,15 +3,11 @@ package sampler -import "errors" - -var ErrOutOfRange = errors.New("out of range") - // Weighted defines how to sample a specified valued based on a provided // weighted distribution type Weighted interface { Initialize(weights []uint64) error - Sample(sampleValue uint64) (int, error) + Sample(sampleValue uint64) (int, bool) } // NewWeighted returns a new sampler diff --git a/utils/sampler/weighted_array.go b/utils/sampler/weighted_array.go index bbbf98914d68..faae08c0ccbf 100644 --- a/utils/sampler/weighted_array.go +++ b/utils/sampler/weighted_array.go @@ -81,9 +81,9 @@ func (s 
*weightedArray) Initialize(weights []uint64) error { return nil } -func (s *weightedArray) Sample(value uint64) (int, error) { +func (s *weightedArray) Sample(value uint64) (int, bool) { if len(s.arr) == 0 || s.arr[len(s.arr)-1].cumulativeWeight <= value { - return 0, ErrOutOfRange + return 0, false } minIndex := 0 maxIndex := len(s.arr) - 1 @@ -98,7 +98,7 @@ func (s *weightedArray) Sample(value uint64) (int, error) { currentElem := s.arr[index] currentWeight := currentElem.cumulativeWeight if previousWeight <= value && value < currentWeight { - return currentElem.index, nil + return currentElem.index, true } if value < previousWeight { diff --git a/utils/sampler/weighted_best.go b/utils/sampler/weighted_best.go index 59bf60019144..91ec2ae50135 100644 --- a/utils/sampler/weighted_best.go +++ b/utils/sampler/weighted_best.go @@ -60,7 +60,7 @@ samplerLoop: start := s.clock.Time() for _, sample := range samples { - if _, err := sampler.Sample(sample); err != nil { + if _, ok := sampler.Sample(sample); !ok { continue samplerLoop } } diff --git a/utils/sampler/weighted_heap.go b/utils/sampler/weighted_heap.go index f4002a857e4a..96971657c569 100644 --- a/utils/sampler/weighted_heap.go +++ b/utils/sampler/weighted_heap.go @@ -80,9 +80,9 @@ func (s *weightedHeap) Initialize(weights []uint64) error { return nil } -func (s *weightedHeap) Sample(value uint64) (int, error) { +func (s *weightedHeap) Sample(value uint64) (int, bool) { if len(s.heap) == 0 || s.heap[0].cumulativeWeight <= value { - return 0, ErrOutOfRange + return 0, false } index := 0 @@ -90,7 +90,7 @@ func (s *weightedHeap) Sample(value uint64) (int, error) { currentElement := s.heap[index] currentWeight := currentElement.weight if value < currentWeight { - return currentElement.index, nil + return currentElement.index, true } value -= currentWeight diff --git a/utils/sampler/weighted_linear.go b/utils/sampler/weighted_linear.go index d6f0c5d74fba..c66bd442ab55 100644 --- a/utils/sampler/weighted_linear.go +++ 
b/utils/sampler/weighted_linear.go @@ -68,15 +68,15 @@ func (s *weightedLinear) Initialize(weights []uint64) error { return nil } -func (s *weightedLinear) Sample(value uint64) (int, error) { +func (s *weightedLinear) Sample(value uint64) (int, bool) { if len(s.arr) == 0 || s.arr[len(s.arr)-1].cumulativeWeight <= value { - return 0, ErrOutOfRange + return 0, false } index := 0 for { if elem := s.arr[index]; value < elem.cumulativeWeight { - return elem.index, nil + return elem.index, true } index++ } diff --git a/utils/sampler/weighted_test.go b/utils/sampler/weighted_test.go index ea08230d175a..286b7f4823a6 100644 --- a/utils/sampler/weighted_test.go +++ b/utils/sampler/weighted_test.go @@ -97,8 +97,8 @@ func WeightedOutOfRangeTest(t *testing.T, s Weighted) { require.NoError(s.Initialize([]uint64{1})) - _, err := s.Sample(1) - require.ErrorIs(err, ErrOutOfRange) + _, ok := s.Sample(1) + require.False(ok) } func WeightedSingletonTest(t *testing.T, s Weighted) { @@ -106,8 +106,8 @@ func WeightedSingletonTest(t *testing.T, s Weighted) { require.NoError(s.Initialize([]uint64{1})) - index, err := s.Sample(0) - require.NoError(err) + index, ok := s.Sample(0) + require.True(ok) require.Zero(index) } @@ -116,8 +116,8 @@ func WeightedWithZeroTest(t *testing.T, s Weighted) { require.NoError(s.Initialize([]uint64{0, 1})) - index, err := s.Sample(0) - require.NoError(err) + index, ok := s.Sample(0) + require.True(ok) require.Equal(1, index) } @@ -128,8 +128,8 @@ func WeightedDistributionTest(t *testing.T, s Weighted) { counts := make([]int, 5) for i := uint64(0); i < 11; i++ { - index, err := s.Sample(i) - require.NoError(err) + index, ok := s.Sample(i) + require.True(ok) counts[index]++ } require.Equal([]int{1, 1, 2, 3, 4}, counts) diff --git a/utils/sampler/weighted_uniform.go b/utils/sampler/weighted_uniform.go index 22dbb6b5ebd5..44836450b3b8 100644 --- a/utils/sampler/weighted_uniform.go +++ b/utils/sampler/weighted_uniform.go @@ -61,9 +61,9 @@ func (s *weightedUniform) 
Initialize(weights []uint64) error { return nil } -func (s *weightedUniform) Sample(value uint64) (int, error) { +func (s *weightedUniform) Sample(value uint64) (int, bool) { if uint64(len(s.indices)) <= value { - return 0, ErrOutOfRange + return 0, false } - return s.indices[int(value)], nil + return s.indices[int(value)], true } diff --git a/utils/sampler/weighted_without_replacement.go b/utils/sampler/weighted_without_replacement.go index d512cd777909..a5585f0e4300 100644 --- a/utils/sampler/weighted_without_replacement.go +++ b/utils/sampler/weighted_without_replacement.go @@ -8,7 +8,7 @@ package sampler // indices. So duplicate indices can be returned. type WeightedWithoutReplacement interface { Initialize(weights []uint64) error - Sample(count int) ([]int, error) + Sample(count int) ([]int, bool) } // NewDeterministicWeightedWithoutReplacement returns a new sampler diff --git a/utils/sampler/weighted_without_replacement_generic.go b/utils/sampler/weighted_without_replacement_generic.go index c45d64d0b2b0..004ff797b90f 100644 --- a/utils/sampler/weighted_without_replacement_generic.go +++ b/utils/sampler/weighted_without_replacement_generic.go @@ -25,19 +25,20 @@ func (s *weightedWithoutReplacementGeneric) Initialize(weights []uint64) error { return s.w.Initialize(weights) } -func (s *weightedWithoutReplacementGeneric) Sample(count int) ([]int, error) { +func (s *weightedWithoutReplacementGeneric) Sample(count int) ([]int, bool) { s.u.Reset() indices := make([]int, count) for i := 0; i < count; i++ { - weight, err := s.u.Next() - if err != nil { - return nil, err + weight, ok := s.u.Next() + if !ok { + return nil, false } - indices[i], err = s.w.Sample(weight) - if err != nil { - return nil, err + + indices[i], ok = s.w.Sample(weight) + if !ok { + return nil, false } } - return indices, nil + return indices, true } diff --git a/utils/sampler/weighted_without_replacement_test.go b/utils/sampler/weighted_without_replacement_test.go index 
8d3469141da1..9edbd8b9bf3b 100644 --- a/utils/sampler/weighted_without_replacement_test.go +++ b/utils/sampler/weighted_without_replacement_test.go @@ -99,8 +99,8 @@ func WeightedWithoutReplacementOutOfRangeTest( require.NoError(s.Initialize([]uint64{1})) - _, err := s.Sample(2) - require.ErrorIs(err, ErrOutOfRange) + _, ok := s.Sample(2) + require.False(ok) } func WeightedWithoutReplacementEmptyWithoutWeightTest( @@ -111,8 +111,8 @@ func WeightedWithoutReplacementEmptyWithoutWeightTest( require.NoError(s.Initialize(nil)) - indices, err := s.Sample(0) - require.NoError(err) + indices, ok := s.Sample(0) + require.True(ok) require.Empty(indices) } @@ -124,8 +124,8 @@ func WeightedWithoutReplacementEmptyTest( require.NoError(s.Initialize([]uint64{1})) - indices, err := s.Sample(0) - require.NoError(err) + indices, ok := s.Sample(0) + require.True(ok) require.Empty(indices) } @@ -137,8 +137,8 @@ func WeightedWithoutReplacementSingletonTest( require.NoError(s.Initialize([]uint64{1})) - indices, err := s.Sample(1) - require.NoError(err) + indices, ok := s.Sample(1) + require.True(ok) require.Equal([]int{0}, indices) } @@ -150,8 +150,8 @@ func WeightedWithoutReplacementWithZeroTest( require.NoError(s.Initialize([]uint64{0, 1})) - indices, err := s.Sample(1) - require.NoError(err) + indices, ok := s.Sample(1) + require.True(ok) require.Equal([]int{1}, indices) } @@ -163,8 +163,8 @@ func WeightedWithoutReplacementDistributionTest( require.NoError(s.Initialize([]uint64{1, 1, 2})) - indices, err := s.Sample(4) - require.NoError(err) + indices, ok := s.Sample(4) + require.True(ok) slices.Sort(indices) require.Equal([]int{0, 1, 2, 2}, indices) diff --git a/vms/avm/environment_test.go b/vms/avm/environment_test.go index eba565727973..52a76425e7ae 100644 --- a/vms/avm/environment_test.go +++ b/vms/avm/environment_test.go @@ -349,8 +349,8 @@ func sampleAddrs(tb testing.TB, addressFormatter avax.AddressManager, addrs []id sampler.Initialize(uint64(len(addrs))) numAddrs := 1 + 
rand.Intn(len(addrs)) // #nosec G404 - indices, err := sampler.Sample(numAddrs) - require.NoError(err) + indices, ok := sampler.Sample(numAddrs) + require.True(ok) for _, index := range indices { addr := addrs[index] addrStr, err := addressFormatter.FormatLocalAddress(addr) diff --git a/vms/proposervm/proposer/windower.go b/vms/proposervm/proposer/windower.go index b9a633c702c0..ae79aecafcd5 100644 --- a/vms/proposervm/proposer/windower.go +++ b/vms/proposervm/proposer/windower.go @@ -6,7 +6,6 @@ package proposer import ( "context" "errors" - "fmt" "math/bits" "time" @@ -37,7 +36,8 @@ const ( var ( _ Windower = (*windower)(nil) - ErrAnyoneCanPropose = errors.New("anyone can propose") + ErrAnyoneCanPropose = errors.New("anyone can propose") + ErrUnexpectedSamplerFailure = errors.New("unexpected sampler failure") ) type Windower interface { @@ -132,9 +132,9 @@ func (w *windower) Proposers(ctx context.Context, blockHeight, pChainHeight uint source.Seed(w.chainSource ^ blockHeight) numToSample := int(min(uint64(maxWindows), totalWeight)) - indices, err := sampler.Sample(numToSample) - if err != nil { - return nil, err + indices, ok := sampler.Sample(numToSample) + if !ok { + return nil, ErrUnexpectedSamplerFailure } nodeIDs := make([]ids.NodeID, numToSample) @@ -231,7 +231,7 @@ func (w *windower) makeSampler( pChainHeight uint64, source sampler.Source, ) (sampler.WeightedWithoutReplacement, []validatorData, error) { - // Get the canconical representation of the validator set at the provided + // Get the canonical representation of the validator set at the provided // p-chain height. validatorsMap, err := w.state.GetValidatorSet(ctx, pChainHeight, w.subnetID) if err != nil { @@ -271,9 +271,9 @@ func (w *windower) expectedProposer( // biasing the seed generation. For example, without reversing the slot // height=0 and slot=1 would equal height=1 and slot=0. 
source.Seed(w.chainSource ^ blockHeight ^ bits.Reverse64(slot)) - indices, err := sampler.Sample(1) - if err != nil { - return ids.EmptyNodeID, fmt.Errorf("failed sampling proposers: %w", err) + indices, ok := sampler.Sample(1) + if !ok { + return ids.EmptyNodeID, ErrUnexpectedSamplerFailure } return validators[indices[0]].id, nil } From cab15c031364c4bd7b46056dc4ed3714a742d0d3 Mon Sep 17 00:00:00 2001 From: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> Date: Sun, 19 May 2024 11:59:20 -0400 Subject: [PATCH 004/102] [build] Update linter version (#3024) Signed-off-by: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> Co-authored-by: Stephen Buttolph --- .golangci.yml | 10 +++++----- network/peer/tls_config.go | 3 +-- scripts/lint.sh | 2 +- vms/platformvm/txs/executor/import_test.go | 11 ++++++++--- .../txs/executor/standard_tx_executor_test.go | 2 ++ 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 0c4006e9508d..a1991abd29aa 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,11 +2,6 @@ run: timeout: 10m - # Enables skipping of directories: - # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ - # Default: true - skip-dirs-use-default: false - # If set we pass it to "go list -mod={option}". From "go help modules": # If invoked with -mod=readonly, the go command is disallowed from the implicit # automatic updating of go.mod described above. 
Instead, it fails when any changes @@ -36,6 +31,11 @@ issues: # Default: 3 max-same-issues: 0 + # Enables skipping of directories: + # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + # Default: true + exclude-dirs-use-default: false + linters: disable-all: true enable: diff --git a/network/peer/tls_config.go b/network/peer/tls_config.go index 7de848ed062a..9673b98dc8f1 100644 --- a/network/peer/tls_config.go +++ b/network/peer/tls_config.go @@ -14,7 +14,6 @@ import ( // It is safe, and typically expected, for [keyLogWriter] to be [nil]. // [keyLogWriter] should only be enabled for debugging. func TLSConfig(cert tls.Certificate, keyLogWriter io.Writer) *tls.Config { - // #nosec G402 return &tls.Config{ Certificates: []tls.Certificate{cert}, ClientAuth: tls.RequireAnyClientCert, @@ -24,7 +23,7 @@ func TLSConfig(cert tls.Certificate, keyLogWriter io.Writer) *tls.Config { // // During our security audit by Quantstamp, this was investigated // and confirmed to be safe and correct. 
- InsecureSkipVerify: true, + InsecureSkipVerify: true, //#nosec G402 MinVersion: tls.VersionTLS13, KeyLogWriter: keyLogWriter, } diff --git a/scripts/lint.sh b/scripts/lint.sh index b2cbaa50fcff..9fb23ae325be 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -32,7 +32,7 @@ fi TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_no_error_inline_func"} function test_golangci_lint { - go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.56.1 + go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.58.1 golangci-lint run --config .golangci.yml } diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index c05607526841..c4b831badc92 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -39,7 +39,8 @@ func TestNewImportTx(t *testing.T) { require.NoError(t, err) customAssetID := ids.GenerateTestID() - + // generate a constant random source generator. 
+ randSrc := rand.NewSource(0) tests := []test{ { description: "can't pay fee", @@ -52,6 +53,7 @@ func TestNewImportTx(t *testing.T) { map[ids.ID]uint64{ env.ctx.AVAXAssetID: env.config.TxFee - 1, }, + randSrc, ), sourceKeys: []*secp256k1.PrivateKey{sourceKey}, expectedErr: builder.ErrInsufficientFunds, @@ -67,6 +69,7 @@ func TestNewImportTx(t *testing.T) { map[ids.ID]uint64{ env.ctx.AVAXAssetID: env.config.TxFee, }, + randSrc, ), sourceKeys: []*secp256k1.PrivateKey{sourceKey}, expectedErr: nil, @@ -82,6 +85,7 @@ func TestNewImportTx(t *testing.T) { map[ids.ID]uint64{ env.ctx.AVAXAssetID: env.config.TxFee, }, + randSrc, ), sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: env.config.UpgradeConfig.ApricotPhase5Time, @@ -99,6 +103,7 @@ func TestNewImportTx(t *testing.T) { env.ctx.AVAXAssetID: env.config.TxFee, customAssetID: 1, }, + randSrc, ), sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: env.config.UpgradeConfig.ApricotPhase5Time, @@ -168,6 +173,7 @@ func fundedSharedMemory( sourceKey *secp256k1.PrivateKey, peerChain ids.ID, assets map[ids.ID]uint64, + randSrc rand.Source, ) atomic.SharedMemory { fundedSharedMemoryCalls++ m := atomic.NewMemory(prefixdb.New([]byte{fundedSharedMemoryCalls}, env.baseDB)) @@ -176,11 +182,10 @@ func fundedSharedMemory( peerSharedMemory := m.NewSharedMemory(peerChain) for assetID, amt := range assets { - // #nosec G404 utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), - OutputIndex: rand.Uint32(), + OutputIndex: uint32(randSrc.Int63()), }, Asset: avax.Asset{ID: assetID}, Out: &secp256k1fx.TransferOutput{ diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 28fb5a2404fb..47f26ac5dd13 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -6,6 +6,7 @@ package executor import ( "errors" "math" + "math/rand" "testing" "time" @@ -1185,6 
+1186,7 @@ func TestDurangoMemoField(t *testing.T) { map[ids.ID]uint64{ env.ctx.AVAXAssetID: sourceAmount, }, + rand.NewSource(0), ) env.msm.SharedMemory = sharedMemory From eb7ddd75e0bf13b4671b385a9bb7a364b2e8b553 Mon Sep 17 00:00:00 2001 From: cocoyeal <150209682+cocoyeal@users.noreply.github.com> Date: Mon, 20 May 2024 22:42:41 +0800 Subject: [PATCH 005/102] Fix broken docs link (#3028) --- api/admin/service.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/admin/service.md b/api/admin/service.md index 94afbf370451..4a2a97c29e13 100644 --- a/api/admin/service.md +++ b/api/admin/service.md @@ -75,7 +75,7 @@ Now, calls to the X-Chain can be made to either `/ext/bc/X` or, equivalently, to Give a blockchain an alias, a different name that can be used any place the blockchain’s ID is used. -:::note Aliasing a chain can also be done via the [Node API](https://docs.avax.network/nodes/configure/avalanchego-config-flags.md#--chain-aliases-file-string). +:::note Aliasing a chain can also be done via the [Node API](/nodes/configure/avalanchego-config-flags.md#--chain-aliases-file-string). Note that the alias is set for each chain on each node individually. In a multi-node Subnet, the same alias should be configured on each node to use an alias across a Subnet successfully. Setting an alias for a chain on one node does not register that alias with other nodes automatically. 
From 46bc6f5e86dd7ef0ecd8bddec7c6c2b84564019c Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 20 May 2024 18:40:21 -0400 Subject: [PATCH 006/102] `gossipping` -> `gossiping` (#3033) --- RELEASES.md | 4 ++-- node/node.go | 2 +- snow/README.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 5fd5372d5e86..051b1801aac5 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -3018,7 +3018,7 @@ This version is backwards compatible to [v1.7.0](https://github.com/ava-labs/ava ### Networking -- Reduced default peerlist and accepted frontier gossipping +- Reduced default peerlist and accepted frontier gossiping - Increased the default at-large outbound buffer size to 32 MiB ### Metrics @@ -3100,7 +3100,7 @@ This version is backwards compatible to [v1.7.0](https://github.com/ava-labs/ava - Added `--snow-mixed-query-num-push-vdr` and `--snow-mixed-query-num-push-non-vdr` to allow parameterization of sending push queries - By default, non-validators now send only pull queries, not push queries. - By default, validators now send both pull queries and push queries upon inserting a container into consensus. Previously, nodes sent only push queries. 
-- Added metrics to track the amount of over gossipping of `peerlist` messages +- Added metrics to track the amount of over gossiping of `peerlist` messages - Added custom message queueing support to outbound `Peer` messages - Reused `Ping` messages to avoid needless memory allocations diff --git a/node/node.go b/node/node.go index d225667ccf13..9144541f8a3a 100644 --- a/node/node.go +++ b/node/node.go @@ -504,7 +504,7 @@ func (n *Node) initNetworking() error { } } if unknownACPs.Len() > 0 { - n.Log.Warn("gossipping unknown ACPs", + n.Log.Warn("gossiping unknown ACPs", zap.Reflect("acps", unknownACPs), ) } diff --git a/snow/README.md b/snow/README.md index b5b16bc938ba..86a90919b2ac 100644 --- a/snow/README.md +++ b/snow/README.md @@ -47,7 +47,7 @@ Currently, Avalanchego implements its own message serialization to communicate. ### [Network](https://github.com/ava-labs/avalanchego/blob/master/network/network.go) -The networking interface is shared across all chains. It implements functions from the `ExternalSender` interface. The two functions it implements are `Send` and `Gossip`. `Send` sends a message of type `OutboundMessage` to a specific set of nodes (specified by an array of `NodeIDs`). `Gossip` sends a message of type `OutboundMessage` to a random group of nodes in a subnet (can be a validator or a non-validator). Gossipping is used to push transactions across the network. The networking protocol uses TLS to pass messages between peers. +The networking interface is shared across all chains. It implements functions from the `ExternalSender` interface. The two functions it implements are `Send` and `Gossip`. `Send` sends a message of type `OutboundMessage` to a specific set of nodes (specified by an array of `NodeIDs`). `Gossip` sends a message of type `OutboundMessage` to a random group of nodes in a subnet (can be a validator or a non-validator). gossiping is used to push transactions across the network. 
The networking protocol uses TLS to pass messages between peers. Along with sending and gossiping, the networking library is also responsible for making connections and maintaining connections. Any node whether they are a validator or non-validator will attempt to connect to the primary network. From 7106666fd2e1bbe714c7e894da35a2a2cedd15e0 Mon Sep 17 00:00:00 2001 From: marun Date: Mon, 20 May 2024 16:38:13 -0700 Subject: [PATCH 007/102] [tmpnet] Ensure tmpnet compatibility with windows (#3002) Signed-off-by: marun Co-authored-by: Stephen Buttolph --- scripts/build_test.sh | 6 ------ .../fixture/tmpnet/detached_process_default.go | 17 +++++++++++++++++ .../fixture/tmpnet/detached_process_windows.go | 12 ++++++++++++ tests/fixture/tmpnet/node_process.go | 5 +---- 4 files changed, 30 insertions(+), 10 deletions(-) create mode 100644 tests/fixture/tmpnet/detached_process_default.go create mode 100644 tests/fixture/tmpnet/detached_process_windows.go diff --git a/scripts/build_test.sh b/scripts/build_test.sh index 747cb8882449..cfff1a2fc9fd 100755 --- a/scripts/build_test.sh +++ b/scripts/build_test.sh @@ -9,12 +9,6 @@ source "$AVALANCHE_PATH"/scripts/constants.sh EXCLUDED_TARGETS="| grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade" -GOOS=$(go env GOOS) -if [[ "$GOOS" == "windows" ]]; then - # tmpnet and antithesis tests (which depend on tmpnet) are not compatible with windows - EXCLUDED_TARGETS="${EXCLUDED_TARGETS} | grep -v tests/fixture | grep -v tests/antithesis" -fi - TEST_TARGETS="$(eval "go list ./... ${EXCLUDED_TARGETS}")" # shellcheck disable=SC2086 diff --git a/tests/fixture/tmpnet/detached_process_default.go b/tests/fixture/tmpnet/detached_process_default.go new file mode 100644 index 000000000000..0e4b20ddd8e3 --- /dev/null +++ b/tests/fixture/tmpnet/detached_process_default.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +//go:build linux || darwin + +package tmpnet + +import ( + "os/exec" + "syscall" +) + +func configureDetachedProcess(cmd *exec.Cmd) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setsid: true, + } +} diff --git a/tests/fixture/tmpnet/detached_process_windows.go b/tests/fixture/tmpnet/detached_process_windows.go new file mode 100644 index 000000000000..bf7ff9a726b4 --- /dev/null +++ b/tests/fixture/tmpnet/detached_process_windows.go @@ -0,0 +1,12 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +//go:build windows + +package tmpnet + +import "os/exec" + +func configureDetachedProcess(*exec.Cmd) { + panic("tmpnet deployment to windows is not supported") +} diff --git a/tests/fixture/tmpnet/node_process.go b/tests/fixture/tmpnet/node_process.go index 4ce0997eb60f..f2a9c7ff628d 100644 --- a/tests/fixture/tmpnet/node_process.go +++ b/tests/fixture/tmpnet/node_process.go @@ -114,11 +114,8 @@ func (p *NodeProcess) Start(w io.Writer) error { // All arguments are provided in the flags file cmd := exec.Command(p.node.RuntimeConfig.AvalancheGoPath, "--config-file", p.node.getFlagsPath()) // #nosec G204 - // Ensure process is detached from the parent process so that an error in the parent will not affect the child - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setsid: true, - } + configureDetachedProcess(cmd) if err := cmd.Start(); err != nil { return err From 551f8d33cbed58a0159f29e200be76c3283b0347 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 20 May 2024 22:03:28 -0400 Subject: [PATCH 008/102] Fix negative ETA caused by rollback in vm.SetState (#3036) --- snow/engine/snowman/bootstrap/bootstrapper.go | 16 +++++----- .../snowman/bootstrap/bootstrapper_test.go | 31 +++++++++++++++++++ 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index 2988ece9ba0e..095ba4e63b17 100644 --- 
a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -150,6 +150,14 @@ func (b *Bootstrapper) Clear(context.Context) error { } func (b *Bootstrapper) Start(ctx context.Context, startReqID uint32) error { + b.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, + }) + if err := b.VM.SetState(ctx, snow.Bootstrapping); err != nil { + return fmt.Errorf("failed to notify VM that bootstrapping has started: %w", err) + } + lastAccepted, err := b.getLastAccepted(ctx) if err != nil { return err @@ -161,14 +169,6 @@ func (b *Bootstrapper) Start(ctx context.Context, startReqID uint32) error { zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), ) - b.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, - State: snow.Bootstrapping, - }) - if err := b.VM.SetState(ctx, snow.Bootstrapping); err != nil { - return fmt.Errorf("failed to notify VM that bootstrapping has started: %w", err) - } - // Set the starting height b.startingHeight = lastAcceptedHeight b.requestID = startReqID diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 159fc98fb581..5577f62fa81d 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -742,6 +742,37 @@ func TestBootstrapperReceiveStaleAncestorsMessage(t *testing.T) { require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) } +func TestBootstrapperRollbackOnSetState(t *testing.T) { + require := require.New(t) + + config, _, _, vm := newConfig(t) + + blks := snowmantest.BuildChain(2) + initializeVMWithBlockchain(vm, blks) + + blks[1].StatusV = choices.Accepted + + bs, err := New( + config, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, + ) + require.NoError(err) + 
+ vm.SetStateF = func(context.Context, snow.State) error { + blks[1].StatusV = choices.Processing + return nil + } + + require.NoError(bs.Start(context.Background(), 0)) + require.Equal(blks[0].HeightV, bs.startingHeight) +} + func initializeVMWithBlockchain(vm *block.TestVM, blocks []*snowmantest.Block) { vm.CantSetState = false vm.LastAcceptedF = func(context.Context) (ids.ID, error) { From 34e7b2f680abd9d66678b6db18003fce42f68711 Mon Sep 17 00:00:00 2001 From: marun Date: Tue, 21 May 2024 08:54:45 -0700 Subject: [PATCH 009/102] [tmpnet] Enable single node networks (#3003) Co-authored-by: Alberto Benegiamo --- tests/e2e/c/dynamic_fees.go | 4 +-- tests/e2e/e2e_test.go | 6 +---- tests/fixture/e2e/flags.go | 11 ++++++++ tests/fixture/e2e/helpers.go | 1 - tests/fixture/tmpnet/README.md | 5 ++-- tests/fixture/tmpnet/cmd/main.go | 2 +- tests/fixture/tmpnet/network.go | 39 +++++++++++++++++----------- tests/fixture/tmpnet/network_test.go | 4 +-- tests/fixture/tmpnet/node.go | 6 ++--- tests/upgrade/upgrade_test.go | 4 +-- 10 files changed, 47 insertions(+), 35 deletions(-) diff --git a/tests/e2e/c/dynamic_fees.go b/tests/e2e/c/dynamic_fees.go index 9af074894afc..c3dda77b985c 100644 --- a/tests/e2e/c/dynamic_fees.go +++ b/tests/e2e/c/dynamic_fees.go @@ -37,9 +37,7 @@ var _ = e2e.DescribeCChain("[Dynamic Fees]", func() { ginkgo.It("should ensure that the gas price is affected by load", func() { ginkgo.By("creating a new private network to ensure isolation from other tests") - privateNetwork := &tmpnet.Network{ - Owner: "avalanchego-e2e-dynamic-fees", - } + privateNetwork := tmpnet.NewDefaultNetwork("avalanchego-e2e-dynamic-fees") e2e.Env.StartPrivateNetwork(privateNetwork) ginkgo.By("allocating a pre-funded key") diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 443a781281a3..73c29b3bc83f 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/onsi/gomega" - 
"github.com/stretchr/testify/require" // ensure test packages are scanned by ginkgo _ "github.com/ava-labs/avalanchego/tests/e2e/banff" @@ -38,11 +37,8 @@ func init() { var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Run only once in the first ginkgo process - nodes, err := tmpnet.NewNodes(tmpnet.DefaultNodeCount) - require.NoError(ginkgo.GinkgoT(), err) - + nodes := tmpnet.NewNodesOrPanic(flagVars.NodeCount()) subnets := vms.XSVMSubnets(nodes...) - return e2e.NewTestEnvironment( flagVars, &tmpnet.Network{ diff --git a/tests/fixture/e2e/flags.go b/tests/fixture/e2e/flags.go index 8af3cce6d787..bc752adaf7df 100644 --- a/tests/fixture/e2e/flags.go +++ b/tests/fixture/e2e/flags.go @@ -19,6 +19,7 @@ type FlagVars struct { reuseNetwork bool networkShutdownDelay time.Duration stopNetwork bool + nodeCount int } func (v *FlagVars) AvalancheGoExecPath() string { @@ -51,6 +52,10 @@ func (v *FlagVars) StopNetwork() bool { return v.stopNetwork } +func (v *FlagVars) NodeCount() int { + return v.nodeCount +} + func RegisterFlags() *FlagVars { vars := FlagVars{} flag.StringVar( @@ -89,6 +94,12 @@ func RegisterFlags() *FlagVars { false, "[optional] stop an existing network and exit without executing any tests.", ) + flag.IntVar( + &vars.nodeCount, + "node-count", + tmpnet.DefaultNodeCount, + "number of nodes the network should initially consist of", + ) return &vars } diff --git a/tests/fixture/e2e/helpers.go b/tests/fixture/e2e/helpers.go index 1395db1f8c29..358f07946055 100644 --- a/tests/fixture/e2e/helpers.go +++ b/tests/fixture/e2e/helpers.go @@ -233,7 +233,6 @@ func StartNetwork( DefaultNetworkDir, avalancheGoExecPath, pluginDir, - tmpnet.DefaultNodeCount, ), ) diff --git a/tests/fixture/tmpnet/README.md b/tests/fixture/tmpnet/README.md index b1158773d628..3c0679b4410d 100644 --- a/tests/fixture/tmpnet/README.md +++ b/tests/fixture/tmpnet/README.md @@ -49,7 +49,7 @@ A temporary network can be managed by the `tmpnetctl` cli tool: # Build the 
tmpnetctl binary $ ./scripts/build_tmpnetctl.sh -# Start a new network +# Start a new network. Possible to specify the number of nodes (> 1) with --node-count. $ ./build/tmpnetctl start-network --avalanchego-path=/path/to/avalanchego ... Started network /home/me/.tmpnet/networks/20240306-152305.924531 (UUID: abaab590-b375-44f6-9ca5-f8a6dc061725) @@ -87,6 +87,7 @@ network := &tmpnet.Network{ // Configure non-default values fo DefaultFlags: tmpnet.FlagsMap{ config.LogLevelKey: "INFO", // Change one of the network's defaults }, + Nodes: tmpnet.NewNodesOrPanic(5), // Number of initial validating nodes Subnets: []*tmpnet.Subnet{ // Subnets to create on the new network once it is running { Name: "xsvm-a", // User-defined name used to reference subnet in code and on disk @@ -97,6 +98,7 @@ network := &tmpnet.Network{ // Configure non-default values fo PreFundedKey: , // (Optional) A private key that is funded in the genesis bytes }, }, + ValidatorIDs: , // The IDs of nodes that validate the subnet }, }, } @@ -108,7 +110,6 @@ _ := tmpnet.StartNewNetwork( // Start the network "", // Empty string uses the default network path (~/tmpnet/networks) "/path/to/avalanchego", // The path to the binary that nodes will execute "/path/to/plugins", // The path nodes will use for plugin binaries (suggested value ~/.avalanchego/plugins) - 5, // Number of initial validating nodes ) uris := network.GetNodeURIs() diff --git a/tests/fixture/tmpnet/cmd/main.go b/tests/fixture/tmpnet/cmd/main.go index 7b415b32788b..0e6bcb1fd0a0 100644 --- a/tests/fixture/tmpnet/cmd/main.go +++ b/tests/fixture/tmpnet/cmd/main.go @@ -66,6 +66,7 @@ func main() { network := &tmpnet.Network{ Owner: networkOwner, + Nodes: tmpnet.NewNodesOrPanic(int(nodeCount)), } // Extreme upper bound, should never take this long @@ -80,7 +81,6 @@ func main() { rootDir, avalancheGoPath, pluginDir, - int(nodeCount), ) if err != nil { return err diff --git a/tests/fixture/tmpnet/network.go b/tests/fixture/tmpnet/network.go index 
faea1dde50f7..bd5b1b914efc 100644 --- a/tests/fixture/tmpnet/network.go +++ b/tests/fixture/tmpnet/network.go @@ -47,9 +47,13 @@ const ( HardHatKeyStr = "56289e99c94b6912bfc12adc093c9b51124f0dc54ac7a766b2bc5ccf558d8027" ) -// HardhatKey is a legacy used for hardhat testing in subnet-evm -// TODO(marun) Remove when no longer needed. -var HardhatKey *secp256k1.PrivateKey +var ( + // Key expected to be funded for subnet-evm hardhat testing + // TODO(marun) Remove when subnet-evm configures the genesis with this key. + HardhatKey *secp256k1.PrivateKey + + errInsufficientNodes = errors.New("network needs at least one node to start") +) func init() { hardhatKeyBytes, err := hex.DecodeString(HardHatKeyStr) @@ -105,6 +109,13 @@ type Network struct { Subnets []*Subnet } +func NewDefaultNetwork(owner string) *Network { + return &Network{ + Owner: owner, + Nodes: NewNodesOrPanic(DefaultNodeCount), + } +} + // Ensure a real and absolute network dir so that node // configuration that embeds the network path will continue to // work regardless of symlink and working directory changes. @@ -123,9 +134,11 @@ func StartNewNetwork( rootNetworkDir string, avalancheGoExecPath string, pluginDir string, - nodeCount int, ) error { - if err := network.EnsureDefaultConfig(w, avalancheGoExecPath, pluginDir, nodeCount); err != nil { + if len(network.Nodes) == 0 { + return errInsufficientNodes + } + if err := network.EnsureDefaultConfig(w, avalancheGoExecPath, pluginDir); err != nil { return err } if err := network.Create(rootNetworkDir); err != nil { @@ -171,7 +184,7 @@ func ReadNetwork(dir string) (*Network, error) { } // Initializes a new network with default configuration. 
-func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, pluginDir string, nodeCount int) error { +func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, pluginDir string) error { if _, err := fmt.Fprintf(w, "Preparing configuration for new network with %s\n", avalancheGoPath); err != nil { return err } @@ -187,6 +200,11 @@ func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, plugi } n.DefaultFlags.SetDefaults(DefaultFlags()) + if len(n.Nodes) == 1 { + // Sybil protection needs to be disabled for a single node network to start + n.DefaultFlags[config.SybilProtectionEnabledKey] = false + } + // Only configure the plugin dir with a non-empty value to ensure // the use of the default value (`[datadir]/plugins`) when // no plugin dir is configured. @@ -222,15 +240,6 @@ func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, plugi n.DefaultRuntimeConfig.AvalancheGoPath = avalancheGoPath } - // Ensure nodes are created - if len(n.Nodes) == 0 { - nodes, err := NewNodes(nodeCount) - if err != nil { - return err - } - n.Nodes = nodes - } - // Ensure nodes are configured for i := range n.Nodes { if err := n.EnsureNodeConfig(n.Nodes[i]); err != nil { diff --git a/tests/fixture/tmpnet/network_test.go b/tests/fixture/tmpnet/network_test.go index c04c497c2485..db8d1c404716 100644 --- a/tests/fixture/tmpnet/network_test.go +++ b/tests/fixture/tmpnet/network_test.go @@ -15,8 +15,8 @@ func TestNetworkSerialization(t *testing.T) { tmpDir := t.TempDir() - network := &Network{} - require.NoError(network.EnsureDefaultConfig(&bytes.Buffer{}, "/path/to/avalanche/go", "", 1)) + network := NewDefaultNetwork("testnet") + require.NoError(network.EnsureDefaultConfig(&bytes.Buffer{}, "/path/to/avalanche/go", "")) require.NoError(network.Create(tmpDir)) // Ensure node runtime is initialized require.NoError(network.readNodes()) diff --git a/tests/fixture/tmpnet/node.go b/tests/fixture/tmpnet/node.go index 
452d8d8e78ad..99777e674c04 100644 --- a/tests/fixture/tmpnet/node.go +++ b/tests/fixture/tmpnet/node.go @@ -104,16 +104,16 @@ func NewEphemeralNode(flags FlagsMap) *Node { } // Initializes the specified number of nodes. -func NewNodes(count int) ([]*Node, error) { +func NewNodesOrPanic(count int) []*Node { nodes := make([]*Node, count) for i := range nodes { node := NewNode("") if err := node.EnsureKeys(); err != nil { - return nil, err + panic(err) } nodes[i] = node } - return nodes, nil + return nodes } // Reads a node's configuration from the specified directory. diff --git a/tests/upgrade/upgrade_test.go b/tests/upgrade/upgrade_test.go index 7114ac291d43..d3632853bc31 100644 --- a/tests/upgrade/upgrade_test.go +++ b/tests/upgrade/upgrade_test.go @@ -45,9 +45,7 @@ var _ = ginkgo.Describe("[Upgrade]", func() { require := require.New(ginkgo.GinkgoT()) ginkgo.It("can upgrade versions", func() { - network := &tmpnet.Network{ - Owner: "avalanchego-upgrade", - } + network := tmpnet.NewDefaultNetwork("avalanchego-upgrade") e2e.StartNetwork(network, avalancheGoExecPath, "" /* pluginDir */, 0 /* shutdownDelay */, false /* reuseNetwork */) ginkgo.By(fmt.Sprintf("restarting all nodes with %q binary", avalancheGoExecPathToUpgradeTo)) From 066c3a6ec71bf741ac43ebdf54c876b27d4a6102 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Tue, 21 May 2024 19:17:55 +0200 Subject: [PATCH 010/102] P-chain - introducing fees calculators (#2698) Signed-off-by: Alberto Benegiamo Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- config/config.go | 7 +- genesis/genesis_fuji.go | 3 +- genesis/genesis_local.go | 3 +- genesis/genesis_mainnet.go | 3 +- genesis/params.go | 34 +-- node/config.go | 15 +- node/node.go | 49 ++-- vms/platformvm/block/builder/helpers_test.go | 19 +- vms/platformvm/block/executor/helpers_test.go | 19 +- .../block/executor/standard_block_test.go | 4 +- 
vms/platformvm/config/config.go | 45 +--- vms/platformvm/service_test.go | 10 +- .../txs/executor/create_chain_test.go | 2 +- .../txs/executor/create_subnet_test.go | 2 +- vms/platformvm/txs/executor/helpers_test.go | 19 +- vms/platformvm/txs/executor/import_test.go | 10 +- .../txs/executor/staker_tx_verification.go | 47 ++-- .../executor/staker_tx_verification_test.go | 4 +- .../txs/executor/standard_tx_executor.go | 36 ++- .../txs/executor/standard_tx_executor_test.go | 2 +- vms/platformvm/txs/fee/calculator.go | 142 ++++++++++ vms/platformvm/txs/fee/calculator_test.go | 251 ++++++++++++++++++ vms/platformvm/txs/fee/static_config.go | 33 +++ vms/platformvm/txs/txstest/context.go | 24 +- vms/platformvm/validator_set_property_test.go | 23 +- vms/platformvm/vm_regression_test.go | 8 +- vms/platformvm/vm_test.go | 27 +- 27 files changed, 640 insertions(+), 201 deletions(-) create mode 100644 vms/platformvm/txs/fee/calculator.go create mode 100644 vms/platformvm/txs/fee/calculator_test.go create mode 100644 vms/platformvm/txs/fee/static_config.go diff --git a/config/config.go b/config/config.go index 760c0022b123..4cc327240637 100644 --- a/config/config.go +++ b/config/config.go @@ -45,6 +45,7 @@ import ( "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/proposervm" ) @@ -758,9 +759,9 @@ func getStakingConfig(v *viper.Viper, networkID uint32) (node.StakingConfig, err return config, nil } -func getTxFeeConfig(v *viper.Viper, networkID uint32) genesis.TxFeeConfig { +func getTxFeeConfig(v *viper.Viper, networkID uint32) fee.StaticConfig { if networkID != constants.MainnetID && networkID != constants.FujiID { - return genesis.TxFeeConfig{ + return fee.StaticConfig{ TxFee: v.GetUint64(TxFeeKey), CreateAssetTxFee: v.GetUint64(CreateAssetTxFeeKey), 
CreateSubnetTxFee: v.GetUint64(CreateSubnetTxFeeKey), @@ -1325,7 +1326,7 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) { nodeConfig.FdLimit = v.GetUint64(FdLimitKey) // Tx Fee - nodeConfig.TxFeeConfig = getTxFeeConfig(v, nodeConfig.NetworkID) + nodeConfig.StaticConfig = getTxFeeConfig(v, nodeConfig.NetworkID) // Genesis Data genesisStakingCfg := nodeConfig.StakingConfig.StakingConfig diff --git a/genesis/genesis_fuji.go b/genesis/genesis_fuji.go index 27c43f79fd0b..06cd2dd143ac 100644 --- a/genesis/genesis_fuji.go +++ b/genesis/genesis_fuji.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) var ( @@ -18,7 +19,7 @@ var ( // FujiParams are the params used for the fuji testnet FujiParams = Params{ - TxFeeConfig: TxFeeConfig{ + StaticConfig: fee.StaticConfig{ TxFee: units.MilliAvax, CreateAssetTxFee: 10 * units.MilliAvax, CreateSubnetTxFee: 100 * units.MilliAvax, diff --git a/genesis/genesis_local.go b/genesis/genesis_local.go index 5a76aa25cfcf..72f180ce3445 100644 --- a/genesis/genesis_local.go +++ b/genesis/genesis_local.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) // PrivateKey-vmRQiZeXEXYMyJhEiqdC2z5JhuDbxL8ix9UVvjgMu2Er1NepE => P-local1g65uqn6t77p656w64023nh8nd9updzmxyymev2 @@ -36,7 +37,7 @@ var ( // LocalParams are the params used for local networks LocalParams = Params{ - TxFeeConfig: TxFeeConfig{ + StaticConfig: fee.StaticConfig{ TxFee: units.MilliAvax, CreateAssetTxFee: units.MilliAvax, CreateSubnetTxFee: 100 * units.MilliAvax, diff --git a/genesis/genesis_mainnet.go b/genesis/genesis_mainnet.go index 3808174ebbd1..94eae11cb2c8 100644 --- 
a/genesis/genesis_mainnet.go +++ b/genesis/genesis_mainnet.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) var ( @@ -18,7 +19,7 @@ var ( // MainnetParams are the params used for mainnet MainnetParams = Params{ - TxFeeConfig: TxFeeConfig{ + StaticConfig: fee.StaticConfig{ TxFee: units.MilliAvax, CreateAssetTxFee: 10 * units.MilliAvax, CreateSubnetTxFee: 1 * units.Avax, diff --git a/genesis/params.go b/genesis/params.go index e2ae45c697e6..6d4f5f4d978f 100644 --- a/genesis/params.go +++ b/genesis/params.go @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) type StakingConfig struct { @@ -33,42 +34,21 @@ type StakingConfig struct { RewardConfig reward.Config `json:"rewardConfig"` } -type TxFeeConfig struct { - // Transaction fee - TxFee uint64 `json:"txFee"` - // Transaction fee for create asset transactions - CreateAssetTxFee uint64 `json:"createAssetTxFee"` - // Transaction fee for create subnet transactions - CreateSubnetTxFee uint64 `json:"createSubnetTxFee"` - // Transaction fee for transform subnet transactions - TransformSubnetTxFee uint64 `json:"transformSubnetTxFee"` - // Transaction fee for create blockchain transactions - CreateBlockchainTxFee uint64 `json:"createBlockchainTxFee"` - // Transaction fee for adding a primary network validator - AddPrimaryNetworkValidatorFee uint64 `json:"addPrimaryNetworkValidatorFee"` - // Transaction fee for adding a primary network delegator - AddPrimaryNetworkDelegatorFee uint64 `json:"addPrimaryNetworkDelegatorFee"` - // Transaction fee for adding a subnet validator - AddSubnetValidatorFee uint64 `json:"addSubnetValidatorFee"` - // Transaction fee for adding a subnet delegator - 
AddSubnetDelegatorFee uint64 `json:"addSubnetDelegatorFee"` -} - type Params struct { StakingConfig - TxFeeConfig + fee.StaticConfig } -func GetTxFeeConfig(networkID uint32) TxFeeConfig { +func GetTxFeeConfig(networkID uint32) fee.StaticConfig { switch networkID { case constants.MainnetID: - return MainnetParams.TxFeeConfig + return MainnetParams.StaticConfig case constants.FujiID: - return FujiParams.TxFeeConfig + return FujiParams.StaticConfig case constants.LocalID: - return LocalParams.TxFeeConfig + return LocalParams.StaticConfig default: - return LocalParams.TxFeeConfig + return LocalParams.StaticConfig } } diff --git a/node/config.go b/node/config.go index 716b9a32c90a..f5f8c1332530 100644 --- a/node/config.go +++ b/node/config.go @@ -23,6 +23,7 @@ import ( "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) type APIIndexerConfig struct { @@ -122,13 +123,13 @@ type DatabaseConfig struct { // Config contains all of the configurations of an Avalanche node. 
type Config struct { - HTTPConfig `json:"httpConfig"` - IPConfig `json:"ipConfig"` - StakingConfig `json:"stakingConfig"` - genesis.TxFeeConfig `json:"txFeeConfig"` - StateSyncConfig `json:"stateSyncConfig"` - BootstrapConfig `json:"bootstrapConfig"` - DatabaseConfig `json:"databaseConfig"` + HTTPConfig `json:"httpConfig"` + IPConfig `json:"ipConfig"` + StakingConfig `json:"stakingConfig"` + fee.StaticConfig `json:"txFeeConfig"` + StateSyncConfig `json:"stateSyncConfig"` + BootstrapConfig `json:"bootstrapConfig"` + DatabaseConfig `json:"databaseConfig"` // Genesis information GenesisBytes []byte `json:"-"` diff --git a/node/node.go b/node/node.go index 9144541f8a3a..fff8e13572cb 100644 --- a/node/node.go +++ b/node/node.go @@ -75,6 +75,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/registry" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" @@ -1127,29 +1128,31 @@ func (n *Node) initVMs() error { err := utils.Err( n.VMManager.RegisterFactory(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ Config: platformconfig.Config{ - Chains: n.chainManager, - Validators: vdrs, - UptimeLockedCalculator: n.uptimeCalculator, - SybilProtectionEnabled: n.Config.SybilProtectionEnabled, - PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, - TrackedSubnets: n.Config.TrackedSubnets, - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, - CreateSubnetTxFee: n.Config.CreateSubnetTxFee, - TransformSubnetTxFee: n.Config.TransformSubnetTxFee, - CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, - AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, - AddPrimaryNetworkDelegatorFee: 
n.Config.AddPrimaryNetworkDelegatorFee, - AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, - AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, - UptimePercentage: n.Config.UptimeRequirement, - MinValidatorStake: n.Config.MinValidatorStake, - MaxValidatorStake: n.Config.MaxValidatorStake, - MinDelegatorStake: n.Config.MinDelegatorStake, - MinDelegationFee: n.Config.MinDelegationFee, - MinStakeDuration: n.Config.MinStakeDuration, - MaxStakeDuration: n.Config.MaxStakeDuration, - RewardConfig: n.Config.RewardConfig, + Chains: n.chainManager, + Validators: vdrs, + UptimeLockedCalculator: n.uptimeCalculator, + SybilProtectionEnabled: n.Config.SybilProtectionEnabled, + PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, + TrackedSubnets: n.Config.TrackedSubnets, + StaticFeeConfig: fee.StaticConfig{ + TxFee: n.Config.TxFee, + CreateAssetTxFee: n.Config.CreateAssetTxFee, + CreateSubnetTxFee: n.Config.CreateSubnetTxFee, + TransformSubnetTxFee: n.Config.TransformSubnetTxFee, + CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, + AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, + AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, + AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, + AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, + }, + UptimePercentage: n.Config.UptimeRequirement, + MinValidatorStake: n.Config.MinValidatorStake, + MaxValidatorStake: n.Config.MaxValidatorStake, + MinDelegatorStake: n.Config.MinDelegatorStake, + MinDelegationFee: n.Config.MinDelegationFee, + MinStakeDuration: n.Config.MinStakeDuration, + MaxStakeDuration: n.Config.MaxStakeDuration, + RewardConfig: n.Config.RewardConfig, UpgradeConfig: upgrade.Config{ ApricotPhase3Time: version.GetApricotPhase3Time(n.Config.NetworkID), ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index 
e9310ac05b4f..0108162649d6 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -43,6 +43,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" @@ -308,14 +309,16 @@ func defaultConfig(t *testing.T, f fork) *config.Config { Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: 5 * units.MilliAvax, - MaxValidatorStake: 500 * units.MilliAvax, - MinDelegatorStake: 1 * units.MilliAvax, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: 5 * units.MilliAvax, + MaxValidatorStake: 500 * units.MilliAvax, + MinDelegatorStake: 1 * units.MilliAvax, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: reward.Config{ MaxConsumptionRate: .12 * reward.PercentDenominator, MinConsumptionRate: .10 * reward.PercentDenominator, diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index c1295f862a6c..d87b67c76d25 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ b/vms/platformvm/block/executor/helpers_test.go @@ -44,6 +44,7 @@ import ( 
"github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" @@ -330,14 +331,16 @@ func defaultConfig(t *testing.T, f fork) *config.Config { Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: 5 * units.MilliAvax, - MaxValidatorStake: 500 * units.MilliAvax, - MinDelegatorStake: 1 * units.MilliAvax, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: 5 * units.MilliAvax, + MaxValidatorStake: 500 * units.MilliAvax, + MinDelegatorStake: 1 * units.MilliAvax, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: reward.Config{ MaxConsumptionRate: .12 * reward.PercentDenominator, MinConsumptionRate: .10 * reward.PercentDenominator, diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index 880b706884e1..b8c9257a2915 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -143,7 +143,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ID: avaxAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: env.config.CreateSubnetTxFee, + Amt: 
env.config.StaticFeeConfig.CreateSubnetTxFee, }, } utxoID := utxo.InputID() @@ -158,7 +158,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { UTXOID: utxo.UTXOID, Asset: utxo.Asset, In: &secp256k1fx.TransferInput{ - Amt: env.config.CreateSubnetTxFee, + Amt: env.config.StaticFeeConfig.CreateSubnetTxFee, }, }}, }}, diff --git a/vms/platformvm/config/config.go b/vms/platformvm/config/config.go index 8b8a0c717aed..731b079ca425 100644 --- a/vms/platformvm/config/config.go +++ b/vms/platformvm/config/config.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" ) @@ -30,6 +31,9 @@ type Config struct { // calling VM.Initialize. Validators validators.Manager + // All static fees config active before E-upgrade + StaticFeeConfig fee.StaticConfig + // Provides access to the uptime manager as a thread safe data structure UptimeLockedCalculator uptime.LockedCalculator @@ -42,33 +46,6 @@ type Config struct { // Set of subnets that this node is validating TrackedSubnets set.Set[ids.ID] - // Fee that is burned by every non-state creating transaction - TxFee uint64 - - // Fee that must be burned by every state creating transaction before AP3 - CreateAssetTxFee uint64 - - // Fee that must be burned by every subnet creating transaction after AP3 - CreateSubnetTxFee uint64 - - // Fee that must be burned by every transform subnet transaction - TransformSubnetTxFee uint64 - - // Fee that must be burned by every blockchain creating transaction after AP3 - CreateBlockchainTxFee uint64 - - // Transaction fee for adding a primary network validator - AddPrimaryNetworkValidatorFee uint64 - - // Transaction fee for adding a primary network delegator - AddPrimaryNetworkDelegatorFee uint64 - - // 
Transaction fee for adding a subnet validator - AddSubnetValidatorFee uint64 - - // Transaction fee for adding a subnet delegator - AddSubnetDelegatorFee uint64 - // The minimum amount of tokens one must bond to be a validator MinValidatorStake uint64 @@ -106,20 +83,6 @@ type Config struct { UseCurrentHeight bool } -func (c *Config) GetCreateBlockchainTxFee(timestamp time.Time) uint64 { - if c.UpgradeConfig.IsApricotPhase3Activated(timestamp) { - return c.CreateBlockchainTxFee - } - return c.CreateAssetTxFee -} - -func (c *Config) GetCreateSubnetTxFee(timestamp time.Time) uint64 { - if c.UpgradeConfig.IsApricotPhase3Activated(timestamp) { - return c.CreateSubnetTxFee - } - return c.CreateAssetTxFee -} - // Create the blockchain described in [tx], but only if this node is a member of // the subnet that validates the chain func (c *Config) CreateChain(chainID ids.ID, tx *txs.CreateChainTx) { diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index e44e603ce53d..95e2f98c3228 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -39,6 +39,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" @@ -366,6 +367,11 @@ func TestGetBalance(t *testing.T) { require := require.New(t) service, _, _ := defaultService(t) + var ( + feeCalc = fee.NewStaticCalculator(service.vm.Config.StaticFeeConfig, service.vm.Config.UpgradeConfig) + createSubnetFee = feeCalc.CalculateFee(&txs.CreateSubnetTx{}, service.vm.clock.Time()) + ) + // Ensure GetStake is correct for each of the genesis validators genesis, _ := defaultGenesis(t, 
service.vm.ctx.AVAXAssetID) for idx, utxo := range genesis.UTXOs { @@ -381,7 +387,7 @@ func TestGetBalance(t *testing.T) { if idx == 0 { // we use the first key to fund a subnet creation in [defaultGenesis]. // As such we need to account for the subnet creation fee - balance = defaultBalance - service.vm.Config.GetCreateSubnetTxFee(service.vm.clock.Time()) + balance = defaultBalance - createSubnetFee } require.Equal(avajson.Uint64(balance), reply.Balance) require.Equal(avajson.Uint64(balance), reply.Unlocked) @@ -750,7 +756,7 @@ func TestGetBlock(t *testing.T) { service, _, txBuilder := defaultService(t) service.vm.ctx.Lock.Lock() - service.vm.Config.CreateAssetTxFee = 100 * defaultTxFee + service.vm.StaticFeeConfig.CreateAssetTxFee = 100 * defaultTxFee // Make a block an accept it, then check we can get it. tx, err := txBuilder.NewCreateChainTx( // Test GetTx works for standard blocks diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index 288a294e493b..346c8ab1468a 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -195,7 +195,7 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { env.state.SetTimestamp(test.time) // to duly set fee cfg := *env.config - cfg.CreateBlockchainTxFee = test.fee + cfg.StaticFeeConfig.CreateBlockchainTxFee = test.fee builder := txstest.NewBuilder(env.ctx, &cfg, env.state) tx, err := builder.NewCreateChainTx( testSubnet1.ID(), diff --git a/vms/platformvm/txs/executor/create_subnet_test.go b/vms/platformvm/txs/executor/create_subnet_test.go index 77b37773107b..c1902dd56625 100644 --- a/vms/platformvm/txs/executor/create_subnet_test.go +++ b/vms/platformvm/txs/executor/create_subnet_test.go @@ -62,7 +62,7 @@ func TestCreateSubnetTxAP3FeeChange(t *testing.T) { } cfg := *env.config - cfg.CreateSubnetTxFee = test.fee + cfg.StaticFeeConfig.CreateSubnetTxFee = test.fee builder := txstest.NewBuilder(env.ctx, &cfg, 
env.state) tx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{}, diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index 994250dd5940..74c706227fea 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -41,6 +41,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" @@ -279,14 +280,16 @@ func defaultConfig(t *testing.T, f fork) *config.Config { Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: 5 * units.MilliAvax, - MaxValidatorStake: 500 * units.MilliAvax, - MinDelegatorStake: 1 * units.MilliAvax, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: 5 * units.MilliAvax, + MaxValidatorStake: 500 * units.MilliAvax, + MinDelegatorStake: 1 * units.MilliAvax, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: reward.Config{ MaxConsumptionRate: .12 * reward.PercentDenominator, MinConsumptionRate: .10 * reward.PercentDenominator, diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index c4b831badc92..7bb0be9afcc1 100644 --- 
a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -51,7 +51,7 @@ func TestNewImportTx(t *testing.T) { sourceKey, env.ctx.XChainID, map[ids.ID]uint64{ - env.ctx.AVAXAssetID: env.config.TxFee - 1, + env.ctx.AVAXAssetID: env.config.StaticFeeConfig.TxFee - 1, }, randSrc, ), @@ -67,7 +67,7 @@ func TestNewImportTx(t *testing.T) { sourceKey, env.ctx.XChainID, map[ids.ID]uint64{ - env.ctx.AVAXAssetID: env.config.TxFee, + env.ctx.AVAXAssetID: env.config.StaticFeeConfig.TxFee, }, randSrc, ), @@ -83,7 +83,7 @@ func TestNewImportTx(t *testing.T) { sourceKey, env.ctx.CChainID, map[ids.ID]uint64{ - env.ctx.AVAXAssetID: env.config.TxFee, + env.ctx.AVAXAssetID: env.config.StaticFeeConfig.TxFee, }, randSrc, ), @@ -100,7 +100,7 @@ func TestNewImportTx(t *testing.T) { sourceKey, env.ctx.XChainID, map[ids.ID]uint64{ - env.ctx.AVAXAssetID: env.config.TxFee, + env.ctx.AVAXAssetID: env.config.StaticFeeConfig.TxFee, customAssetID: 1, }, randSrc, @@ -148,7 +148,7 @@ func TestNewImportTx(t *testing.T) { totalOut += out.Out.Amount() } - require.Equal(env.config.TxFee, totalIn-totalOut) + require.Equal(env.config.StaticFeeConfig.TxFee, totalIn-totalOut) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index befd736a674b..0aac4ad50f64 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" safemath "github.com/ava-labs/avalanchego/utils/math" ) @@ -163,6 +164,9 @@ func verifyAddValidatorTx( } // Verify the flowcheck + feeCalculator := 
fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -170,7 +174,7 @@ func verifyAddValidatorTx( outs, sTx.Creds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkValidatorFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) @@ -251,6 +255,9 @@ func verifyAddSubnetValidatorTx( } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -258,7 +265,7 @@ func verifyAddSubnetValidatorTx( tx.Outs, baseTxCreds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: backend.Config.AddSubnetValidatorFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) @@ -326,6 +333,9 @@ func verifyRemoveSubnetValidatorTx( } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -333,7 +343,7 @@ func verifyRemoveSubnetValidatorTx( tx.Outs, baseTxCreds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: backend.Config.TxFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return nil, false, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) @@ -441,6 +451,9 @@ func verifyAddDelegatorTx( } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -448,7 +461,7 @@ func verifyAddDelegatorTx( outs, sTx.Creds, map[ids.ID]uint64{ - 
backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkDelegatorFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) @@ -547,15 +560,10 @@ func verifyAddPermissionlessValidatorTx( ) } - var txFee uint64 if tx.Subnet != constants.PrimaryNetworkID { if err := verifySubnetValidatorPrimaryNetworkRequirements(isDurangoActive, chainState, tx.Validator); err != nil { return err } - - txFee = backend.Config.AddSubnetValidatorFee - } else { - txFee = backend.Config.AddPrimaryNetworkValidatorFee } outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.StakeOuts)) @@ -563,6 +571,9 @@ func verifyAddPermissionlessValidatorTx( copy(outs[len(tx.Outs):], tx.StakeOuts) // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -570,7 +581,7 @@ func verifyAddPermissionlessValidatorTx( outs, sTx.Creds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: txFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) @@ -692,7 +703,6 @@ func verifyAddPermissionlessDelegatorTx( copy(outs, tx.Outs) copy(outs[len(tx.Outs):], tx.StakeOuts) - var txFee uint64 if tx.Subnet != constants.PrimaryNetworkID { // Invariant: Delegators must only be able to reference validator // transactions that implement [txs.ValidatorTx]. 
All @@ -703,13 +713,12 @@ func verifyAddPermissionlessDelegatorTx( if validator.Priority.IsPermissionedValidator() { return ErrDelegateToPermissionedValidator } - - txFee = backend.Config.AddSubnetDelegatorFee - } else { - txFee = backend.Config.AddPrimaryNetworkDelegatorFee } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -717,7 +726,7 @@ func verifyAddPermissionlessDelegatorTx( outs, sTx.Creds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: txFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) @@ -761,6 +770,10 @@ func verifyTransferSubnetOwnershipTx( } // Verify the flowcheck + currentTimestamp := chainState.GetTimestamp() + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -768,7 +781,7 @@ func verifyTransferSubnetOwnershipTx( tx.Outs, baseTxCreds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: backend.Config.TxFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) diff --git a/vms/platformvm/txs/executor/staker_tx_verification_test.go b/vms/platformvm/txs/executor/staker_tx_verification_test.go index 24e1df2a0db6..bde3da64ad7a 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification_test.go +++ b/vms/platformvm/txs/executor/staker_tx_verification_test.go @@ -423,7 +423,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { ).Return(ErrFlowCheckFailed) cfg := defaultTestConfig(t, durango, activeForkTime) - cfg.AddSubnetValidatorFee = 1 + cfg.StaticFeeConfig.AddSubnetValidatorFee = 1 return &Backend{ FlowChecker: flowChecker, @@ -469,7 +469,7 @@ func 
TestVerifyAddPermissionlessValidatorTx(t *testing.T) { ).Return(nil) cfg := defaultTestConfig(t, durango, activeForkTime) - cfg.AddSubnetValidatorFee = 1 + cfg.StaticFeeConfig.AddSubnetValidatorFee = 1 return &Backend{ FlowChecker: flowChecker, diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index 4ee5ced73173..725f1aaff814 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) var ( @@ -68,7 +69,9 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { } // Verify the flowcheck - createBlockchainTxFee := e.Config.GetCreateBlockchainTxFee(currentTimestamp) + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -76,7 +79,7 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { tx.Outs, baseTxCreds, map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: createBlockchainTxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return err @@ -114,7 +117,9 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { } // Verify the flowcheck - createSubnetTxFee := e.Config.GetCreateSubnetTxFee(currentTimestamp) + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -122,7 +127,7 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { tx.Outs, e.Tx.Creds, 
map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: createSubnetTxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return err @@ -194,6 +199,10 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { copy(ins, tx.Ins) copy(ins[len(tx.Ins):], tx.ImportedInputs) + // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpendUTXOs( tx, utxos, @@ -201,7 +210,7 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { tx.Outs, e.Tx.Creds, map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: e.Config.TxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return err @@ -250,6 +259,9 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -257,7 +269,7 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { outs, e.Tx.Creds, map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: e.Config.TxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return fmt.Errorf("failed verifySpend: %w", err) @@ -435,6 +447,10 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error return err } + // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + totalRewardAmount := tx.MaximumSupply - tx.InitialSupply if err := e.Backend.FlowChecker.VerifySpend( tx, @@ -446,7 +462,7 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error // entry in this map literal from being overwritten by the // second entry. 
map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: e.Config.TransformSubnetTxFee, + e.Ctx.AVAXAssetID: fee, tx.AssetID: totalRewardAmount, }, ); err != nil { @@ -555,6 +571,10 @@ func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { } // Verify the flowcheck + currentTimestamp := e.State.GetTimestamp() + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -562,7 +582,7 @@ func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { tx.Outs, e.Tx.Creds, map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: e.Config.TxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return err diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 47f26ac5dd13..de69b0ff5a8c 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -1588,7 +1588,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) // Set dependency expectations. - env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil).Times(1) subnetOwner := fx.NewMockOwner(ctrl) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) diff --git a/vms/platformvm/txs/fee/calculator.go b/vms/platformvm/txs/fee/calculator.go new file mode 100644 index 000000000000..f349f282f7ca --- /dev/null +++ b/vms/platformvm/txs/fee/calculator.go @@ -0,0 +1,142 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package fee + +import ( + "time" + + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" +) + +var _ txs.Visitor = (*calculator)(nil) + +func NewStaticCalculator(config StaticConfig, upgradeTimes upgrade.Config) *Calculator { + return &Calculator{ + config: config, + upgradeTimes: upgradeTimes, + } +} + +type Calculator struct { + config StaticConfig + upgradeTimes upgrade.Config +} + +// [CalculateFee] returns the minimal fee needed to accept [tx], at chain time [time] +func (c *Calculator) CalculateFee(tx txs.UnsignedTx, time time.Time) uint64 { + tmp := &calculator{ + upgrades: c.upgradeTimes, + staticCfg: c.config, + time: time, + } + + // this is guaranteed to never return an error + _ = tx.Visit(tmp) + return tmp.fee +} + +// calculator is intentionally unexported and used through Calculator to provide +// a more convenient API +type calculator struct { + // Pre E-fork inputs + upgrades upgrade.Config + staticCfg StaticConfig + time time.Time + + // outputs of visitor execution + fee uint64 +} + +func (c *calculator) AddValidatorTx(*txs.AddValidatorTx) error { + c.fee = c.staticCfg.AddPrimaryNetworkValidatorFee + return nil +} + +func (c *calculator) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { + c.fee = c.staticCfg.AddSubnetValidatorFee + return nil +} + +func (c *calculator) AddDelegatorTx(*txs.AddDelegatorTx) error { + c.fee = c.staticCfg.AddPrimaryNetworkDelegatorFee + return nil +} + +func (c *calculator) CreateChainTx(*txs.CreateChainTx) error { + if c.upgrades.IsApricotPhase3Activated(c.time) { + c.fee = c.staticCfg.CreateBlockchainTxFee + } else { + c.fee = c.staticCfg.CreateAssetTxFee + } + return nil +} + +func (c *calculator) CreateSubnetTx(*txs.CreateSubnetTx) error { + if c.upgrades.IsApricotPhase3Activated(c.time) { + c.fee = c.staticCfg.CreateSubnetTxFee + } else { + c.fee = 
c.staticCfg.CreateAssetTxFee + } + return nil +} + +func (c *calculator) AdvanceTimeTx(*txs.AdvanceTimeTx) error { + c.fee = 0 // no fees + return nil +} + +func (c *calculator) RewardValidatorTx(*txs.RewardValidatorTx) error { + c.fee = 0 // no fees + return nil +} + +func (c *calculator) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *calculator) TransformSubnetTx(*txs.TransformSubnetTx) error { + c.fee = c.staticCfg.TransformSubnetTxFee + return nil +} + +func (c *calculator) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *calculator) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { + if tx.Subnet != constants.PrimaryNetworkID { + c.fee = c.staticCfg.AddSubnetValidatorFee + } else { + c.fee = c.staticCfg.AddPrimaryNetworkValidatorFee + } + return nil +} + +func (c *calculator) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { + if tx.Subnet != constants.PrimaryNetworkID { + c.fee = c.staticCfg.AddSubnetDelegatorFee + } else { + c.fee = c.staticCfg.AddPrimaryNetworkDelegatorFee + } + return nil +} + +func (c *calculator) BaseTx(*txs.BaseTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *calculator) ImportTx(*txs.ImportTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *calculator) ExportTx(*txs.ExportTx) error { + c.fee = c.staticCfg.TxFee + return nil +} diff --git a/vms/platformvm/txs/fee/calculator_test.go b/vms/platformvm/txs/fee/calculator_test.go new file mode 100644 index 000000000000..c25fec9073e8 --- /dev/null +++ b/vms/platformvm/txs/fee/calculator_test.go @@ -0,0 +1,251 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package fee + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" +) + +func TestTxFees(t *testing.T) { + feeTestsDefaultCfg := StaticConfig{ + TxFee: 1 * units.Avax, + CreateAssetTxFee: 2 * units.Avax, + CreateSubnetTxFee: 3 * units.Avax, + TransformSubnetTxFee: 4 * units.Avax, + CreateBlockchainTxFee: 5 * units.Avax, + AddPrimaryNetworkValidatorFee: 6 * units.Avax, + AddPrimaryNetworkDelegatorFee: 7 * units.Avax, + AddSubnetValidatorFee: 8 * units.Avax, + AddSubnetDelegatorFee: 9 * units.Avax, + } + + latestForkTime := time.Unix(1713945427, 0) + upgrades := upgrade.Config{ + EUpgradeTime: latestForkTime, + DurangoTime: latestForkTime.Add(-1 * time.Hour), + CortinaTime: latestForkTime.Add(-2 * time.Hour), + BanffTime: latestForkTime.Add(-3 * time.Hour), + ApricotPhase5Time: latestForkTime.Add(-4 * time.Hour), + ApricotPhase3Time: latestForkTime.Add(-5 * time.Hour), + } + + // chain times needed to have specific upgrades active + preEUpgradeTime := upgrades.EUpgradeTime.Add(-1 * time.Second) + preApricotPhase3Time := upgrades.ApricotPhase3Time.Add(-1 * time.Second) + + tests := []struct { + name string + chainTime time.Time + unsignedTx func() txs.UnsignedTx + expected uint64 + }{ + { + name: "AddValidatorTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: addValidatorTx, + expected: feeTestsDefaultCfg.AddPrimaryNetworkValidatorFee, + }, + { + name: "AddSubnetValidatorTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: addSubnetValidatorTx, + expected: feeTestsDefaultCfg.AddSubnetValidatorFee, + }, + { + name: "AddDelegatorTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: addDelegatorTx, + expected: 
feeTestsDefaultCfg.AddPrimaryNetworkDelegatorFee, + }, + { + name: "CreateChainTx pre ApricotPhase3", + chainTime: preApricotPhase3Time, + unsignedTx: createChainTx, + expected: feeTestsDefaultCfg.CreateAssetTxFee, + }, + { + name: "CreateChainTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: createChainTx, + expected: feeTestsDefaultCfg.CreateBlockchainTxFee, + }, + { + name: "CreateSubnetTx pre ApricotPhase3", + chainTime: preApricotPhase3Time, + unsignedTx: createSubnetTx, + expected: feeTestsDefaultCfg.CreateAssetTxFee, + }, + { + name: "CreateSubnetTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: createSubnetTx, + expected: feeTestsDefaultCfg.CreateSubnetTxFee, + }, + { + name: "RemoveSubnetValidatorTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: removeSubnetValidatorTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "TransformSubnetTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: transformSubnetTx, + expected: feeTestsDefaultCfg.TransformSubnetTxFee, + }, + { + name: "TransferSubnetOwnershipTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: transferSubnetOwnershipTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "AddPermissionlessValidatorTx Primary Network pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + return addPermissionlessValidatorTx(constants.PrimaryNetworkID) + }, + expected: feeTestsDefaultCfg.AddPrimaryNetworkValidatorFee, + }, + { + name: "AddPermissionlessValidatorTx Subnet pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + subnetID := ids.GenerateTestID() + require.NotEqual(t, constants.PrimaryNetworkID, subnetID) + return addPermissionlessValidatorTx(subnetID) + }, + expected: feeTestsDefaultCfg.AddSubnetValidatorFee, + }, + { + name: "AddPermissionlessDelegatorTx Primary Network pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * 
time.Second), + unsignedTx: func() txs.UnsignedTx { + return addPermissionlessDelegatorTx(constants.PrimaryNetworkID) + }, + expected: feeTestsDefaultCfg.AddPrimaryNetworkDelegatorFee, + }, + { + name: "AddPermissionlessDelegatorTx pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + subnetID := ids.GenerateTestID() + require.NotEqual(t, constants.PrimaryNetworkID, subnetID) + return addPermissionlessDelegatorTx(subnetID) + }, + expected: feeTestsDefaultCfg.AddSubnetDelegatorFee, + }, + { + name: "BaseTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: baseTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "ImportTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: importTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "ExportTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: exportTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "RewardValidatorTx pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + return &txs.RewardValidatorTx{ + TxID: ids.GenerateTestID(), + } + }, + expected: 0, + }, + { + name: "AdvanceTimeTx pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + return &txs.AdvanceTimeTx{ + Time: uint64(time.Now().Unix()), + } + }, + expected: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + uTx := tt.unsignedTx() + fc := NewStaticCalculator(feeTestsDefaultCfg, upgrades) + require.Equal(t, tt.expected, fc.CalculateFee(uTx, tt.chainTime)) + }) + } +} + +func addValidatorTx() txs.UnsignedTx { + return &txs.AddValidatorTx{} +} + +func addSubnetValidatorTx() txs.UnsignedTx { + return &txs.AddSubnetValidatorTx{} +} + +func addDelegatorTx() txs.UnsignedTx { + return &txs.AddDelegatorTx{} +} + +func createChainTx() txs.UnsignedTx { + return &txs.CreateChainTx{} +} + +func createSubnetTx() 
txs.UnsignedTx { + return &txs.CreateSubnetTx{} +} + +func removeSubnetValidatorTx() txs.UnsignedTx { + return &txs.RemoveSubnetValidatorTx{} +} + +func transformSubnetTx() txs.UnsignedTx { + return &txs.TransformSubnetTx{} +} + +func transferSubnetOwnershipTx() txs.UnsignedTx { + return &txs.TransferSubnetOwnershipTx{} +} + +func addPermissionlessValidatorTx(subnetID ids.ID) txs.UnsignedTx { + return &txs.AddPermissionlessValidatorTx{ + Subnet: subnetID, + } +} + +func addPermissionlessDelegatorTx(subnetID ids.ID) txs.UnsignedTx { + return &txs.AddPermissionlessDelegatorTx{ + Subnet: subnetID, + } +} + +func baseTx() txs.UnsignedTx { + return &txs.BaseTx{} +} + +func importTx() txs.UnsignedTx { + return &txs.ImportTx{} +} + +func exportTx() txs.UnsignedTx { + return &txs.ExportTx{} +} diff --git a/vms/platformvm/txs/fee/static_config.go b/vms/platformvm/txs/fee/static_config.go new file mode 100644 index 000000000000..e03fb701806a --- /dev/null +++ b/vms/platformvm/txs/fee/static_config.go @@ -0,0 +1,33 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package fee + +type StaticConfig struct { + // Fee that is burned by every non-state creating transaction + TxFee uint64 `json:"txFee"` + + // Fee that must be burned by every state creating transaction before AP3 + CreateAssetTxFee uint64 `json:"createAssetTxFee"` + + // Fee that must be burned by every subnet creating transaction after AP3 + CreateSubnetTxFee uint64 `json:"createSubnetTxFee"` + + // Fee that must be burned by every transform subnet transaction + TransformSubnetTxFee uint64 `json:"transformSubnetTxFee"` + + // Fee that must be burned by every blockchain creating transaction after AP3 + CreateBlockchainTxFee uint64 `json:"createBlockchainTxFee"` + + // Transaction fee for adding a primary network validator + AddPrimaryNetworkValidatorFee uint64 `json:"addPrimaryNetworkValidatorFee"` + + // Transaction fee for adding a primary network delegator + AddPrimaryNetworkDelegatorFee uint64 `json:"addPrimaryNetworkDelegatorFee"` + + // Transaction fee for adding a subnet validator + AddSubnetValidatorFee uint64 `json:"addSubnetValidatorFee"` + + // Transaction fee for adding a subnet delegator + AddSubnetDelegatorFee uint64 `json:"addSubnetDelegatorFee"` +} diff --git a/vms/platformvm/txs/txstest/context.go b/vms/platformvm/txs/txstest/context.go index 514a85e2bae7..ec2252a632e1 100644 --- a/vms/platformvm/txs/txstest/context.go +++ b/vms/platformvm/txs/txstest/context.go @@ -8,6 +8,8 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/wallet/chain/p/builder" ) @@ -16,16 +18,22 @@ func newContext( cfg *config.Config, timestamp time.Time, ) *builder.Context { + var ( + feeCalc = fee.NewStaticCalculator(cfg.StaticFeeConfig, cfg.UpgradeConfig) + createSubnetFee = feeCalc.CalculateFee(&txs.CreateSubnetTx{}, timestamp) + 
createChainFee = feeCalc.CalculateFee(&txs.CreateChainTx{}, timestamp) + ) + return &builder.Context{ NetworkID: ctx.NetworkID, AVAXAssetID: ctx.AVAXAssetID, - BaseTxFee: cfg.TxFee, - CreateSubnetTxFee: cfg.GetCreateSubnetTxFee(timestamp), - TransformSubnetTxFee: cfg.TransformSubnetTxFee, - CreateBlockchainTxFee: cfg.GetCreateBlockchainTxFee(timestamp), - AddPrimaryNetworkValidatorFee: cfg.AddPrimaryNetworkValidatorFee, - AddPrimaryNetworkDelegatorFee: cfg.AddPrimaryNetworkDelegatorFee, - AddSubnetValidatorFee: cfg.AddSubnetValidatorFee, - AddSubnetDelegatorFee: cfg.AddSubnetDelegatorFee, + BaseTxFee: cfg.StaticFeeConfig.TxFee, + CreateSubnetTxFee: createSubnetFee, + TransformSubnetTxFee: cfg.StaticFeeConfig.TransformSubnetTxFee, + CreateBlockchainTxFee: createChainFee, + AddPrimaryNetworkValidatorFee: cfg.StaticFeeConfig.AddPrimaryNetworkValidatorFee, + AddPrimaryNetworkDelegatorFee: cfg.StaticFeeConfig.AddPrimaryNetworkDelegatorFee, + AddSubnetValidatorFee: cfg.StaticFeeConfig.AddSubnetValidatorFee, + AddSubnetDelegatorFee: cfg.StaticFeeConfig.AddSubnetDelegatorFee, } } diff --git a/vms/platformvm/validator_set_property_test.go b/vms/platformvm/validator_set_property_test.go index 4c1e3ef9fe2c..faf5eb810d51 100644 --- a/vms/platformvm/validator_set_property_test.go +++ b/vms/platformvm/validator_set_property_test.go @@ -43,6 +43,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -652,16 +653,18 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { UptimeLockedCalculator: uptime.NewLockedCalculator(), SybilProtectionEnabled: true, Validators: 
validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - TransformSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: defaultMinValidatorStake, - MaxValidatorStake: defaultMaxValidatorStake, - MinDelegatorStake: defaultMinDelegatorStake, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + TransformSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: defaultMinValidatorStake, + MaxValidatorStake: defaultMaxValidatorStake, + MinDelegatorStake: defaultMinDelegatorStake, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, UpgradeConfig: upgrade.Config{ ApricotPhase3Time: forkTime, ApricotPhase5Time: forkTime, diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 6813919614b8..d29b2b0af1fa 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -643,7 +643,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { ID: vm.ctx.AVAXAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: vm.TxFee, + Amt: vm.StaticFeeConfig.TxFee, OutputOwners: secp256k1fx.OutputOwners{}, }, } @@ -660,7 +660,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { UTXOID: utxo.UTXOID, Asset: utxo.Asset, In: &secp256k1fx.TransferInput{ - Amt: vm.TxFee, + Amt: vm.StaticFeeConfig.TxFee, }, }, }, @@ -890,7 +890,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { ID: vm.ctx.AVAXAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: vm.TxFee, + Amt: vm.StaticFeeConfig.TxFee, OutputOwners: secp256k1fx.OutputOwners{}, }, } @@ -907,7 +907,7 @@ func 
TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { UTXOID: utxo.UTXOID, Asset: utxo.Asset, In: &secp256k1fx.TransferInput{ - Amt: vm.TxFee, + Amt: vm.StaticFeeConfig.TxFee, }, }, }, diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 6f1177ed17f1..13802ad4dae2 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -57,6 +57,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -245,16 +246,18 @@ func defaultVM(t *testing.T, f fork) (*VM, *txstest.Builder, database.Database, UptimeLockedCalculator: uptime.NewLockedCalculator(), SybilProtectionEnabled: true, Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - TransformSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: defaultMinValidatorStake, - MaxValidatorStake: defaultMaxValidatorStake, - MinDelegatorStake: defaultMinDelegatorStake, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + TransformSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: defaultMinValidatorStake, + MaxValidatorStake: defaultMaxValidatorStake, + MinDelegatorStake: defaultMinDelegatorStake, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, UpgradeConfig: upgrade.Config{ 
ApricotPhase3Time: apricotPhase3Time, ApricotPhase5Time: apricotPhase5Time, @@ -387,7 +390,7 @@ func TestGenesis(t *testing.T) { require.NoError(err) require.Equal(utxo.Address, addr) - require.Equal(uint64(utxo.Amount)-vm.CreateSubnetTxFee, out.Amount()) + require.Equal(uint64(utxo.Amount)-vm.StaticFeeConfig.CreateSubnetTxFee, out.Amount()) } } @@ -2367,7 +2370,7 @@ func TestBaseTx(t *testing.T) { } require.Equal(totalOutputAmt, key0OutputAmt+key1OutputAmt+changeAddrOutputAmt) - require.Equal(vm.TxFee, totalInputAmt-totalOutputAmt) + require.Equal(vm.StaticFeeConfig.TxFee, totalInputAmt-totalOutputAmt) require.Equal(sendAmt, key1OutputAmt) vm.ctx.Lock.Unlock() From 85eac09bab1208ede8b8865dd185df6b2bcc5e44 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 21 May 2024 13:19:11 -0400 Subject: [PATCH 011/102] Change default staking key from RSA 4096 to secp256r1 (#3025) --- network/certs_test.go | 110 ------------------------------------- network/network_test.go | 22 ++++++-- network/test_cert_1.crt | 27 --------- network/test_cert_2.crt | 27 --------- network/test_cert_3.crt | 27 --------- network/test_key_1.key | 52 ------------------ network/test_key_2.key | 52 ------------------ network/test_key_3.key | 52 ------------------ network/tracked_ip_test.go | 51 +++++++++++++++++ staking/tls.go | 11 ++-- staking/tls_test.go | 7 +++ staking/verify_test.go | 53 ++++++++++++++++++ 12 files changed, 134 insertions(+), 357 deletions(-) delete mode 100644 network/certs_test.go delete mode 100644 network/test_cert_1.crt delete mode 100644 network/test_cert_2.crt delete mode 100644 network/test_cert_3.crt delete mode 100644 network/test_key_1.key delete mode 100644 network/test_key_2.key delete mode 100644 network/test_key_3.key create mode 100644 staking/verify_test.go diff --git a/network/certs_test.go b/network/certs_test.go deleted file mode 100644 index 587674119546..000000000000 --- a/network/certs_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (C) 
2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package network - -import ( - "crypto/tls" - "net" - "sync" - "testing" - - "github.com/stretchr/testify/require" - - _ "embed" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/peer" - "github.com/ava-labs/avalanchego/staking" - "github.com/ava-labs/avalanchego/utils/ips" -) - -var ( - //go:embed test_cert_1.crt - testCertBytes1 []byte - //go:embed test_key_1.key - testKeyBytes1 []byte - //go:embed test_cert_2.crt - testCertBytes2 []byte - //go:embed test_key_2.key - testKeyBytes2 []byte - //go:embed test_cert_3.crt - testCertBytes3 []byte - //go:embed test_key_3.key - testKeyBytes3 []byte - - ip *ips.ClaimedIPPort - otherIP *ips.ClaimedIPPort - - certLock sync.Mutex - tlsCerts []*tls.Certificate - tlsConfigs []*tls.Config -) - -func init() { - cert1, err := staking.LoadTLSCertFromBytes(testKeyBytes1, testCertBytes1) - if err != nil { - panic(err) - } - cert2, err := staking.LoadTLSCertFromBytes(testKeyBytes2, testCertBytes2) - if err != nil { - panic(err) - } - cert3, err := staking.LoadTLSCertFromBytes(testKeyBytes3, testCertBytes3) - if err != nil { - panic(err) - } - tlsCerts = []*tls.Certificate{ - cert1, cert2, cert3, - } - - stakingCert1, err := staking.ParseCertificate(cert1.Leaf.Raw) - if err != nil { - panic(err) - } - stakingCert2, err := staking.ParseCertificate(cert2.Leaf.Raw) - if err != nil { - panic(err) - } - - ip = ips.NewClaimedIPPort( - stakingCert1, - ips.IPPort{ - IP: net.IPv4(127, 0, 0, 1), - Port: 9651, - }, - 1, // timestamp - nil, // signature - ) - otherIP = ips.NewClaimedIPPort( - stakingCert2, - ips.IPPort{ - IP: net.IPv4(127, 0, 0, 1), - Port: 9651, - }, - 1, // timestamp - nil, // signature - ) -} - -func getTLS(t *testing.T, index int) (ids.NodeID, *tls.Certificate, *tls.Config) { - certLock.Lock() - defer certLock.Unlock() - - for len(tlsCerts) <= index { - cert, 
err := staking.NewTLSCert() - require.NoError(t, err) - tlsCerts = append(tlsCerts, cert) - } - for len(tlsConfigs) <= index { - cert := tlsCerts[len(tlsConfigs)] - tlsConfig := peer.TLSConfig(*cert, nil) - tlsConfigs = append(tlsConfigs, tlsConfig) - } - - tlsCert := tlsCerts[index] - cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) - require.NoError(t, err) - nodeID := ids.NodeIDFromCert(cert) - return nodeID, tlsCert, tlsConfigs[index] -} diff --git a/network/network_test.go b/network/network_test.go index ee645aef21d7..f8f7b56427ff 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -6,7 +6,6 @@ package network import ( "context" "crypto" - "crypto/rsa" "net" "sync" "testing" @@ -166,13 +165,19 @@ func newTestNetwork(t *testing.T, count int) (*testDialer, []*testListener, []id ) for i := 0; i < count; i++ { ip, listener := dialer.NewListener() - nodeID, tlsCert, tlsConfig := getTLS(t, i) + + tlsCert, err := staking.NewTLSCert() + require.NoError(t, err) + + cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(t, err) + nodeID := ids.NodeIDFromCert(cert) blsKey, err := bls.NewSecretKey() require.NoError(t, err) config := defaultConfig - config.TLSConfig = tlsConfig + config.TLSConfig = peer.TLSConfig(*tlsCert, nil) config.MyNodeID = nodeID config.MyIPPort = ip config.TLSKey = tlsCert.PrivateKey.(crypto.Signer) @@ -399,7 +404,14 @@ func TestTrackVerifiesSignatures(t *testing.T) { _, networks, wg := newFullyConnectedTestNetwork(t, []router.InboundHandler{nil}) network := networks[0] - nodeID, tlsCert, _ := getTLS(t, 1) + + tlsCert, err := staking.NewTLSCert() + require.NoError(err) + + cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(err) + nodeID := ids.NodeIDFromCert(cert) + require.NoError(network.config.Validators.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.Empty, 1)) stakingCert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) @@ -417,7 +429,7 @@ func 
TestTrackVerifiesSignatures(t *testing.T) { ), }) // The signature is wrong so this peer tracking info isn't useful. - require.ErrorIs(err, rsa.ErrVerification) + require.ErrorIs(err, staking.ErrECDSAVerificationFailure) network.peersLock.RLock() require.Empty(network.trackedIPs) diff --git a/network/test_cert_1.crt b/network/test_cert_1.crt deleted file mode 100644 index 2f2b95e658ad..000000000000 --- a/network/test_cert_1.crt +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEnTCCAoWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw -MFoYDzIxMjQwMTA5MTQ0NTU4WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAqCOUESK8b5N894dVCSIs4mTfNTdhaL5cnw3ZXSbZlfquBRJOxhqHXutG -An9++OTWvevrssaXBxGT4oOT3N11dm4iKh7ewi3to+1Sfqq71blCVZtBDOeWpZx0 -WwhPO37Us26fCR7T2gStiTHY9qE0QV/9p15OCAFsRb94JuhF0OR0d6tRm0yQ6b7Y -NRzpaBw4MBxZD9h84+QDdhsTyxI0xk/NnbG74pykjsau0/YA9mNqHHSnL4DyD5qu -IKqRfD5HQHemx66I3jEXUB/GxTHhxz5uskIpS9AV3oclvVi14BjSEWgNkJX+nMi+ -tjuSKouAFpzJZzZme2DvmyAecxbNVBdajOTe2QRiG7HKh1OdMZabd2dUNv5S9/gd -bI53s4R++z/H4llsBfk6B2+/DmqDRauh4Mz9HTf0Pud7Nz2b7r77PnPTjHExgN3R -i+Yo6LskRCQTzzTVwW/RY+rNVux9UE6ZPLarDbXnSyetKMUS7qlz8NUerWjtkC6i -om570LfTGs3GxIqVgoGg0mXuji+EoG+XpYR3PRaeo8cAmfEu7T+SxgSfJAv7DyZv -+a2VTZcOPDI1KTLrM8Xovy17t5rd9cy1/75vxnKLiGDEhzWJmNl4IvIYbtihWWl5 -ksdFYbe9Dpvuh/wBCGoK+kmCirUM1DiizWn5TxJeS1qYI8I2sYMCAwEAAaMgMB4w -DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIB -AABzczRjzfhlmV+bFDzAs7nszQlZREcoRuWe5qHy7VKLvZvIAaYlcApB34hH7nDq -T/8fS8g8rC+Tzw0iCPF21Z4AzSe76V6EU4VGWWe8l00nDszfvavE5BF24z8dCuVC -1gScC1tvG6FPT23koZ0BVmyueCIa7sdqFlDz8rrRpLWfUcLj38gxwWM2JVBHQUvV -j87lzpTNH+2nPiwrKISqUPFi4YvbWKe8T4bY2Elw7THiNLZGfgqOXVkeIVi4fs97 -Tc5uscZ4OpSTlrfJqMJEV8cMRvrDmhD/VWbJvnk7lyELPoHx6MUinBswBT51yvmY -bZh4AZ43GSvSyo/V7p9scytQP3zM1MeHpsFa0RHwGVFp2BmO1abvydAxX0NMWasv -WUzXCKliXsVD/qUeCU/CFnaBqpzBvm4AFBgwHzprwzP9Be/mz/TjTcsfrmoiyxlr -QjXNk9TnP9d+aeOJsRz+JSYyHETACO5PkCg+XCDyEOf+kQAzVb9Dp0oWaCovXciU 
-A5z0DSDzyKVBOQo0syb5NFsLZ2DeJemNbP+3kCNzBBASQ4VWAvRbLjPh3Oe8A5PZ -xezCvzRE05O6tYkz5C5hcKbpAjfP8G8RV6ERjLBICBfb7XI7T0hixhiNHlIKknkJ -F82B/zDt+qBFARw8A/qr44RF+vy3Ql4IS2ZcflAv2pTO ------END CERTIFICATE----- diff --git a/network/test_cert_2.crt b/network/test_cert_2.crt deleted file mode 100644 index 283e286be446..000000000000 --- a/network/test_cert_2.crt +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEnTCCAoWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw -MFoYDzIxMjQwMTA5MTQ0NTQ3WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEArT7afarml6cvCmAySAO8GQka1mcQIMACyEWy6KsqiccX+DoGh5ECyQSa -WFKWKGdQ32dAWGVlSkmmgJ1jtW749hSguysav3EPMaxe/ad5CV1MwyyccGS9U99M -z0UVuFEXVjN5W6UlcULp1oJDj07NzZP6ByRiDvnjzgeYb3jHwjqOBNwex1jLW6vp -oWD03zTanVQXZaaGcEISCI2CgDP3uXfd0NQpoGVpf9gMi0cdGu8gpqbLqBjzjzr8 -GDBQYGaWKFnlqe6X9nBUad/qNE3Zeb3ehSg+M2ecQzTZFWirfa6cGTtovu04RMML -9OLflQy3rTRST2HQ6z0gpVCP3V2Mg/LmAuWyhOLVYNkhEwkRHvddzFksRzQ+ghpP -cGfvI0dwxQV0CbEMVjd9zVEA6dOrMLI3st2922hqF23Al1+Hwcu1G/T3ybfSTwjd -YZ23IgkQF4r+RIXevzgOBBXfEwE8XERW2zNwUG5Sv5dxx+FgDjX0EGbrzgY6OeKT -D1SP/7WQLjwmGgwyNJYkAklvEKwU+dlGD5NpgvJ9fg8R1wUhp2HhSZ1l1OUVmRYw -YqUm7dTLK1CJU2BH2sRyZcUkwstjvgi688zfHNttGYmAnx6wGS12jWf+W4df+QNI -Ng6AdcJ5Ee0z0JAbTpZW/zX3CTSroow7igHnd4AwvKEVQFcyO/MCAwEAAaMgMB4w -DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIB -ACePaZvjw2KiheheWNjzOv2B+7uLVe7oEbThEUQypEmTFK8wKaHwI4BGdBGEOr/N -LZ1M2wAYgwzMTEDJE+GEB2ZHIdH9cH5lu7ITsOMVcBSJttEJVhhEtbMwVJ9JC62j -AsW4VmHFpEik+xvinxedKczXOa21YJo4sv2TiFWFaSHqPeRo7HA1dxQYOwiLsS6e -JKIupMrn8IZz2YN5gFhbvQTBp2J3u6kxMIzN0a+BPARR4fwMn5lVMVvye/+8Kwtw -dZHSN1FYUcFqHagmhNlNkAOaGQklSFWtsVVQxQCFS2bxEImLj5kG16fCAsQoRC0J -ZS2OaRncrtB0r0Qu1JB5XJP9FLflSb57KIxBNVrl+iWdWikgBFE6cMthMwgLfQ99 -k8AMp6KrCjcxqegN+P30ct/JwahKPq2+SwtdHG3yrZ2TJEjhOtersrTnRK9zqm9v -lqS7JsiztjgqnhMs2eTdXygfEe0AoZihGTaaLYj37A9+2RECkuijkjBghG2NBnv6 -264lTghZyZcZgZNCgYglYC1bhifEorJpYf6TOOcDAi5UH8R7vi4x70vI6sIDrhga -d9E63EVe11QdIjceceMlNm42UTrhl0epMbL6FIzU+d91qBgd9qT6YqoYPFZSiYFy 
-2hArgLxH2fxTXatCAit5g1MEk0w1MiHVrPZ8lTU3U/ET ------END CERTIFICATE----- diff --git a/network/test_cert_3.crt b/network/test_cert_3.crt deleted file mode 100644 index c0977191ec7b..000000000000 --- a/network/test_cert_3.crt +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEnTCCAoWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw -MFoYDzIxMjQwMTA5MTQ0NTM0WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEA5aV76ivIZ1iWmW0OzGMCrmFQBnej9JntQ1jP9yiacKu7j5Z/bD/eqoyc -jRwoSiesErfnThAGy7H80glVw/XmC0fYNPVDPyzAEdNk46M3yEI8hAKI6aSkl1s1 -KVAHpQuNcG+3xIB39OOMx0XuycZ6gqzyMmjqeT0cThNDXTwGbodMVDAf0q220QAq -zB/lz0sjHPXlYh25LJ1yPtl+vlcfGrP+q+2ODR9rnI79PE7AZB4Xc6wUIca5XXkH -PS7zQ1Ida1xrf446MYCVuazLFhpzq8/nhkxNMzxdZsJaWavL+xkpjGxAySvj0jlu -QFGsmsxOIU/XgJD/VRqqyISXpl2wg0l8mpsU9fV7bEW1y6MIc7AARRgbbEPiDz8m -/O8mjEW3C16untLHB7LzPCCitTssGR65Shkj+Lw+aM4X5ZI+Xm8eHTRCek8T5Cl3 -Sm2UFkLk2mun6cwoyWWhwi6+EfW6ks0c7qSHtJTP8DgLrWxYmBuD9PKSHclpa4/5 -toj52YnT6fIBJWz5ggIdntRCaH8+0eWvwuvDsdPUL7JQFjJmfQOdMenlNqW2aEvx -+JZiYLJBWj9cjpI33P5CAfFEVM3IFlDHmMHRTQ/kKLcfvSDfuofEBoMt4tjf01Um -dfi8kFKWl9ba9I7CoQ13U4J1wkk6KxatZP7eGCmKRoq8w+Y38NsCAwEAAaMgMB4w -DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIB -AKsvbN5/r4YPguetl+jIhqpr4TZM8GNZRGTqkKC8clRspBeihJqkNQWsnZiFkJTH -NhNAx+7tlJHqeGdojc2XjBAkc+//qYqXKHgihsO54bVG9oN9IPO+mpPumRRhGneH -jTUE/hLFqwA4ZPw5L1HtJ0m1yqg/HXf4aBXcVQ/YO8YN17ZgLpueYt+Chi1pP/Ku -TzHuoKuHst2T6uuZQZxcD+XJoXwdOt7mfPTh5y9/Psjn+qx833DNWSwF3O/lEghA -2yOb+5CFta2LLUHH894oj5SvgJ/5cvn4+NbyDCUv5ebvE98BMh72PLNRuIRV0gfO -XalMIZ+9Jm2TGXD0dWt9GeZ5z3h+nCEB6s3x0sqluaWG3lTUx+4T/aIxdGuvPFi6 -7DWm7TG7yxFGfbECyyXXL+B/gyHhE1Q93nE3wK9flSG+ljqFJS+8wytht52XhgwE -lV1AwHgxkbkFzNIwB0s7etR9+wBcQvFKqeCZrDeG1twKNcY1dv1D/OCUlBYJvL/X -YADeT2ZjFzHhWhv6TLVEAtqytT1o4qXh6VWeIrwfMG0VcQSiJyNxwO/aW5BOTM44 -EelDzvSjo/pRxqN/m44Iuf0Ran86DO7LmjNYh/04FN3oaL9cFIaT9BWXt/Xx2Fdw -+dg5bPSJ62ExVnnNRlY9lQECkSoRZK2epcICs+3YmmGX ------END CERTIFICATE----- diff --git a/network/test_key_1.key 
b/network/test_key_1.key deleted file mode 100644 index c49775114d66..000000000000 --- a/network/test_key_1.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCoI5QRIrxvk3z3 -h1UJIiziZN81N2FovlyfDdldJtmV+q4FEk7GGode60YCf3745Na96+uyxpcHEZPi -g5Pc3XV2biIqHt7CLe2j7VJ+qrvVuUJVm0EM55alnHRbCE87ftSzbp8JHtPaBK2J -Mdj2oTRBX/2nXk4IAWxFv3gm6EXQ5HR3q1GbTJDpvtg1HOloHDgwHFkP2Hzj5AN2 -GxPLEjTGT82dsbvinKSOxq7T9gD2Y2ocdKcvgPIPmq4gqpF8PkdAd6bHrojeMRdQ -H8bFMeHHPm6yQilL0BXehyW9WLXgGNIRaA2Qlf6cyL62O5Iqi4AWnMlnNmZ7YO+b -IB5zFs1UF1qM5N7ZBGIbscqHU50xlpt3Z1Q2/lL3+B1sjnezhH77P8fiWWwF+ToH -b78OaoNFq6HgzP0dN/Q+53s3PZvuvvs+c9OMcTGA3dGL5ijouyREJBPPNNXBb9Fj -6s1W7H1QTpk8tqsNtedLJ60oxRLuqXPw1R6taO2QLqKibnvQt9MazcbEipWCgaDS -Ze6OL4Sgb5elhHc9Fp6jxwCZ8S7tP5LGBJ8kC/sPJm/5rZVNlw48MjUpMuszxei/ -LXu3mt31zLX/vm/GcouIYMSHNYmY2Xgi8hhu2KFZaXmSx0Vht70Om+6H/AEIagr6 -SYKKtQzUOKLNaflPEl5LWpgjwjaxgwIDAQABAoICAHGe8U0PGyWPFlCzLDyq0of+ -wHNWxEWi9jYphqyTN1BJgVU+BOuMO9RhywKfI6+P/KmFBtbdqmuFblkQr1f+c4Uf -cYjjKYcwwDkZg7jDKYGI2pG9A51z1nJ9oodtuxUqZRQH+gKQyXq31Ik0nTg0wXo4 -ItH6QWLZi1AqzkgEiEFcUHQZ2mDGwdqjM7nYmsXW5AVm8qxpkCP0Dn6+V4bP+8fT -X9BjreK6Fd3B15y2zfmyPp+SGPRZ/7mZvnemq/+4mi+va43enPEBXY6wmoLhbYBV -6ToeyYdIy65/x3oHu4f/Xd2TYi9FnTRX18CPyvtjH6CoPNW5hlFztRcwAkOlsgQ7 -sZ+9FGAnRvz1lrBg80DeCHeSKVkDHmMQSINhPcPnlMJpxn6iiZjdvz/Bd+9RRqZl -xUI/lV3/Wueh8SeCQlFOj3fHBZEaq6QoC/VmmaeIiLEm1hj+ymuFxwOtA6AKWLb3 -59XnEkONeTfv9d2eQ7NOPU86n/zhWHUKodmBUEaxLDaUwRkS1Adb4rLuRwrMfn3a -2KkknYWzvyrlk8lDqKAMeQneFmpresGAXeIn0vt434eaGcK4a/IZ8PebuhZxGq1Z -bVbxVm0AsLmd9X3htR6MOiZswnVmA3JCw1AMKZpLMDRSbjV0uYuhBJQsN4Y/kyOK -l52JtymFNvbuRF+836+RAoIBAQDZ9wyihmgsEPLl7PHzfYo4pnTs1puoT5PS7GjO -iVm7UtOKaawsJxKX3cxzSFVXONs9hbPPzmsQEL3Xz+lUsgrSeXReF00KLRbfE2LM -dv9hlJVMQXEKnEkFYNNgETyZIJE3ZDDqdd2PDzNM8aKHlvLYREiETCwVn7r4x5QE -jIHC0gUjRJHqUgSdAMa+qvranPLxVV9mpJmL2RXjjb/OtJosFef9h5augSNI9tPS -EDLm4wMjyXr25Vu20/cusmTlOhCzi2d23hNHx8nPE0nCEVtZ2rnnWyH/ozqRnpXX 
-EPh0IeZQmebBhHWzkjIPaOa05Ua5rkVAQau8/FUUubjXytyZAoIBAQDFerIQwodP -V46WVC0LtSq4ju88x1vgDfT0NFE3H6hIX7Mc91zt0NGOhzv4crfjnoj+romNfQwD -0ymtudnnoaGPFBRrRF8T+26jfFpes7Ve5q/PpY78zJH1ZLwyKKX4dzgeY0Aj9FbO -q4dzh21oD7wyknRm0NTqOvgLAuxoBFZ4FTgudKNDzGymgIaQVT1+h0226og289WT -iptkpOZ/HcxQts2U3j3a87pJB0IFjIrBTtVqIyphdwRVDa929WGDITUPHa3aqykx -Ma/zvXvocAlIDITVwxXlS16DkSS+5jdN/CUj5h0O6FefGaJmk6/bFQIeXM4fRhRF -M0cs1mxXkNR7AoIBAQCFxYftn4wDr4tD7f44sE3Kou6UBMqXq+9PvmQ8jjOSMi0+ -f8h5eKmCp0+5WSV3WJ/FzG8lFMzEmWHKOAI+Rt85ee0fajGQE0g8NMuoLUhjfSt8 -F5XnKy/tqxVPmoSUflZhpo4W96u5B1021f4oNU5pyM6w04ci5lt8IBEKEan6Bae9 -k3HyW9AVA8r2bj1zOmwoDXt1pYPPPraeZ/rWRCVy9SbihPrHst4TA9nQzLxQ0/az -Wg6rxOxa8xB7imU+AjsJ1n7zhyxSG54SBwZ3outr5D/AbEAbgvSJNslDq1iw/bU6 -tpnXHxKV2R38MyeU0jpr7zb1Tti2Li+RfsKhPhHRAoIBAHfbpXH4r6mfaeKiCokd -l2VXE6tfEMtnjTIfAuAjLb9nnk3JcTTCVj5cpDCCaEwV7+4sPz6KFB3KL3TK5Y/q -ESXHOTF12QNGyvsdQbhS+JU2DKVKRgP3oetADd2fwESTD5OaB9cKuRlNELQ1EVlk -m4RSUaYJwAC+c8gzKQtk/pp5vpSrpGBFFfjk70dxBRbjxm5r4OsBibK4IOKwF1o1 -2sluek6NqRtYbMtgRVka2SjE0VFPMKzhUNbSrJnWCy5MnGilSdz7n8/E6ZdVfXwx -a+C4AHPBqWt3GFFgad4X2p9Rl7U3OJHQwUXGiEQcBVNCZ/vHti9TGIB7xApZxn5L -YDsCggEBAJ8RhrfEzm2YkyODFKFwgOszHQ3TNSvbC4+yLOUMSdzdKIyroOq0t53A -PSs046TINd+EDs9Pi6E69C+RYLim1NYMHeHFMzmKnQPXPwJVnYYUKInbIMURcuE9 -8FNBSKg3SUGz31SwG4bRIkJluMUp5oSAEUxWaxbUzLYkZex2uxnUGSd6TjddWKk1 -+SuoiZ3+W6yPWWh7TDKAR/oukBCmLIJI7dXSwv2DhagRpppdoMfqcnsCAgs/omB8 -Ku4y/jEkGbxLgo3Qd6U1o/QZlZG+9Q0iaxQS4dIpMxA3LwrL5txy00bm3JeWMB4H -MUZqfFgfj8ESxFBEeToOwr3Jq46vOwQ= ------END PRIVATE KEY----- diff --git a/network/test_key_2.key b/network/test_key_2.key deleted file mode 100644 index bcc0a192b2b4..000000000000 --- a/network/test_key_2.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCtPtp9quaXpy8K -YDJIA7wZCRrWZxAgwALIRbLoqyqJxxf4OgaHkQLJBJpYUpYoZ1DfZ0BYZWVKSaaA -nWO1bvj2FKC7Kxq/cQ8xrF79p3kJXUzDLJxwZL1T30zPRRW4URdWM3lbpSVxQunW -gkOPTs3Nk/oHJGIO+ePOB5hveMfCOo4E3B7HWMtbq+mhYPTfNNqdVBdlpoZwQhII 
-jYKAM/e5d93Q1CmgZWl/2AyLRx0a7yCmpsuoGPOPOvwYMFBgZpYoWeWp7pf2cFRp -3+o0Tdl5vd6FKD4zZ5xDNNkVaKt9rpwZO2i+7ThEwwv04t+VDLetNFJPYdDrPSCl -UI/dXYyD8uYC5bKE4tVg2SETCREe913MWSxHND6CGk9wZ+8jR3DFBXQJsQxWN33N -UQDp06swsjey3b3baGoXbcCXX4fBy7Ub9PfJt9JPCN1hnbciCRAXiv5Ehd6/OA4E -Fd8TATxcRFbbM3BQblK/l3HH4WAONfQQZuvOBjo54pMPVI//tZAuPCYaDDI0liQC -SW8QrBT52UYPk2mC8n1+DxHXBSGnYeFJnWXU5RWZFjBipSbt1MsrUIlTYEfaxHJl -xSTCy2O+CLrzzN8c220ZiYCfHrAZLXaNZ/5bh1/5A0g2DoB1wnkR7TPQkBtOllb/ -NfcJNKuijDuKAed3gDC8oRVAVzI78wIDAQABAoICAQCIgPu7BMuINoyUClPT9k1h -FJF22eIVS/VlQ7XCKgvsX1j9lwrKCnI9XUkXyorR7wYD4OEMRWhX7kwpDtoffP7h -NkOm9kGvEjA8nWqDRk/SFxeCuUXSMS4URd/JeM+yWQKgQxKeKTOlWGnTQPRmmFsE -XlIlCn/Q+QiLr+RmAK601VpNbfs6azZgVsZRB4opzQVr7XQ5/cnz7bszzfxDc67/ -DflSr7jUztMfjmXj3/aI4F3DsazKGE7gTkOP85GBQ5OQ27Rf/sTxwnRgr7Nj3us6 -R2ZrWNgZvMudEKjze3OUJd6M6wiPV258j4p+O7ybPlgDOzSXo6TvlUyBtUaFz04E -5S7bgimNUxEjFzTxkn9W/FTUeauvJcgDk+JmMZ+I9dFdMIuyksndywN9KdXBVxZH -1ZtO1P6JeFpxF7zQUmkH+/6RZd9PbQGlpNI06nAj98LVwqSDCO1aejLqoXYs9zqG -DOU4JdRm3qK0eshIghkvVOWIYhqKPkskQfbTFY+hasg82cGGFyzxqOsSiuW+CVIy -3iF3WyfKgvLMABoK/38zutsMT+/mOtA7rjErh1NJuwwWkkglmuwQMDqaWdOASs+v -MK8JjSi6zDpnbp70Prw5pUlHvvsD1iYWo7SOcpFos+U5zw1jHJJvnAatzcXWixuu -Xzbn2BtCqSFigW7waMy14QKCAQEAx/Nwy2xH9lVGfz8aO2CB0FGL9Ra3Jcv4HFJT -nw6/yvVLvRAwr87+/c+qbIzwLKbQXV/4vmNsqPrIJiazY+Tk739DjcW8YaMbejfr -ASPHtYbeF0FmVbxBHNZ/JSDSYUXdFZ7JlBiDSs3zhPlFBZYG2tU3JJZCR8+9J/Ss -JEIwL9UlapMznMwljFkLbvZ2oFstKkfdY61WxROOIwuGaKr0yRnNvMMp135JiB/O -dwh/NfROt4JzQ5O4ipMg6Wc73+OvBsOSQHYZQHl9NOaK1uomu5bUY7H8pLwGU7sw -LmPRzrGiu8dB+UUEyFkNI2xzwkjet+0UGupDyOfsCMf9hlzWmwKCAQEA3c8FeHkl -Il4GEB0VEw1NtC5x6i+s3NiPOlUmH+nOHgdaI7/BfljfTokQBGo+GkXkJ36yTEMh -L9Vtya3HtW4VEHNfPMjntPztn4XQvMZdSpu/k8rM44m+CB0DDLhFfwRr2cyUAwHz -xebXw8KhceqaWRp6ygJGx5Sk0gr7s7nhmIByjdx4tddEH/MahLklGdV7Vnp+yb3o -zNLVx/aDueknArgUb/zvZRcYWuNoGs9ac4pl0m6jan/x0ZcdBF0SU2bI6ltvF3WT -qwcvVnbJbBwq5PRuL4ZUqrqmXBbBAkpLJTx+kfPKD4bgcZTBnV2TxDbzze9CeieT -YCtg4u+khW7ZiQKCAQBrMIEuPD0TvEFPo8dvP1w4Dg9Gc0f5li/LFwNHCIQezIMu 
-togzJ3ehHvuQt7llZoPbGsDhZ7FvoQk9EpAmpCVqksHnNbK4cNUhHur3sHO2R7e1 -pdSzb3lEeWStxbuic+6CUZ5kqwNvTZsXlP3Acd344EZwcbDUiHQyAENsKKNmcRBe -4szPaM1UQMQVV0De1CIRQXdYoSsb+VDATsReRg9140Rcxg8fO881jz+CpmZzySWN -0PvzpTRP7XG+Th5V9tv0d1FnByigXMCXZGPXtKzQ8ZmoXFlBAp8tsfKxW8e005uW -qMogVDStJrgZXmFsLN5goVKe3yk5gcMSLgwmRIyzAoIBAQCoE6CkmsAd27uiaDc4 -+aLA/1TIzZmiu+NEo5NBKY1LyexvHHZGBJgqTcg6YDtw8zchCmuXSGMUeRk5cxrb -C3Cgx5wKVn7l8acqc18qPPIigATavBkn7o92XG2cLOJUjogfQVuDL+6GLxeeupRV -2x1cmakj/DegMq32j+YNWbRuOB8WClPaDyYLQ877dcR8X/2XGTmMLAEFfFoMrWtB -7D/oWo76EWNiae7FqH6RmkCDPwNLQxVHtW4LkQOm89PYKRHkLKbw0uKz/bzMOzUE -XA/Q8Lux/YuY19kJ/SACWUO6Eq4icObTfzQCPWO9mFRJog57JWttXyHZBOXk8Qzt -I4NpAoIBACurK0zJxaGUdTjmzaVipauyOZYFBsbzvCWsdSNodtZ/mw6n/qkj2N33 -vNCRLrsQAkDKATzWrscRg+xvl5/wIa4B3s8TZNIp3hL7bvI/NoR5bi5M0vcjdXEd -DeKeZsSBzEs5zivM3aWEF5MSR2zpJPNYyD0PnT6EvZOkMoq6LM3FJcouS1ChePLQ -wHEY5ZMqPODOcQ+EixNXl6FGdywaJYxKnG4liG9zdJ0lGNIivTA7gyM+JCbG4fs8 -73uGsbCpts5Y2xKFp3uK8HjWKbOCR3dE4mOZM8M/NlsUGNjSydXZMIJYWR8nvVmo -i3mHicYaTQxj0ruIz7JHOtFNVGi1sME= ------END PRIVATE KEY----- diff --git a/network/test_key_3.key b/network/test_key_3.key deleted file mode 100644 index 2cef238b67a9..000000000000 --- a/network/test_key_3.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDlpXvqK8hnWJaZ -bQ7MYwKuYVAGd6P0me1DWM/3KJpwq7uPln9sP96qjJyNHChKJ6wSt+dOEAbLsfzS -CVXD9eYLR9g09UM/LMAR02TjozfIQjyEAojppKSXWzUpUAelC41wb7fEgHf044zH -Re7JxnqCrPIyaOp5PRxOE0NdPAZuh0xUMB/SrbbRACrMH+XPSyMc9eViHbksnXI+ -2X6+Vx8as/6r7Y4NH2ucjv08TsBkHhdzrBQhxrldeQc9LvNDUh1rXGt/jjoxgJW5 -rMsWGnOrz+eGTE0zPF1mwlpZq8v7GSmMbEDJK+PSOW5AUayazE4hT9eAkP9VGqrI -hJemXbCDSXyamxT19XtsRbXLowhzsABFGBtsQ+IPPyb87yaMRbcLXq6e0scHsvM8 -IKK1OywZHrlKGSP4vD5ozhflkj5ebx4dNEJ6TxPkKXdKbZQWQuTaa6fpzCjJZaHC -Lr4R9bqSzRzupIe0lM/wOAutbFiYG4P08pIdyWlrj/m2iPnZidPp8gElbPmCAh2e -1EJofz7R5a/C68Ox09QvslAWMmZ9A50x6eU2pbZoS/H4lmJgskFaP1yOkjfc/kIB -8URUzcgWUMeYwdFND+Qotx+9IN+6h8QGgy3i2N/TVSZ1+LyQUpaX1tr0jsKhDXdT 
-gnXCSTorFq1k/t4YKYpGirzD5jfw2wIDAQABAoICAQC/Rt32h29NvTj7JB5OWS2z -h3R7Xo2ev9Mi5EecSyKQNEpuZ+FMjcpubd47nrdkRLULhkhP+gNfCKpXW9Um+psY -zEemnJ7dcO2uK1B+VsWwtJLpNZ9KVIuPUjXuai1j6EJv423Ca2r++8WXeYVSZVJH -o7u8By09vIvl8B+M+eE1kNYfzVHETlLWtHfxO6RTy/a8OYhM+ArzwVSWStxJuBE9 -Ua0PETffcEtWxLbi04lmGrZX7315QKfG1ncUHBYc/blpYjpbrWCFON/9HpKtn2y3 -L91dPBKVWXNGkx1kUTb+t8+mmchAh6Ejyhgt1Jma+g8dqf4KpTs3bJXRnLcfqCvL -Kq+wCUGv7iVWlTmhlzLpneajLDdBxGfbkAgwPFOyZoJNrnh6hU60TPc1IV6YSLlB -GsxesK9QWUrg3BAN4iKD3FvDt0qeUPbPztxEZi1OzSYQDZUQBrBL+WHuD9NxeAYe -2yx1OlPMo73gK5GW/MHBCz77+NX2kVURlTvYW4TsmInCRvOTsVNkRPUJtiHYT7Ss -Y8SzS5F/u9sfjFAVowGgwtNfq8Rm6Q1QdPZltiUNBgiTekFNQEy7WhzVg6MlT5Ca -BRqUhN3+CFwxLZ9rSQL6gxfAHk9umb0ee4JU9JgcYjtb5AtyE6DmmcSZPSejjxit -HwZ/g5MDK7kk5fKMcnL7kQKCAQEA895z7T0c6y3rhWfEUMDdTlsPgAoxYNf+jXyJ -aQmtfnDP9tf8BdPpobfHp29e7JRaGGa9QWPaaemBPHXMmD+IegG9/E+PQdHQwFSG -OpI13uCBULt8a+MMUbTCg1V4uXqf2j1BUo9SFQ6aXh/Rg1gVBgsq1M6eyvel93io -0X+/cinsDEpB5HENZwBuRb0SP0RfCgQR9Yh+jIy2TwJDDNw3sG1TvIo9aK7blSwB -z/gwSDx1UUa2KReD4ChYcqgLFUj3F/uF2f20P/JuaUn7tU3HoCsbG0C+Cci/XSJ9 -gu8xYl64Vg16bO3CflqjucPTFXgyBOt0lIug77YYa9CgCUJvEwKCAQEA8RHqGghV -meDnRXvPmAEwtoT7IKBe+eYjGN6wc2o+QZzjeUFkyfOtaB8rqriUXqvihD2GD6XQ -O/cSNCqp5g6yUhBLo3b9BmCsQsvxkhMpwB/hdi5aYjn+CFQVD4rAso9yGwRBWoA0 -gQdGMKenOUhU/PtVKyTTUuY7rFD8RhYq0ZLqEgO7chn8QXCNPo7MfE/qF9vQBosP -ktiS0FG442PJp2B/lYKK6N2w77ZeCoLhQowaNN0/N36kX/n4bjBE2XFLNpSuHtlg -C7bV/RMR5i/3yB0eRVUDVlqC077qlC1w0tCNZvvi6kbWwIu/4pQTdcA8mAz5B7Lc -OwOMbA2GT4OIGQKCAQABoyS0Gwzup0hFhQTUZfcWZ5YbDfZ25/xVhtiFVANOLgO3 -bIvMnjebVliIzz6b6AMS1t2+aqU0wNSVS1UsUIDiENDtuLsFfhsgr3CXRBQIgwlb -OWcEcmnKwqPrrc85r5ETLgYaP8wVSBvRNfV6JEU/3SNUem6mfjMnDjBT97+ZTJ7B -Fl6K4hds8ZvL7BELS7I3pv9X3qq61tcCgMlidLgK/zDouyTeZw4iWkFI3Cm20nEX -MppWfEnuX1b4rhgk9HB0QMQNSp7DLyV+n3iJJxSIBsIP1Mdx2V8viOO+1UxHlMs4 -CK8hvBbqMkGXJbFtG3l6fvoxZR6XfWl8j9IDPebxAoIBAF07cnBy/LgwdQE4awb8 -ntxX/c+WdmTrjnNV3KQmWMGDba49jj9UkKIOPBMgo7EhhM9kA+8VT72BRncKcP7a -fDikuLwVjrHivXxv55N4+dKmAcp1DtuiVg7ehe6m2PO16olsUeIwZx3ntEuo61GK 
-GeRlR4ESEvCivj1cbNSmShUXXpNtAheU2Sxt3RJuo8MIHR7xEjkVmwZN4CnVEU5Q -D3M+LNmjzRlWc9GhlCk4iOn1yUTctFBAGE5OHLhwzo/R8ya+xcCEjVK6eXQQ5gFC -V+/64vQpdsr04lgGJC7+i/3cTnOfwxicIP4CjkmQvx3xJP4hNka189qW+r3nVSR3 -WDECggEAAQCCqF4J8C2keY+o/kYQBq0tHhrC28HgiVQuCGc4XruYQtDh4di/I72F -RsvgVHS29ApAlh29i29ws7K2bU6WIc+JR3nmwAHUtiJmxRZhn/c722AvRXF5YMH/ -u46bEURHF5sGz8vr5chX/R4LiF579xyNsB9KC3mPqdjW/L6ACQdrBJVAS9cwplO0 -D+YWxmCE1Ps2tQtz6ZN+LUC7WO6M24k8KW2y4Scue0/23uCllWFgS3/vxDdQDZWn -+7AvMYPh4Wrfdd0t0cU+c9rirFYVz+uo/QBUIZOIw64AvIUjZpHTbhcjz1mAqcgJ -eAOQk+OFUTNKeI9uJwoNYOguHsxt2w== ------END PRIVATE KEY----- diff --git a/network/tracked_ip_test.go b/network/tracked_ip_test.go index 956f02cc19b4..90207e48a6e9 100644 --- a/network/tracked_ip_test.go +++ b/network/tracked_ip_test.go @@ -4,12 +4,63 @@ package network import ( + "net" "testing" "time" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/ips" +) + +var ( + ip *ips.ClaimedIPPort + otherIP *ips.ClaimedIPPort ) +func init() { + { + cert, err := staking.NewTLSCert() + if err != nil { + panic(err) + } + stakingCert, err := staking.ParseCertificate(cert.Leaf.Raw) + if err != nil { + panic(err) + } + ip = ips.NewClaimedIPPort( + stakingCert, + ips.IPPort{ + IP: net.IPv4(127, 0, 0, 1), + Port: 9651, + }, + 1, // timestamp + nil, // signature + ) + } + + { + cert, err := staking.NewTLSCert() + if err != nil { + panic(err) + } + stakingCert, err := staking.ParseCertificate(cert.Leaf.Raw) + if err != nil { + panic(err) + } + otherIP = ips.NewClaimedIPPort( + stakingCert, + ips.IPPort{ + IP: net.IPv4(127, 0, 0, 1), + Port: 9651, + }, + 1, // timestamp + nil, // signature + ) + } +} + func TestTrackedIP(t *testing.T) { require := require.New(t) diff --git a/staking/tls.go b/staking/tls.go index fbb5d9e488ae..c63dd84c3dfe 100644 --- a/staking/tls.go +++ b/staking/tls.go @@ -5,8 +5,9 @@ package staking import ( "bytes" + "crypto/ecdsa" + 
"crypto/elliptic" "crypto/rand" - "crypto/rsa" "crypto/tls" "crypto/x509" "encoding/pem" @@ -115,9 +116,9 @@ func NewTLSCert() (*tls.Certificate, error) { // Returns the PEM byte representations of both. func NewCertAndKeyBytes() ([]byte, []byte, error) { // Create key to sign cert with - key, err := rsa.GenerateKey(rand.Reader, 4096) + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { - return nil, nil, fmt.Errorf("couldn't generate rsa key: %w", err) + return nil, nil, fmt.Errorf("couldn't generate ecdsa key: %w", err) } // Create self-signed staking cert @@ -125,10 +126,10 @@ func NewCertAndKeyBytes() ([]byte, []byte, error) { SerialNumber: big.NewInt(0), NotBefore: time.Date(2000, time.January, 0, 0, 0, 0, 0, time.UTC), NotAfter: time.Now().AddDate(100, 0, 0), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageDataEncipherment, + KeyUsage: x509.KeyUsageDigitalSignature, BasicConstraintsValid: true, } - certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, &key.PublicKey, key) + certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, key.Public(), key) if err != nil { return nil, nil, fmt.Errorf("couldn't create certificate: %w", err) } diff --git a/staking/tls_test.go b/staking/tls_test.go index 6de376c2a538..31762542802d 100644 --- a/staking/tls_test.go +++ b/staking/tls_test.go @@ -29,3 +29,10 @@ func TestMakeKeys(t *testing.T) { require.NoError(cert.Leaf.CheckSignature(cert.Leaf.SignatureAlgorithm, msg, sig)) } + +func BenchmarkNewCertAndKeyBytes(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _, err := NewCertAndKeyBytes() + require.NoError(b, err) + } +} diff --git a/staking/verify_test.go b/staking/verify_test.go new file mode 100644 index 000000000000..9c8ca5ad98cb --- /dev/null +++ b/staking/verify_test.go @@ -0,0 +1,53 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package staking + +import ( + "crypto" + "crypto/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/hashing" +) + +func BenchmarkSign(b *testing.B) { + tlsCert, err := NewTLSCert() + require.NoError(b, err) + + signer := tlsCert.PrivateKey.(crypto.Signer) + msg := []byte("msg") + hash := hashing.ComputeHash256(msg) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := signer.Sign(rand.Reader, hash, crypto.SHA256) + require.NoError(b, err) + } +} + +func BenchmarkVerify(b *testing.B) { + tlsCert, err := NewTLSCert() + require.NoError(b, err) + + signer := tlsCert.PrivateKey.(crypto.Signer) + msg := []byte("msg") + signature, err := signer.Sign( + rand.Reader, + hashing.ComputeHash256(msg), + crypto.SHA256, + ) + require.NoError(b, err) + + certBytes := tlsCert.Leaf.Raw + cert, err := ParseCertificate(certBytes) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := CheckSignature(cert, msg, signature) + require.NoError(b, err) + } +} From 301f14d2057b0efe2a59fc17d650b4e14310a6ca Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 21 May 2024 14:13:36 -0400 Subject: [PATCH 012/102] Fix ACP links (#3037) --- RELEASES.md | 14 +++++++------- utils/constants/acps.go | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 051b1801aac5..e2bcdb7686a5 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -342,13 +342,13 @@ The plugin version is updated to `34` all plugins must update to be compatible. 
This upgrade consists of the following Avalanche Community Proposals (ACPs): -- [ACP-23](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers.md) P-Chain Native Transfers -- [ACP-24](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips.md) Activate Shanghai EIPs on C-Chain -- [ACP-25](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors.md) Virtual Machine Application Errors -- [ACP-30](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm.md) Integrate Avalanche Warp Messaging into the EVM -- [ACP-31](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer.md) Enable Subnet Ownership Transfer -- [ACP-41](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers.md) Remove Pending Stakers -- [ACP-62](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx.md) Disable AddValidatorTx and AddDelegatorTx +- [ACP-23](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers/README.md) P-Chain Native Transfers +- [ACP-24](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips/README.md) Activate Shanghai EIPs on C-Chain +- [ACP-25](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors/README.md) Virtual Machine Application Errors +- [ACP-30](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm/README.md) Integrate Avalanche Warp Messaging into the EVM +- [ACP-31](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer/README.md) Enable Subnet Ownership Transfer +- [ACP-41](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers/README.md) Remove Pending 
Stakers +- [ACP-62](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx/README.md) Disable AddValidatorTx and AddDelegatorTx The changes in the upgrade go into effect at 11 AM ET (4 PM UTC) on Wednesday, March 6th, 2024 on Mainnet. diff --git a/utils/constants/acps.go b/utils/constants/acps.go index 5392b21865ad..3b54f6073099 100644 --- a/utils/constants/acps.go +++ b/utils/constants/acps.go @@ -11,23 +11,23 @@ var ( // // See: https://github.com/orgs/avalanche-foundation/projects/1 CurrentACPs = set.Of[uint32]( - 23, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers.md - 24, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips.md - 25, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors.md - 30, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm.md - 31, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer.md - 41, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers.md - 62, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx.md + 23, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers/README.md + 24, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips/README.md + 25, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors/README.md + 30, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm/README.md + 31, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer/README.md + 41, // 
https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers/README.md + 62, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx/README.md ) // ScheduledACPs are the ACPs incuded into the next upgrade. ScheduledACPs = set.Of[uint32]( - 23, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers.md - 24, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips.md - 25, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors.md - 30, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm.md - 31, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer.md - 41, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers.md - 62, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx.md + 23, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers/README.md + 24, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips/README.md + 25, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors/README.md + 30, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm/README.md + 31, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer/README.md + 41, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers/README.md + 62, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx/README.md ) ) From 6a894d0e2e762e39970d6094f8139bb25449ce93 Mon Sep 17 00:00:00 2001 From: Dhruba Basu 
<7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 21 May 2024 14:43:41 -0400 Subject: [PATCH 013/102] Prevent unnecessary bandwidth from activated ACPs (#3031) --- config/config.go | 5 +++++ utils/constants/acps.go | 23 ++++++++++------------- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/config/config.go b/config/config.go index 4cc327240637..44147b19dbd1 100644 --- a/config/config.go +++ b/config/config.go @@ -289,6 +289,11 @@ func getNetworkConfig( // peers that we support these upgrades. supportedACPs.Union(constants.ScheduledACPs) + // To decrease unnecessary network traffic, peers will not be notified of + // objection or support of activated ACPs. + supportedACPs.Difference(constants.ActivatedACPs) + objectedACPs.Difference(constants.ActivatedACPs) + config := network.Config{ ThrottlerConfig: network.ThrottlerConfig{ MaxInboundConnsPerSec: maxInboundConnsPerSec, diff --git a/utils/constants/acps.go b/utils/constants/acps.go index 3b54f6073099..efa315fb6ff3 100644 --- a/utils/constants/acps.go +++ b/utils/constants/acps.go @@ -6,11 +6,10 @@ package constants import "github.com/ava-labs/avalanchego/utils/set" var ( - // CurrentACPs is the set of ACPs that are currently, at the time of - // release, marked as implementable and not activated. + // ActivatedACPs is the set of ACPs that are activated. 
// // See: https://github.com/orgs/avalanche-foundation/projects/1 - CurrentACPs = set.Of[uint32]( + ActivatedACPs = set.Of[uint32]( 23, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers/README.md 24, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips/README.md 25, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors/README.md @@ -20,14 +19,12 @@ var ( 62, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx/README.md ) - // ScheduledACPs are the ACPs incuded into the next upgrade. - ScheduledACPs = set.Of[uint32]( - 23, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers/README.md - 24, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips/README.md - 25, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors/README.md - 30, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm/README.md - 31, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer/README.md - 41, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers/README.md - 62, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx/README.md - ) + // CurrentACPs is the set of ACPs that are currently, at the time of + // release, marked as implementable and not activated. + // + // See: https://github.com/orgs/avalanche-foundation/projects/1 + CurrentACPs = set.Of[uint32]() + + // ScheduledACPs are the ACPs included into the next upgrade. 
+ ScheduledACPs = set.Of[uint32]() ) From 4159a59281c82c28b81cede5ceccc76f78ea1f86 Mon Sep 17 00:00:00 2001 From: marun Date: Tue, 21 May 2024 17:12:23 -0700 Subject: [PATCH 014/102] [antithesis] Add test setup for xsvm (#2982) Signed-off-by: marun Co-authored-by: Stephen Buttolph --- .github/workflows/ci.yml | 14 +- .../workflows/publish_antithesis_images.yml | 7 + scripts/build_antithesis_images.sh | 71 ++++++-- scripts/build_antithesis_xsvm_workload.sh | 11 ++ scripts/tests.build_antithesis_images.sh | 17 +- tests/antithesis/README.md | 65 ++++++- .../antithesis/avalanchego/Dockerfile.config | 30 +--- tests/antithesis/avalanchego/Dockerfile.node | 3 + tests/antithesis/avalanchego/main.go | 44 +---- tests/antithesis/compose.go | 81 +++++++-- tests/antithesis/{avalanchego => }/config.go | 12 +- tests/antithesis/init_db.go | 60 +++++++ tests/antithesis/node_health.go | 50 ++++++ tests/antithesis/xsvm/Dockerfile.config | 5 + tests/antithesis/xsvm/Dockerfile.node | 61 +++++++ tests/antithesis/xsvm/Dockerfile.workload | 30 ++++ .../antithesis/xsvm/gencomposeconfig/main.go | 62 +++++++ tests/antithesis/xsvm/main.go | 168 ++++++++++++++++++ tests/e2e/e2e_test.go | 2 +- tests/e2e/vms/xsvm.go | 51 +----- tests/fixture/subnet/xsvm.go | 45 +++++ tests/fixture/tmpnet/defaults.go | 30 ++-- tests/fixture/tmpnet/network.go | 18 +- tests/fixture/tmpnet/node.go | 2 +- tests/fixture/tmpnet/node_config.go | 8 +- tests/fixture/tmpnet/node_process.go | 6 +- vms/example/xsvm/Dockerfile | 31 ++++ 27 files changed, 806 insertions(+), 178 deletions(-) create mode 100755 scripts/build_antithesis_xsvm_workload.sh rename tests/antithesis/{avalanchego => }/config.go (83%) create mode 100644 tests/antithesis/init_db.go create mode 100644 tests/antithesis/node_health.go create mode 100644 tests/antithesis/xsvm/Dockerfile.config create mode 100644 tests/antithesis/xsvm/Dockerfile.node create mode 100644 tests/antithesis/xsvm/Dockerfile.workload create mode 100644 
tests/antithesis/xsvm/gencomposeconfig/main.go create mode 100644 tests/antithesis/xsvm/main.go create mode 100644 tests/fixture/subnet/xsvm.go create mode 100644 vms/example/xsvm/Dockerfile diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 68ebfadf9743..9c6ed3123212 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -265,8 +265,8 @@ jobs: - name: Check image build shell: bash run: bash -x scripts/tests.build_image.sh - test_build_antithesis_avalanchego_image: - name: Antithesis avalanchego build + test_build_antithesis_avalanchego_images: + name: Build Antithesis avalanchego images runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -275,6 +275,16 @@ jobs: run: bash -x scripts/tests.build_antithesis_images.sh env: TEST_SETUP: avalanchego + test_build_antithesis_xsvm_images: + name: Build Antithesis xsvm images + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Check image build for xsvm test setup + shell: bash + run: bash -x scripts/tests.build_antithesis_images.sh + env: + TEST_SETUP: xsvm govulncheck: runs-on: ubuntu-latest name: govulncheck diff --git a/.github/workflows/publish_antithesis_images.yml b/.github/workflows/publish_antithesis_images.yml index f3b121dd0642..35e77218fdb6 100644 --- a/.github/workflows/publish_antithesis_images.yml +++ b/.github/workflows/publish_antithesis_images.yml @@ -31,3 +31,10 @@ jobs: IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} TAG: latest TEST_SETUP: avalanchego + + - name: Build and push images for xsvm test setup + run: bash -x ./scripts/build_antithesis_images.sh + env: + IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} + TAG: latest + TEST_SETUP: xsvm diff --git a/scripts/build_antithesis_images.sh b/scripts/build_antithesis_images.sh index 3ccccd1d3523..8e3c534bbcbe 100755 --- a/scripts/build_antithesis_images.sh +++ b/scripts/build_antithesis_images.sh @@ -5,8 +5,10 @@ set -euo pipefail # Builds docker images for antithesis 
testing. # e.g., -# ./scripts/build_antithesis_images.sh # Build local images -# IMAGE_PREFIX=/ TAG=latest ./scripts/build_antithesis_images.sh # Specify a prefix to enable image push and use a specific tag +# TEST_SETUP=avalanchego ./scripts/build_antithesis_images.sh # Build local images for avalanchego +# TEST_SETUP=avalanchego NODE_ONLY=1 ./scripts/build_antithesis_images.sh # Build only a local node image for avalanchego +# TEST_SETUP=xsvm ./scripts/build_antithesis_images.sh # Build local images for xsvm +# TEST_SETUP=xsvm IMAGE_PREFIX=/ TAG=latest ./scripts/build_antithesis_images.sh # Specify a prefix to enable image push and use a specific tag # Directory above this script AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) @@ -28,11 +30,13 @@ GO_VERSION="$(go list -m -f '{{.GoVersion}}')" function build_images { local test_setup=$1 local uninstrumented_node_dockerfile=$2 + local image_prefix=$3 + local node_only=${4:-} # Define image names local base_image_name="antithesis-${test_setup}" - if [[ -n "${IMAGE_PREFIX}" ]]; then - base_image_name="${IMAGE_PREFIX}/${base_image_name}" + if [[ -n "${image_prefix}" ]]; then + base_image_name="${image_prefix}/${base_image_name}" fi local node_image_name="${base_image_name}-node:${TAG}" local workload_image_name="${base_image_name}-workload:${TAG}" @@ -49,22 +53,65 @@ function build_images { fi # Define default build command - local docker_cmd="docker buildx build --build-arg GO_VERSION=${GO_VERSION}" - if [[ -n "${IMAGE_PREFIX}" ]]; then + local docker_cmd="docker buildx build --build-arg GO_VERSION=${GO_VERSION} --build-arg NODE_IMAGE=${node_image_name}" + + if [[ "${test_setup}" == "xsvm" ]]; then + # The xsvm node image is built on the avalanchego node image, which is assumed to have already been + # built. The image name doesn't include the image prefix because it is not intended to be pushed. 
+ docker_cmd="${docker_cmd} --build-arg AVALANCHEGO_NODE_IMAGE=antithesis-avalanchego-node:${TAG}" + fi + + # Build node image first to allow the workload image to use it. + ${docker_cmd} -t "${node_image_name}" -f "${node_dockerfile}" "${AVALANCHE_PATH}" + if [[ -n "${image_prefix}" ]]; then # Push images with an image prefix since the prefix defines a registry location docker_cmd="${docker_cmd} --push" fi - # Build node image first to allow the config and workload image builds to use it. - ${docker_cmd} -t "${node_image_name}" -f "${node_dockerfile}" "${AVALANCHE_PATH}" - ${docker_cmd} --build-arg NODE_IMAGE="${node_image_name}" -t "${workload_image_name}" -f "${base_dockerfile}.workload" "${AVALANCHE_PATH}" - ${docker_cmd} --build-arg IMAGE_TAG="${TAG}" -t "${config_image_name}" -f "${base_dockerfile}.config" "${AVALANCHE_PATH}" + if [[ -n "${node_only}" ]]; then + # Skip building the config and workload images. Supports building the avalanchego + # node image as the base image for the xsvm node image. + return + fi + + TARGET_PATH="${AVALANCHE_PATH}/build/antithesis/${test_setup}" + if [[ -d "${TARGET_PATH}" ]]; then + # Ensure the target path is empty before generating the compose config + rm -r "${TARGET_PATH:?}" + fi + + # Define the env vars for the compose config generation + COMPOSE_ENV="TARGET_PATH=${TARGET_PATH} IMAGE_TAG=${TAG}" + + if [[ "${test_setup}" == "xsvm" ]]; then + # Ensure avalanchego and xsvm binaries are available to create an initial db state that includes subnets. 
+ "${AVALANCHE_PATH}"/scripts/build.sh + "${AVALANCHE_PATH}"/scripts/build_xsvm.sh + COMPOSE_ENV="${COMPOSE_ENV} AVALANCHEGO_PATH=${AVALANCHE_PATH}/build/avalanchego AVALANCHEGO_PLUGIN_DIR=${HOME}/.avalanchego/plugins" + fi + + # Generate compose config for copying into the config image + # shellcheck disable=SC2086 + env ${COMPOSE_ENV} go run "${AVALANCHE_PATH}/tests/antithesis/${test_setup}/gencomposeconfig" + + # Build the config image + ${docker_cmd} -t "${config_image_name}" -f "${base_dockerfile}.config" "${AVALANCHE_PATH}" + + # Build the workload image + ${docker_cmd} -t "${workload_image_name}" -f "${base_dockerfile}.workload" "${AVALANCHE_PATH}" } TEST_SETUP="${TEST_SETUP:-}" if [[ "${TEST_SETUP}" == "avalanchego" ]]; then - build_images avalanchego "${AVALANCHE_PATH}/Dockerfile" + build_images avalanchego "${AVALANCHE_PATH}/Dockerfile" "${IMAGE_PREFIX}" "${NODE_ONLY:-}" +elif [[ "${TEST_SETUP}" == "xsvm" ]]; then + # Only build the node image to use as the base for the xsvm image. Provide an empty + # image prefix (the 3rd argument) to prevent the image from being pushed + NODE_ONLY=1 + build_images avalanchego "${AVALANCHE_PATH}/Dockerfile" "" "${NODE_ONLY}" + + build_images xsvm "${AVALANCHE_PATH}/vms/example/xsvm/Dockerfile" "${IMAGE_PREFIX}" else - echo "TEST_SETUP must be set. Valid values are 'avalanchego'" + echo "TEST_SETUP must be set. Valid values are 'avalanchego' or 'xsvm'" exit 255 fi diff --git a/scripts/build_antithesis_xsvm_workload.sh b/scripts/build_antithesis_xsvm_workload.sh new file mode 100755 index 000000000000..153965eeb63b --- /dev/null +++ b/scripts/build_antithesis_xsvm_workload.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Directory above this script +AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) +# Load the constants +source "$AVALANCHE_PATH"/scripts/constants.sh + +echo "Building Workload..." 
+go build -o "$AVALANCHE_PATH/build/antithesis-xsvm-workload" "$AVALANCHE_PATH/tests/antithesis/xsvm/"*.go diff --git a/scripts/tests.build_antithesis_images.sh b/scripts/tests.build_antithesis_images.sh index a10e5c4ccda9..8fdce84ab641 100755 --- a/scripts/tests.build_antithesis_images.sh +++ b/scripts/tests.build_antithesis_images.sh @@ -10,6 +10,10 @@ set -euo pipefail # 4. Stopping the workload and its target network # +# e.g., +# TEST_SETUP=avalanchego ./scripts/tests.build_antithesis_images.sh # Test build of images for avalanchego test setup +# DEBUG=1 TEST_SETUP=avalanchego ./scripts/tests.build_antithesis_images.sh # Retain the temporary compose path for troubleshooting + AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Discover the default tag that will be used for the image @@ -27,6 +31,8 @@ docker create --name "${CONTAINER_NAME}" "${IMAGE_NAME}:${TAG}" /bin/true # Create a temporary directory to write the compose configuration to TMPDIR="$(mktemp -d)" +echo "using temporary directory ${TMPDIR} as the docker-compose path" + COMPOSE_FILE="${TMPDIR}/docker-compose.yml" COMPOSE_CMD="docker-compose -f ${COMPOSE_FILE}" @@ -36,8 +42,10 @@ function cleanup { docker rm "${CONTAINER_NAME}" echo "stopping and removing the docker compose project" ${COMPOSE_CMD} down --volumes - echo "removing temporary dir" - rm -rf "${TMPDIR}" + if [[ -z "${DEBUG:-}" ]]; then + echo "removing temporary dir" + rm -rf "${TMPDIR}" + fi } trap cleanup EXIT @@ -47,9 +55,10 @@ docker cp "${CONTAINER_NAME}":/docker-compose.yml "${COMPOSE_FILE}" # Copy the volume paths out of the container docker cp "${CONTAINER_NAME}":/volumes "${TMPDIR}/" -# Run the docker compose project for one minute without error +# Run the docker compose project for 30 seconds without error. Local +# network bootstrap is ~6s, but github workers can be much slower. 
${COMPOSE_CMD} up -d -sleep 60 +sleep 30 if ${COMPOSE_CMD} ps -q | xargs docker inspect -f '{{ .State.Status }}' | grep -v 'running'; then echo "An error occurred." exit 255 diff --git a/tests/antithesis/README.md b/tests/antithesis/README.md index 96328d211d34..c838b64780a4 100644 --- a/tests/antithesis/README.md +++ b/tests/antithesis/README.md @@ -8,11 +8,14 @@ enables discovery and reproduction of anomalous behavior. ## Package details -| Filename | Purpose | -|:-------------|:----------------------------------------------------------------------------------| -| compose.go | Enables generation of Docker Compose project files for antithesis testing. | -| avalanchego/ | Contains resources supporting antithesis testing of avalanchego's primary chains. | - +| Filename | Purpose | +|:---------------|:-----------------------------------------------------------------------------------| +| compose.go | Generates Docker Compose project file and initial database for antithesis testing. | +| config.go | Defines common flags for the workload binary. | +| init_db.go | Initializes initial db state for subnet testing. | +| node_health.go | Helper to check node health. | +| avalanchego/ | Defines an antithesis test setup for avalanchego's primary chains. | +| xsvm/ | Defines an antithesis test setup for the xsvm VM. | ## Instrumentation @@ -45,3 +48,55 @@ a test setup: In addition, github workflows are suggested to ensure `scripts/tests.build_antithesis_images.sh` runs against PRs and `scripts/build_antithesis_images.sh` runs against pushes. 
+ +## Troubleshooting a test setup + +### Running a workload directly + +The workload of the 'avalanchego' test setup can be invoked against an +arbitrary network: + +```bash +$ AVAWL_URIS="http://10.0.20.3:9650 http://10.0.20.4:9650" go run ./tests/antithesis/avalanchego +``` + +The workload of a subnet test setup like 'xsvm' additionally requires +a network with a configured chain for the xsvm VM and the ID for that +chain needs to be provided to the workload: + +```bash +$ AVAWL_URIS=... CHAIN_IDS="2S9ypz...AzMj9" go run ./tests/antithesis/xsvm +``` + +### Running a workload with docker-compose + +Running the test script for a given test setup with the `DEBUG` flag +set will avoid cleaning up the the temporary directory where the +docker-compose setup is written to. This will allow manual invocation of +docker-compose to see the log output of the workload. + +```bash +$ DEBUG=1 ./scripts/tests.build_antithesis_images.sh +``` + +After the test script has terminated, the name of the temporary +directory will appear in the output of the script: + +``` +... +using temporary directory /tmp/tmp.E6eHdDr4ln as the docker-compose path" +... +``` + +Running compose from the temporary directory will ensure the workload +output appears on stdout for inspection: + +```bash +$ cd [temporary directory] + +# Start the compose project +$ docker-compose up + +# Cleanup the compose project +$ docker-compose down --volumes +``` diff --git a/tests/antithesis/avalanchego/Dockerfile.config b/tests/antithesis/avalanchego/Dockerfile.config index 5c8236c4bdc8..36e0214bb80f 100644 --- a/tests/antithesis/avalanchego/Dockerfile.config +++ b/tests/antithesis/avalanchego/Dockerfile.config @@ -1,29 +1,5 @@ -# The version is supplied as a build argument rather than hard-coded -# to minimize the cost of version changes. 
-ARG GO_VERSION - -# ============= Compilation Stage ================ -FROM golang:$GO_VERSION-bullseye AS builder - -WORKDIR /build -# Copy and download avalanche dependencies using go mod -COPY go.mod . -COPY go.sum . -RUN go mod download - -# Copy the code into the container -COPY . . - -# IMAGE_TAG should be set to the tag for the images in the generated -# docker compose file. -ARG IMAGE_TAG=latest - -# Generate docker compose configuration -RUN TARGET_PATH=./build IMAGE_TAG="$IMAGE_TAG" go run ./tests/antithesis/avalanchego/gencomposeconfig - -# ============= Cleanup Stage ================ FROM scratch AS execution -# Copy the docker compose file and volumes into the container -COPY --from=builder /build/build/docker-compose.yml /docker-compose.yml -COPY --from=builder /build/build/volumes /volumes +# Copy config artifacts from the build path. For simplicity, artifacts +# are built outside of the docker image. +COPY ./build/antithesis/avalanchego/ / diff --git a/tests/antithesis/avalanchego/Dockerfile.node b/tests/antithesis/avalanchego/Dockerfile.node index d4591250e202..6dc1cb782cef 100644 --- a/tests/antithesis/avalanchego/Dockerfile.node +++ b/tests/antithesis/avalanchego/Dockerfile.node @@ -56,6 +56,9 @@ RUN mkdir -p /symbols COPY --from=builder /avalanchego_instrumented/symbols /symbols COPY --from=builder /opt/antithesis/lib/libvoidstar.so /usr/lib/libvoidstar.so +# Use the same path as the uninstrumented node image for consistency +WORKDIR /avalanchego/build + # Copy the executable into the container COPY --from=builder /avalanchego_instrumented/customer/build/avalanchego ./avalanchego diff --git a/tests/antithesis/avalanchego/main.go b/tests/antithesis/avalanchego/main.go index 5d7d614f75c5..57b12a51b6c2 100644 --- a/tests/antithesis/avalanchego/main.go +++ b/tests/antithesis/avalanchego/main.go @@ -11,11 +11,11 @@ import ( "os" "time" - "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/database" 
"github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/tests/antithesis" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -38,13 +38,15 @@ import ( const NumKeys = 5 func main() { - c, err := NewConfig(os.Args) + c, err := antithesis.NewConfig(os.Args) if err != nil { log.Fatalf("invalid config: %s", err) } ctx := context.Background() - awaitHealthyNodes(ctx, c.URIs) + if err := antithesis.AwaitHealthyNodes(ctx, c.URIs); err != nil { + log.Fatalf("failed to await healthy nodes: %s", err) + } kc := secp256k1fx.NewKeychain(genesis.EWOQKey) walletSyncStartTime := time.Now() @@ -99,8 +101,7 @@ func main() { }, }}) if err != nil { - log.Printf("failed to issue initial funding X-chain baseTx: %s", err) - return + log.Fatalf("failed to issue initial funding X-chain baseTx: %s", err) } log.Printf("issued initial funding X-chain baseTx %s in %s", baseTx.ID(), time.Since(baseStartTime)) @@ -133,39 +134,6 @@ func main() { genesisWorkload.run(ctx) } -func awaitHealthyNodes(ctx context.Context, uris []string) { - for _, uri := range uris { - awaitHealthyNode(ctx, uri) - } - log.Println("all nodes reported healthy") -} - -func awaitHealthyNode(ctx context.Context, uri string) { - client := health.NewClient(uri) - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - log.Printf("awaiting node health at %s", uri) - for { - res, err := client.Health(ctx, nil) - switch { - case err != nil: - log.Printf("node couldn't be reached at %s", uri) - case res.Healthy: - log.Printf("node reported healthy at %s", uri) - return - default: - log.Printf("node reported unhealthy at %s", uri) - } - - select { - case <-ticker.C: - case <-ctx.Done(): - log.Printf("node health check cancelled at %s", uri) - } - } -} 
- type workload struct { id int wallet primary.Wallet diff --git a/tests/antithesis/compose.go b/tests/antithesis/compose.go index 3a2a49cb8fd5..e17e189281bd 100644 --- a/tests/antithesis/compose.go +++ b/tests/antithesis/compose.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" "strings" "github.com/compose-spec/compose-go/types" @@ -19,6 +20,8 @@ import ( "github.com/ava-labs/avalanchego/utils/perms" ) +const bootstrapIndex = 0 + // Initialize the given path with the docker-compose configuration (compose file and // volumes) needed for an Antithesis test setup. func GenerateComposeConfig( @@ -94,7 +97,6 @@ func newComposeProject(network *tmpnet.Network, nodeImageName string, workloadIm env := types.Mapping{ config.NetworkNameKey: constants.LocalName, - config.AdminAPIEnabledKey: "true", config.LogLevelKey: logging.Debug.String(), config.LogDisplayLevelKey: logging.Trace.String(), config.HTTPHostKey: "0.0.0.0", @@ -104,13 +106,48 @@ func newComposeProject(network *tmpnet.Network, nodeImageName string, workloadIm config.StakingSignerKeyContentKey: signerKey, } - nodeName := "avalanche" + // Apply configuration appropriate to a test network + for k, v := range tmpnet.DefaultTestFlags() { + switch value := v.(type) { + case string: + env[k] = value + case bool: + env[k] = strconv.FormatBool(value) + default: + return nil, fmt.Errorf("unable to convert unsupported type %T to string", v) + } + } + + serviceName := getServiceName(i) + + volumes := []types.ServiceVolumeConfig{ + { + Type: types.VolumeTypeBind, + Source: fmt.Sprintf("./volumes/%s/logs", serviceName), + Target: "/root/.avalanchego/logs", + }, + } + + trackSubnets, err := node.Flags.GetStringVal(config.TrackSubnetsKey) + if err != nil { + return nil, err + } + if len(trackSubnets) > 0 { + env[config.TrackSubnetsKey] = trackSubnets + if i == bootstrapIndex { + // DB volume for bootstrap node will need to initialized with the subnet + volumes = append(volumes, 
types.ServiceVolumeConfig{ + Type: types.VolumeTypeBind, + Source: fmt.Sprintf("./volumes/%s/db", serviceName), + Target: "/root/.avalanchego/db", + }) + } + } + if i == 0 { - nodeName += "-bootstrap-node" bootstrapIP = address + ":9651" bootstrapIDs = node.NodeID.String() } else { - nodeName = fmt.Sprintf("%s-node-%d", nodeName, i+1) env[config.BootstrapIPsKey] = bootstrapIP env[config.BootstrapIDsKey] = bootstrapIDs } @@ -120,18 +157,12 @@ func newComposeProject(network *tmpnet.Network, nodeImageName string, workloadIm env = keyMapToEnvVarMap(env) services[i+1] = types.ServiceConfig{ - Name: nodeName, - ContainerName: nodeName, - Hostname: nodeName, + Name: serviceName, + ContainerName: serviceName, + Hostname: serviceName, Image: nodeImageName, - Volumes: []types.ServiceVolumeConfig{ - { - Type: types.VolumeTypeBind, - Source: fmt.Sprintf("./volumes/%s/logs", nodeName), - Target: "/root/.avalanchego/logs", - }, - }, - Environment: env.ToMappingWithEquals(), + Volumes: volumes, + Environment: env.ToMappingWithEquals(), Networks: map[string]*types.ServiceNetworkConfig{ networkName: { Ipv4Address: address, @@ -146,6 +177,15 @@ func newComposeProject(network *tmpnet.Network, nodeImageName string, workloadIm workloadEnv := types.Mapping{ "AVAWL_URIS": strings.Join(uris, " "), } + chainIDs := []string{} + for _, subnet := range network.Subnets { + for _, chain := range subnet.Chains { + chainIDs = append(chainIDs, chain.ChainID.String()) + } + } + if len(chainIDs) > 0 { + workloadEnv["AVAWL_CHAIN_IDS"] = strings.Join(chainIDs, " ") + } workloadName := "workload" services[0] = types.ServiceConfig{ @@ -188,3 +228,14 @@ func keyMapToEnvVarMap(keyMap types.Mapping) types.Mapping { } return envVarMap } + +// Retrieve the service name for a node at the given index. Common to +// GenerateComposeConfig and InitDBVolumes to ensure consistency +// between db volumes configuration and volume paths. 
+func getServiceName(index int) string { + baseName := "avalanche" + if index == 0 { + return baseName + "-bootstrap-node" + } + return fmt.Sprintf("%s-node-%d", baseName, index) +} diff --git a/tests/antithesis/avalanchego/config.go b/tests/antithesis/config.go similarity index 83% rename from tests/antithesis/avalanchego/config.go rename to tests/antithesis/config.go index 2db1af5fa06b..471b12bb2c17 100644 --- a/tests/antithesis/avalanchego/config.go +++ b/tests/antithesis/config.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package main +package antithesis import ( "errors" @@ -15,7 +15,8 @@ import ( ) const ( - URIsKey = "uris" + URIsKey = "uris" + ChainIDsKey = "chain-ids" FlagsName = "workload" EnvPrefix = "avawl" @@ -27,7 +28,8 @@ var ( ) type Config struct { - URIs []string + URIs []string + ChainIDs []string } func NewConfig(arguments []string) (*Config, error) { @@ -37,7 +39,8 @@ func NewConfig(arguments []string) (*Config, error) { } c := &Config{ - URIs: v.GetStringSlice(URIsKey), + URIs: v.GetStringSlice(URIsKey), + ChainIDs: v.GetStringSlice(ChainIDsKey), } return c, c.Verify() } @@ -56,6 +59,7 @@ func parseFlags(arguments []string) (*viper.Viper, error) { fs := pflag.NewFlagSet(FlagsName, pflag.ContinueOnError) fs.StringSlice(URIsKey, []string{primary.LocalAPIURI}, "URIs of nodes that the workload can communicate with") + fs.StringSlice(ChainIDsKey, []string{}, "IDs of chains to target for testing") if err := fs.Parse(arguments[1:]); err != nil { return nil, fmt.Errorf("failed parsing CLI flags: %w", err) } diff --git a/tests/antithesis/init_db.go b/tests/antithesis/init_db.go new file mode 100644 index 000000000000..cad82f623b1d --- /dev/null +++ b/tests/antithesis/init_db.go @@ -0,0 +1,60 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package antithesis + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/perms" +) + +// Given a path, compose the expected path of the bootstrap node's docker compose db volume. +func GetBootstrapVolumePath(targetPath string) (string, error) { + absPath, err := filepath.Abs(targetPath) + if err != nil { + return "", fmt.Errorf("failed to convert target path to absolute path: %w", err) + } + return filepath.Join(absPath, "volumes", getServiceName(bootstrapIndex)), nil +} + +// Bootstraps a local process-based network, creates its subnets and chains, and copies +// the resulting db state from one of the nodes to the provided path. The path will be +// created if it does not already exist. +func InitBootstrapDB(network *tmpnet.Network, avalancheGoPath string, pluginDir string, destPath string) error { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*2) + defer cancel() + if err := tmpnet.StartNewNetwork( + ctx, + os.Stdout, + network, + "", + avalancheGoPath, + pluginDir, + ); err != nil { + return fmt.Errorf("failed to start network: %w", err) + } + // Since the goal is to initialize the DB, we can stop the network after it has been started successfully + if err := network.Stop(ctx); err != nil { + return fmt.Errorf("failed to stop network: %w", err) + } + + // Copy the db state from the bootstrap node to the compose volume path. 
+ sourcePath := filepath.Join(network.Nodes[0].GetDataDir(), "db") + if err := os.MkdirAll(destPath, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create db path %q: %w", destPath, err) + } + // TODO(marun) Replace with os.CopyFS once we upgrade to Go 1.23 + cmd := exec.Command("cp", "-r", sourcePath, destPath) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to copy bootstrap db from %q to %q: %w", sourcePath, destPath, err) + } + + return nil +} diff --git a/tests/antithesis/node_health.go b/tests/antithesis/node_health.go new file mode 100644 index 000000000000..039442398a73 --- /dev/null +++ b/tests/antithesis/node_health.go @@ -0,0 +1,50 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package antithesis + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/ava-labs/avalanchego/api/health" +) + +// Waits for the nodes at the provided URIs to report healthy. +func AwaitHealthyNodes(ctx context.Context, uris []string) error { + for _, uri := range uris { + if err := awaitHealthyNode(ctx, uri); err != nil { + return err + } + } + log.Println("all nodes reported healthy") + return nil +} + +func awaitHealthyNode(ctx context.Context, uri string) error { + client := health.NewClient(uri) + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + log.Printf("awaiting node health at %s", uri) + for { + res, err := client.Health(ctx, nil) + switch { + case err != nil: + log.Printf("node couldn't be reached at %s", uri) + case res.Healthy: + log.Printf("node reported healthy at %s", uri) + return nil + default: + log.Printf("node reported unhealthy at %s", uri) + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return fmt.Errorf("node health check cancelled at %s: %w", uri, ctx.Err()) + } + } +} diff --git a/tests/antithesis/xsvm/Dockerfile.config b/tests/antithesis/xsvm/Dockerfile.config new file mode 100644 index 
000000000000..3c1128c6f51b --- /dev/null +++ b/tests/antithesis/xsvm/Dockerfile.config @@ -0,0 +1,5 @@ +FROM scratch AS execution + +# Copy config artifacts from the build path. For simplicity, artifacts +# are built outside of the docker image. +COPY ./build/antithesis/xsvm/ / diff --git a/tests/antithesis/xsvm/Dockerfile.node b/tests/antithesis/xsvm/Dockerfile.node new file mode 100644 index 000000000000..1d8d673026d8 --- /dev/null +++ b/tests/antithesis/xsvm/Dockerfile.node @@ -0,0 +1,61 @@ +# The version is supplied as a build argument rather than hard-coded +# to minimize the cost of version changes. +ARG GO_VERSION + +# AVALANCHEGO_NODE_IMAGE needs to identify an existing avalanchego node image and should include the tag +ARG AVALANCHEGO_NODE_IMAGE + +# Antithesis: Getting the Antithesis golang instrumentation library +FROM docker.io/antithesishq/go-instrumentor AS instrumentor + +# ============= Compilation Stage ================ +FROM golang:$GO_VERSION-bullseye AS builder + +WORKDIR /build +# Copy and download avalanche dependencies using go mod +COPY go.mod . +COPY go.sum . +RUN go mod download + +# Copy the code into the container +COPY . . + +# Keep the commit hash to easily verify the exact version that is running +RUN git rev-parse HEAD > ./commit_hash.txt + +# Copy the instrumentor and supporting files to their correct locations +COPY --from=instrumentor /opt/antithesis /opt/antithesis +COPY --from=instrumentor /opt/antithesis/lib /lib + +# Create the destination output directory for the instrumented code +RUN mkdir -p /avalanchego_instrumented + +# Park the .git file in a safe location +RUN mkdir -p /opt/tmp/ +RUN cp -r .git /opt/tmp/ + +# Instrument avalanchego +RUN /opt/antithesis/bin/goinstrumentor \ + -stderrthreshold=INFO \ + -antithesis /opt/antithesis/instrumentation \ + . 
\ + /avalanchego_instrumented + +WORKDIR /avalanchego_instrumented/customer +RUN go mod download +RUN ln -s /opt/tmp/.git .git + +# Build xsvm VM +RUN ./scripts/build_xsvm.sh + +# ============= Cleanup Stage ================ +FROM $AVALANCHEGO_NODE_IMAGE AS execution + +# The commit hash and antithesis dependencies should be part of the base image. + +# Copy the executable into the container +RUN mkdir -p /root/.avalanchego/plugins +COPY --from=builder /avalanchego_instrumented/customer/build/xsvm \ + /root/.avalanchego/plugins/v3m4wPxaHpvGr8qfMeyK6PRW3idZrPHmYcMTt7oXdK47yurVH + +# The node image's entrypoint will be reused. diff --git a/tests/antithesis/xsvm/Dockerfile.workload b/tests/antithesis/xsvm/Dockerfile.workload new file mode 100644 index 000000000000..f9da9009fb05 --- /dev/null +++ b/tests/antithesis/xsvm/Dockerfile.workload @@ -0,0 +1,30 @@ +# The version is supplied as a build argument rather than hard-coded +# to minimize the cost of version changes. +ARG GO_VERSION + +# NODE_IMAGE needs to identify an existing node image and should include the tag +ARG NODE_IMAGE + +# ============= Compilation Stage ================ +FROM golang:$GO_VERSION-bullseye AS builder + +WORKDIR /build +# Copy and download avalanche dependencies using go mod +COPY go.mod . +COPY go.sum . +RUN go mod download + +# Copy the code into the container +COPY . . 
+ +# Build the workload +RUN ./scripts/build_antithesis_xsvm_workload.sh + +# ============= Cleanup Stage ================ +# Base the workflow on the node image to support bootstrap testing +FROM $NODE_IMAGE AS execution + +# Copy the executable into the container +COPY --from=builder /build/build/antithesis-xsvm-workload ./workload + +CMD [ "./workload" ] diff --git a/tests/antithesis/xsvm/gencomposeconfig/main.go b/tests/antithesis/xsvm/gencomposeconfig/main.go new file mode 100644 index 000000000000..43720d56155c --- /dev/null +++ b/tests/antithesis/xsvm/gencomposeconfig/main.go @@ -0,0 +1,62 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "fmt" + "log" + "os" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/tests/antithesis" + "github.com/ava-labs/avalanchego/tests/fixture/subnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" +) + +const baseImageName = "antithesis-xsvm" + +// Creates docker-compose.yml and its associated volumes in the target path. 
+func main() { + avalancheGoPath := os.Getenv("AVALANCHEGO_PATH") + if len(avalancheGoPath) == 0 { + log.Fatal("AVALANCHEGO_PATH environment variable not set") + } + + pluginDir := os.Getenv("AVALANCHEGO_PLUGIN_DIR") + if len(pluginDir) == 0 { + log.Fatal("AVALANCHEGO_PLUGIN_DIR environment variable not set") + } + + targetPath := os.Getenv("TARGET_PATH") + if len(targetPath) == 0 { + log.Fatal("TARGET_PATH environment variable not set") + } + + imageTag := os.Getenv("IMAGE_TAG") + if len(imageTag) == 0 { + log.Fatal("IMAGE_TAG environment variable not set") + } + + nodeImageName := fmt.Sprintf("%s-node:%s", baseImageName, imageTag) + workloadImageName := fmt.Sprintf("%s-workload:%s", baseImageName, imageTag) + + // Create a network with an xsvm subnet + network := tmpnet.LocalNetworkOrPanic() + network.Subnets = []*tmpnet.Subnet{ + subnet.NewXSVMOrPanic("xsvm", genesis.VMRQKey, network.Nodes...), + } + + bootstrapVolumePath, err := antithesis.GetBootstrapVolumePath(targetPath) + if err != nil { + log.Fatalf("failed to get bootstrap volume path: %v", err) + } + + if err := antithesis.InitBootstrapDB(network, avalancheGoPath, pluginDir, bootstrapVolumePath); err != nil { + log.Fatalf("failed to initialize db volumes: %v", err) + } + + if err := antithesis.GenerateComposeConfig(network, nodeImageName, workloadImageName, targetPath); err != nil { + log.Fatalf("failed to generate config for docker-compose: %v", err) + } +} diff --git a/tests/antithesis/xsvm/main.go b/tests/antithesis/xsvm/main.go new file mode 100644 index 000000000000..84371752d4ef --- /dev/null +++ b/tests/antithesis/xsvm/main.go @@ -0,0 +1,168 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "crypto/rand" + "log" + "math/big" + "os" + "time" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests/antithesis" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/status" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/transfer" +) + +const NumKeys = 5 + +func main() { + c, err := antithesis.NewConfig(os.Args) + if err != nil { + log.Fatalf("invalid config: %s", err) + } + + ctx := context.Background() + if err := antithesis.AwaitHealthyNodes(ctx, c.URIs); err != nil { + log.Fatalf("failed to await healthy nodes: %s", err) + } + + if len(c.ChainIDs) != 1 { + log.Fatalf("expected 1 chainID, saw %d", len(c.ChainIDs)) + } + chainID, err := ids.FromString(c.ChainIDs[0]) + if err != nil { + log.Fatalf("failed to parse chainID: %s", err) + } + + genesisWorkload := &workload{ + id: 0, + chainID: chainID, + key: genesis.VMRQKey, + addrs: set.Of(genesis.VMRQKey.Address()), + uris: c.URIs, + } + + workloads := make([]*workload, NumKeys) + workloads[0] = genesisWorkload + + initialAmount := 100 * units.KiloAvax + for i := 1; i < NumKeys; i++ { + key, err := secp256k1.NewPrivateKey() + if err != nil { + log.Fatalf("failed to generate key: %s", err) + } + + var ( + addr = key.Address() + baseStartTime = time.Now() + ) + transferTxStatus, err := transfer.Transfer( + ctx, + &transfer.Config{ + URI: c.URIs[0], + ChainID: chainID, + AssetID: chainID, + Amount: initialAmount, + To: addr, + PrivateKey: genesisWorkload.key, + }, + ) + if err != nil { + log.Fatalf("failed to issue initial funding transfer: %s", err) + } + log.Printf("issued initial funding transfer 
%s in %s", transferTxStatus.TxID, time.Since(baseStartTime)) + + genesisWorkload.confirmTransferTx(ctx, transferTxStatus) + + workloads[i] = &workload{ + id: i, + chainID: chainID, + key: key, + addrs: set.Of(addr), + uris: c.URIs, + } + } + + for _, w := range workloads[1:] { + go w.run(ctx) + } + genesisWorkload.run(ctx) +} + +type workload struct { + id int + chainID ids.ID + key *secp256k1.PrivateKey + addrs set.Set[ids.ShortID] + uris []string +} + +func (w *workload) run(ctx context.Context) { + timer := time.NewTimer(0) + if !timer.Stop() { + <-timer.C + } + + uri := w.uris[w.id%len(w.uris)] + + client := api.NewClient(uri, w.chainID.String()) + balance, err := client.Balance(ctx, w.key.Address(), w.chainID) + if err != nil { + log.Fatalf("failed to fetch balance: %s", err) + } + log.Printf("worker %d starting with a balance of %d", w.id, balance) + + for { + log.Printf("worker %d executing transfer", w.id) + destAddress, _ := w.addrs.Peek() + txStatus, err := transfer.Transfer( + ctx, + &transfer.Config{ + URI: uri, + ChainID: w.chainID, + AssetID: w.chainID, + Amount: units.Schmeckle, + To: destAddress, + PrivateKey: w.key, + }, + ) + if err != nil { + log.Printf("worker %d failed to issue transfer: %s", w.id, err) + } else { + log.Printf("worker %d issued transfer %s in %s", w.id, txStatus.TxID, time.Since(txStatus.StartTime)) + w.confirmTransferTx(ctx, txStatus) + } + + val, err := rand.Int(rand.Reader, big.NewInt(int64(time.Second))) + if err != nil { + log.Fatalf("failed to read randomness: %s", err) + } + + timer.Reset(time.Duration(val.Int64())) + select { + case <-ctx.Done(): + return + case <-timer.C: + } + } +} + +func (w *workload) confirmTransferTx(ctx context.Context, tx *status.TxIssuance) { + for _, uri := range w.uris { + client := api.NewClient(uri, w.chainID.String()) + if err := api.WaitForAcceptance(ctx, client, w.key.Address(), tx.Nonce); err != nil { + log.Printf("worker %d failed to confirm transaction %s on %s: %s", w.id, tx.TxID, 
uri, err) + return + } + } + log.Printf("worker %d confirmed transaction %s on all nodes", w.id, tx.TxID) +} diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 73c29b3bc83f..9d235bc363da 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -38,7 +38,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Run only once in the first ginkgo process nodes := tmpnet.NewNodesOrPanic(flagVars.NodeCount()) - subnets := vms.XSVMSubnets(nodes...) + subnets := vms.XSVMSubnetsOrPanic(nodes...) return e2e.NewTestEnvironment( flagVars, &tmpnet.Network{ diff --git a/tests/e2e/vms/xsvm.go b/tests/e2e/vms/xsvm.go index 6e8d79c51d15..ebb00f882b16 100644 --- a/tests/e2e/vms/xsvm.go +++ b/tests/e2e/vms/xsvm.go @@ -5,22 +5,19 @@ package vms import ( "fmt" - "math" - "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/tests" "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/subnet" "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/example/xsvm" "github.com/ava-labs/avalanchego/vms/example/xsvm/api" "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/export" "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/importtx" "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/transfer" - "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" ginkgo "github.com/onsi/ginkgo/v2" ) @@ -30,10 +27,14 @@ var ( subnetBName = "xsvm-b" ) -func XSVMSubnets(nodes ...*tmpnet.Node) []*tmpnet.Subnet { +func XSVMSubnetsOrPanic(nodes ...*tmpnet.Node) []*tmpnet.Subnet { + key, err := secp256k1.NewPrivateKey() + if err != nil { + panic(err) + } return []*tmpnet.Subnet{ - newXSVMSubnet(subnetAName, nodes...), - 
newXSVMSubnet(subnetBName, nodes...), + subnet.NewXSVMOrPanic(subnetAName, key, nodes...), + subnet.NewXSVMOrPanic(subnetBName, key, nodes...), } } @@ -141,39 +142,3 @@ var _ = ginkgo.Describe("[XSVM]", func() { require.Equal(units.Schmeckle, destinationBalance) }) }) - -func newXSVMSubnet(name string, nodes ...*tmpnet.Node) *tmpnet.Subnet { - if len(nodes) == 0 { - panic("a subnet must be validated by at least one node") - } - - key, err := secp256k1.NewPrivateKey() - if err != nil { - panic(err) - } - - genesisBytes, err := genesis.Codec.Marshal(genesis.CodecVersion, &genesis.Genesis{ - Timestamp: time.Now().Unix(), - Allocations: []genesis.Allocation{ - { - Address: key.Address(), - Balance: math.MaxUint64, - }, - }, - }) - if err != nil { - panic(err) - } - - return &tmpnet.Subnet{ - Name: name, - Chains: []*tmpnet.Chain{ - { - VMID: xsvm.ID, - Genesis: genesisBytes, - PreFundedKey: key, - }, - }, - ValidatorIDs: tmpnet.NodesToIDs(nodes...), - } -} diff --git a/tests/fixture/subnet/xsvm.go b/tests/fixture/subnet/xsvm.go new file mode 100644 index 000000000000..28fb017da5a9 --- /dev/null +++ b/tests/fixture/subnet/xsvm.go @@ -0,0 +1,45 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package subnet + +import ( + "math" + "time" + + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/example/xsvm" + "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" +) + +func NewXSVMOrPanic(name string, key *secp256k1.PrivateKey, nodes ...*tmpnet.Node) *tmpnet.Subnet { + if len(nodes) == 0 { + panic("a subnet must be validated by at least one node") + } + + genesisBytes, err := genesis.Codec.Marshal(genesis.CodecVersion, &genesis.Genesis{ + Timestamp: time.Now().Unix(), + Allocations: []genesis.Allocation{ + { + Address: key.Address(), + Balance: math.MaxUint64, + }, + }, + }) + if err != nil { + panic(err) + } + + return &tmpnet.Subnet{ + Name: name, + Chains: []*tmpnet.Chain{ + { + VMID: xsvm.ID, + Genesis: genesisBytes, + PreFundedKey: key, + }, + }, + ValidatorIDs: tmpnet.NodesToIDs(nodes...), + } +} diff --git a/tests/fixture/tmpnet/defaults.go b/tests/fixture/tmpnet/defaults.go index e2f0a2afd61d..c5dbfeeebc96 100644 --- a/tests/fixture/tmpnet/defaults.go +++ b/tests/fixture/tmpnet/defaults.go @@ -35,25 +35,35 @@ const ( defaultConfigFilename = "config.json" ) -// A set of flags appropriate for testing. -func DefaultFlags() FlagsMap { - // Supply only non-default configuration to ensure that default values will be used. +// Flags appropriate for networks used for all types of testing. 
+func DefaultTestFlags() FlagsMap { return FlagsMap{ config.NetworkPeerListPullGossipFreqKey: "250ms", config.NetworkMaxReconnectDelayKey: "1s", - config.PublicIPKey: "127.0.0.1", - config.HTTPHostKey: "127.0.0.1", - config.StakingHostKey: "127.0.0.1", config.HealthCheckFreqKey: "2s", config.AdminAPIEnabledKey: true, config.IndexEnabledKey: true, - config.LogDisplayLevelKey: logging.Off.String(), // Display logging not needed since nodes run headless - config.LogLevelKey: logging.Debug.String(), - config.MinStakeDurationKey: DefaultMinStakeDuration.String(), - config.ProposerVMUseCurrentHeightKey: true, } } +// Flags appropriate for tmpnet networks. +func DefaultTmpnetFlags() FlagsMap { + // Supply only non-default configuration to ensure that default values will be used. + flags := FlagsMap{ + // Specific to tmpnet deployment + config.PublicIPKey: "127.0.0.1", + config.HTTPHostKey: "127.0.0.1", + config.StakingHostKey: "127.0.0.1", + config.LogDisplayLevelKey: logging.Off.String(), // Display logging not needed since nodes run headless + config.LogLevelKey: logging.Debug.String(), + // Specific to e2e testing + config.MinStakeDurationKey: DefaultMinStakeDuration.String(), + config.ProposerVMUseCurrentHeightKey: true, + } + flags.SetDefaults(DefaultTestFlags()) + return flags +} + // A set of chain configurations appropriate for testing. 
func DefaultChainConfigs() map[string]FlagsMap { return map[string]FlagsMap{ diff --git a/tests/fixture/tmpnet/network.go b/tests/fixture/tmpnet/network.go index bd5b1b914efc..e3efdd88cf5c 100644 --- a/tests/fixture/tmpnet/network.go +++ b/tests/fixture/tmpnet/network.go @@ -198,7 +198,7 @@ func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, plugi if n.DefaultFlags == nil { n.DefaultFlags = FlagsMap{} } - n.DefaultFlags.SetDefaults(DefaultFlags()) + n.DefaultFlags.SetDefaults(DefaultTmpnetFlags()) if len(n.Nodes) == 1 { // Sybil protection needs to be disabled for a single node network to start @@ -214,8 +214,8 @@ func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, plugi } } - // Ensure pre-funded keys - if len(n.PreFundedKeys) == 0 { + // Ensure pre-funded keys if the genesis is not predefined + if n.Genesis == nil && len(n.PreFundedKeys) == 0 { keys, err := NewPrivateKeys(DefaultPreFundedKeyCount) if err != nil { return err @@ -294,7 +294,7 @@ func (n *Network) Create(rootDir string) error { } } - if n.Genesis == nil { + if n.NetworkID == 0 && n.Genesis == nil { // Pre-fund known legacy keys to support ad-hoc testing. Usage of a legacy key will // require knowing the key beforehand rather than retrieving it from the set of pre-funded // keys exposed by a network. Since allocation will not be exclusive, a test using a @@ -525,10 +525,13 @@ func (n *Network) EnsureNodeConfig(node *Node) error { // Set fields including the network path if len(n.Dir) > 0 { defaultFlags := FlagsMap{ - config.GenesisFileKey: n.getGenesisPath(), config.ChainConfigDirKey: n.getChainConfigDir(), } + if n.Genesis != nil { + defaultFlags[config.GenesisFileKey] = n.getGenesisPath() + } + // Only set the subnet dir if it exists or the node won't start. 
subnetDir := n.getSubnetDir() if _, err := os.Stat(subnetDir); err == nil { @@ -540,7 +543,7 @@ func (n *Network) EnsureNodeConfig(node *Node) error { node.Flags.SetDefaults(defaultFlags) // Ensure the node's data dir is configured - dataDir := node.getDataDir() + dataDir := node.GetDataDir() if len(dataDir) == 0 { // NodeID will have been set by EnsureKeys dataDir = filepath.Join(n.Dir, node.NodeID.String()) @@ -637,9 +640,6 @@ func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { return nil } - // Ensure the in-memory subnet state - n.Subnets = append(n.Subnets, createdSubnets...) - // Ensure the pre-funded key changes are persisted to disk if err := n.Write(); err != nil { return err diff --git a/tests/fixture/tmpnet/node.go b/tests/fixture/tmpnet/node.go index 99777e674c04..e29cb92a2960 100644 --- a/tests/fixture/tmpnet/node.go +++ b/tests/fixture/tmpnet/node.go @@ -190,7 +190,7 @@ func (n *Node) readState() error { return n.getRuntime().readState() } -func (n *Node) getDataDir() string { +func (n *Node) GetDataDir() string { return cast.ToString(n.Flags[config.DataDirKey]) } diff --git a/tests/fixture/tmpnet/node_config.go b/tests/fixture/tmpnet/node_config.go index 1f47c926dfc0..4752b2c343c3 100644 --- a/tests/fixture/tmpnet/node_config.go +++ b/tests/fixture/tmpnet/node_config.go @@ -18,7 +18,7 @@ import ( // (reading/writing configuration) and node.go (orchestration). 
func (n *Node) getFlagsPath() string { - return filepath.Join(n.getDataDir(), "flags.json") + return filepath.Join(n.GetDataDir(), "flags.json") } func (n *Node) readFlags() error { @@ -46,7 +46,7 @@ func (n *Node) writeFlags() error { } func (n *Node) getConfigPath() string { - return filepath.Join(n.getDataDir(), defaultConfigFilename) + return filepath.Join(n.GetDataDir(), defaultConfigFilename) } func (n *Node) readConfig() error { @@ -95,7 +95,7 @@ func (n *Node) Read() error { } func (n *Node) Write() error { - if err := os.MkdirAll(n.getDataDir(), perms.ReadWriteExecute); err != nil { + if err := os.MkdirAll(n.GetDataDir(), perms.ReadWriteExecute); err != nil { return fmt.Errorf("failed to create node dir: %w", err) } @@ -106,7 +106,7 @@ func (n *Node) Write() error { } func (n *Node) writeMetricsSnapshot(data []byte) error { - metricsDir := filepath.Join(n.getDataDir(), "metrics") + metricsDir := filepath.Join(n.GetDataDir(), "metrics") if err := os.MkdirAll(metricsDir, perms.ReadWriteExecute); err != nil { return fmt.Errorf("failed to create metrics dir: %w", err) } diff --git a/tests/fixture/tmpnet/node_process.go b/tests/fixture/tmpnet/node_process.go index f2a9c7ff628d..a866cec63db9 100644 --- a/tests/fixture/tmpnet/node_process.go +++ b/tests/fixture/tmpnet/node_process.go @@ -122,7 +122,7 @@ func (p *NodeProcess) Start(w io.Writer) error { } // Determine appropriate level of node description detail - dataDir := p.node.getDataDir() + dataDir := p.node.GetDataDir() nodeDescription := fmt.Sprintf("node %q", p.node.NodeID) if p.node.IsEphemeral { nodeDescription = "ephemeral " + nodeDescription @@ -201,7 +201,7 @@ func (p *NodeProcess) IsHealthy(ctx context.Context) (bool, error) { } func (p *NodeProcess) getProcessContextPath() string { - return filepath.Join(p.node.getDataDir(), config.DefaultProcessContextFilename) + return filepath.Join(p.node.GetDataDir(), config.DefaultProcessContextFilename) } func (p *NodeProcess) waitForProcessContext(ctx 
context.Context) error { @@ -294,7 +294,7 @@ func (p *NodeProcess) writeMonitoringConfig() error { } promtailLabels := FlagsMap{ - "__path__": filepath.Join(p.node.getDataDir(), "logs", "*.log"), + "__path__": filepath.Join(p.node.GetDataDir(), "logs", "*.log"), } promtailLabels.SetDefaults(commonLabels) promtailConfig := []FlagsMap{ diff --git a/vms/example/xsvm/Dockerfile b/vms/example/xsvm/Dockerfile new file mode 100644 index 000000000000..8e7c4c5bba9f --- /dev/null +++ b/vms/example/xsvm/Dockerfile @@ -0,0 +1,31 @@ +# The version is supplied as a build argument rather than hard-coded +# to minimize the cost of version changes. +ARG GO_VERSION + +# AVALANCHEGO_NODE_IMAGE needs to identify an existing node image and should include the tag +ARG AVALANCHEGO_NODE_IMAGE + +# ============= Compilation Stage ================ +FROM golang:$GO_VERSION-bullseye AS builder + +WORKDIR /build + +# Copy and download avalanche dependencies using go mod +COPY go.mod . +COPY go.sum . +RUN go mod download + +# Copy the code into the container +COPY . . + +# Build xsvm +RUN ./scripts/build_xsvm.sh + +# ============= Cleanup Stage ================ +FROM $AVALANCHEGO_NODE_IMAGE AS execution + +# Copy the xsvm binary to the default plugin path +RUN mkdir -p /root/.avalanchego/plugins +COPY --from=builder /build/build/xsvm /root/.avalanchego/plugins/v3m4wPxaHpvGr8qfMeyK6PRW3idZrPHmYcMTt7oXdK47yurVH + +# The node image's entrypoint will be reused. 
From fc2d8cbe4a1a50cadb7cab2e27a0befdd6c0b106 Mon Sep 17 00:00:00 2001 From: marun Date: Wed, 22 May 2024 08:28:49 -0700 Subject: [PATCH 015/102] [antithesis] Ensure node image is pushed (#3042) --- scripts/build_antithesis_images.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/scripts/build_antithesis_images.sh b/scripts/build_antithesis_images.sh index 8e3c534bbcbe..8e49d1bcb700 100755 --- a/scripts/build_antithesis_images.sh +++ b/scripts/build_antithesis_images.sh @@ -61,13 +61,17 @@ function build_images { docker_cmd="${docker_cmd} --build-arg AVALANCHEGO_NODE_IMAGE=antithesis-avalanchego-node:${TAG}" fi - # Build node image first to allow the workload image to use it. - ${docker_cmd} -t "${node_image_name}" -f "${node_dockerfile}" "${AVALANCHE_PATH}" - if [[ -n "${image_prefix}" ]]; then - # Push images with an image prefix since the prefix defines a registry location + if [[ -n "${image_prefix}" && -z "${node_only}" ]]; then + # Push images with an image prefix since the prefix defines a + # registry location, and only if building all images. When + # building just the node image the image is only intended to be + # used locally. docker_cmd="${docker_cmd} --push" fi + # Build node image first to allow the workload image to use it. + ${docker_cmd} -t "${node_image_name}" -f "${node_dockerfile}" "${AVALANCHE_PATH}" + if [[ -n "${node_only}" ]]; then # Skip building the config and workload images. Supports building the avalanchego # node image as the base image for the xsvm node image. 
From 04d883ba6d5e699555efad3326294ec99933caa0 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 22 May 2024 11:59:33 -0400 Subject: [PATCH 016/102] Cleanup fee config passing (#3043) --- node/node.go | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/node/node.go b/node/node.go index fff8e13572cb..3138f00c1c38 100644 --- a/node/node.go +++ b/node/node.go @@ -75,7 +75,6 @@ import ( "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/registry" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" @@ -1134,25 +1133,15 @@ func (n *Node) initVMs() error { SybilProtectionEnabled: n.Config.SybilProtectionEnabled, PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, TrackedSubnets: n.Config.TrackedSubnets, - StaticFeeConfig: fee.StaticConfig{ - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, - CreateSubnetTxFee: n.Config.CreateSubnetTxFee, - TransformSubnetTxFee: n.Config.TransformSubnetTxFee, - CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, - AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, - AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, - AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, - AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, - }, - UptimePercentage: n.Config.UptimeRequirement, - MinValidatorStake: n.Config.MinValidatorStake, - MaxValidatorStake: n.Config.MaxValidatorStake, - MinDelegatorStake: n.Config.MinDelegatorStake, - MinDelegationFee: n.Config.MinDelegationFee, - MinStakeDuration: n.Config.MinStakeDuration, - MaxStakeDuration: n.Config.MaxStakeDuration, - RewardConfig: n.Config.RewardConfig, + 
StaticFeeConfig: n.Config.StaticConfig, + UptimePercentage: n.Config.UptimeRequirement, + MinValidatorStake: n.Config.MinValidatorStake, + MaxValidatorStake: n.Config.MaxValidatorStake, + MinDelegatorStake: n.Config.MinDelegatorStake, + MinDelegationFee: n.Config.MinDelegationFee, + MinStakeDuration: n.Config.MinStakeDuration, + MaxStakeDuration: n.Config.MaxStakeDuration, + RewardConfig: n.Config.RewardConfig, UpgradeConfig: upgrade.Config{ ApricotPhase3Time: version.GetApricotPhase3Time(n.Config.NetworkID), ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), From 5fe91a4dc68627d2a654b26362d578ba5a7930ac Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 22 May 2024 12:49:23 -0400 Subject: [PATCH 017/102] Fix typo fix (#3044) --- snow/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snow/README.md b/snow/README.md index 86a90919b2ac..99cfc5351356 100644 --- a/snow/README.md +++ b/snow/README.md @@ -47,7 +47,7 @@ Currently, Avalanchego implements its own message serialization to communicate. ### [Network](https://github.com/ava-labs/avalanchego/blob/master/network/network.go) -The networking interface is shared across all chains. It implements functions from the `ExternalSender` interface. The two functions it implements are `Send` and `Gossip`. `Send` sends a message of type `OutboundMessage` to a specific set of nodes (specified by an array of `NodeIDs`). `Gossip` sends a message of type `OutboundMessage` to a random group of nodes in a subnet (can be a validator or a non-validator). gossiping is used to push transactions across the network. The networking protocol uses TLS to pass messages between peers. +The networking interface is shared across all chains. It implements functions from the `ExternalSender` interface. The two functions it implements are `Send` and `Gossip`. `Send` sends a message of type `OutboundMessage` to a specific set of nodes (specified by an array of `NodeIDs`). 
`Gossip` sends a message of type `OutboundMessage` to a random group of nodes in a subnet (can be a validator or a non-validator). Gossiping is used to push transactions across the network. The networking protocol uses TLS to pass messages between peers. Along with sending and gossiping, the networking library is also responsible for making connections and maintaining connections. Any node whether they are a validator or non-validator will attempt to connect to the primary network. From dd7a18f268cfe5b0b7632d2b58fc0ad0c631742b Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 22 May 2024 17:08:06 -0400 Subject: [PATCH 018/102] Grab iterator at previously executed height (#3045) --- .../snowman/bootstrap/interval/state.go | 10 +++++ snow/engine/snowman/bootstrap/storage.go | 41 ++++++++++++------- 2 files changed, 37 insertions(+), 14 deletions(-) diff --git a/snow/engine/snowman/bootstrap/interval/state.go b/snow/engine/snowman/bootstrap/interval/state.go index cf2e2bf3a2ef..8ba06824eea2 100644 --- a/snow/engine/snowman/bootstrap/interval/state.go +++ b/snow/engine/snowman/bootstrap/interval/state.go @@ -78,6 +78,16 @@ func GetBlockIterator(db database.Iteratee) database.Iterator { return db.NewIteratorWithPrefix(blockPrefix) } +// GetBlockIterator returns a block iterator that will produce values +// corresponding to persisted blocks in order of increasing height starting at +// [height]. 
+func GetBlockIteratorWithStart(db database.Iteratee, height uint64) database.Iterator { + return db.NewIteratorWithStartAndPrefix( + makeBlockKey(height), + blockPrefix, + ) +} + func GetBlock(db database.KeyValueReader, height uint64) ([]byte, error) { return db.Get(makeBlockKey(height)) } diff --git a/snow/engine/snowman/bootstrap/storage.go b/snow/engine/snowman/bootstrap/storage.go index b70330218439..53c2e735a572 100644 --- a/snow/engine/snowman/bootstrap/storage.go +++ b/snow/engine/snowman/bootstrap/storage.go @@ -139,7 +139,9 @@ func execute( log("compacting database before executing blocks...") if err := db.Compact(nil, nil); err != nil { // Not a fatal error, log and move on. - log("failed to compact bootstrap database before executing blocks", zap.Error(err)) + log("failed to compact bootstrap database before executing blocks", + zap.Error(err), + ) } } @@ -167,6 +169,25 @@ func execute( ) defer func() { iterator.Release() + + halted := haltable.Halted() + if !halted { + log("compacting database after executing blocks...") + if err := db.Compact(nil, nil); err != nil { + // Not a fatal error, log and move on. + log("failed to compact bootstrap database after executing blocks", + zap.Error(err), + ) + } + } + + numProcessed := totalNumberToProcess - tree.Len() + log("executed blocks", + zap.Uint64("numExecuted", numProcessed), + zap.Uint64("numToExecute", totalNumberToProcess), + zap.Bool("halted", halted), + zap.Duration("duration", time.Since(startTime)), + ) }() log("executing blocks", @@ -208,7 +229,10 @@ func execute( processedSinceIteratorRelease = 0 iterator.Release() - iterator = interval.GetBlockIterator(db) + // We specify the starting key of the iterator so that the + // underlying database doesn't need to scan over the, potentially + // not yet compacted, blocks we just deleted. 
+ iterator = interval.GetBlockIteratorWithStart(db, height+1) } if now := time.Now(); now.After(timeOfNextLog) { @@ -248,16 +272,5 @@ func execute( if err := writeBatch(); err != nil { return err } - if err := iterator.Error(); err != nil { - return err - } - - numProcessed := totalNumberToProcess - tree.Len() - log("executed blocks", - zap.Uint64("numExecuted", numProcessed), - zap.Uint64("numToExecute", totalNumberToProcess), - zap.Bool("halted", haltable.Halted()), - zap.Duration("duration", time.Since(startTime)), - ) - return nil + return iterator.Error() } From c08fdae7aacee7648be6cee1b694a152eab927ef Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 23 May 2024 10:51:09 -0400 Subject: [PATCH 019/102] Verify signatures during Parse (#3046) --- indexer/examples/p-chain/main.go | 3 +- indexer/examples/x-chain-blocks/main.go | 4 +- vms/proposervm/batched_vm.go | 2 +- vms/proposervm/block.go | 7 +- vms/proposervm/block/block.go | 45 +++---- vms/proposervm/block/block_test.go | 35 ++--- vms/proposervm/block/build_test.go | 12 +- vms/proposervm/block/option.go | 4 + vms/proposervm/block/option_test.go | 13 -- vms/proposervm/block/parse.go | 20 ++- vms/proposervm/block/parse_test.go | 172 ++++++++++++++---------- vms/proposervm/block_test.go | 49 ++++--- vms/proposervm/pre_fork_block.go | 12 +- vms/proposervm/state/block_state.go | 2 +- vms/proposervm/vm.go | 2 +- 15 files changed, 208 insertions(+), 174 deletions(-) delete mode 100644 vms/proposervm/block/option_test.go diff --git a/indexer/examples/p-chain/main.go b/indexer/examples/p-chain/main.go index 866424254feb..8c2a17c86360 100644 --- a/indexer/examples/p-chain/main.go +++ b/indexer/examples/p-chain/main.go @@ -9,6 +9,7 @@ import ( "time" "github.com/ava-labs/avalanchego/indexer" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/wallet/subnet/primary" platformvmblock "github.com/ava-labs/avalanchego/vms/platformvm/block" @@ -33,7 
+34,7 @@ func main() { } platformvmBlockBytes := container.Bytes - proposerVMBlock, err := proposervmblock.Parse(container.Bytes) + proposerVMBlock, err := proposervmblock.Parse(container.Bytes, constants.PlatformChainID) if err == nil { platformvmBlockBytes = proposerVMBlock.Block() } diff --git a/indexer/examples/x-chain-blocks/main.go b/indexer/examples/x-chain-blocks/main.go index 882f9a9c0ad5..5b57d4d35e47 100644 --- a/indexer/examples/x-chain-blocks/main.go +++ b/indexer/examples/x-chain-blocks/main.go @@ -8,6 +8,7 @@ import ( "log" "time" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/indexer" "github.com/ava-labs/avalanchego/vms/proposervm/block" "github.com/ava-labs/avalanchego/wallet/chain/x/builder" @@ -19,6 +20,7 @@ import ( func main() { var ( uri = primary.LocalAPIURI + "/ext/index/X/block" + xChainID = ids.FromStringOrPanic("2eNy1mUFdmaxXNj1eQHUe7Np4gju9sJsEtWQ4MX3ToiNKuADed") client = indexer.NewClient(uri) ctx = context.Background() nextIndex uint64 @@ -31,7 +33,7 @@ func main() { continue } - proposerVMBlock, err := block.Parse(container.Bytes) + proposerVMBlock, err := block.Parse(container.Bytes, xChainID) if err != nil { log.Fatalf("failed to parse proposervm block: %s\n", err) } diff --git a/vms/proposervm/batched_vm.go b/vms/proposervm/batched_vm.go index ff1ce6a597a0..853c858e6bba 100644 --- a/vms/proposervm/batched_vm.go +++ b/vms/proposervm/batched_vm.go @@ -101,7 +101,7 @@ func (vm *VM) BatchedParseBlock(ctx context.Context, blks [][]byte) ([]snowman.B ) for ; blocksIndex < len(blks); blocksIndex++ { blkBytes := blks[blocksIndex] - statelessBlock, err := statelessblock.Parse(blkBytes) + statelessBlock, err := statelessblock.Parse(blkBytes, vm.ctx.ChainID) if err != nil { break } diff --git a/vms/proposervm/block.go b/vms/proposervm/block.go index 9d48da49152b..464acb9fc8cd 100644 --- a/vms/proposervm/block.go +++ b/vms/proposervm/block.go @@ -36,6 +36,7 @@ var ( errTimeTooAdvanced 
= errors.New("time is too far advanced") errProposerWindowNotStarted = errors.New("proposer window hasn't started") errUnexpectedProposer = errors.New("unexpected proposer for current window") + errProposerMismatch = errors.New("proposer mismatch") errProposersNotActivated = errors.New("proposers haven't been activated yet") errPChainHeightTooLow = errors.New("block P-chain height is too low") ) @@ -152,9 +153,9 @@ func (p *postForkCommonComponents) Verify( return err } - // Verify the signature of the node - if err := child.SignedBlock.Verify(shouldHaveProposer, p.vm.ctx.ChainID); err != nil { - return err + hasProposer := child.SignedBlock.Proposer() != ids.EmptyNodeID + if shouldHaveProposer != hasProposer { + return fmt.Errorf("%w: shouldHaveProposer (%v) != hasProposer (%v)", errProposerMismatch, shouldHaveProposer, hasProposer) } p.vm.ctx.Log.Debug("verified post-fork block", diff --git a/vms/proposervm/block/block.go b/vms/proposervm/block/block.go index d99a569c96f6..68da910e1dbd 100644 --- a/vms/proposervm/block/block.go +++ b/vms/proposervm/block/block.go @@ -17,9 +17,8 @@ import ( var ( _ SignedBlock = (*statelessBlock)(nil) - errUnexpectedProposer = errors.New("expected no proposer but one was provided") - errMissingProposer = errors.New("expected proposer but none was provided") - errInvalidCertificate = errors.New("invalid certificate") + errUnexpectedSignature = errors.New("signature provided when none was expected") + errInvalidCertificate = errors.New("invalid certificate") ) type Block interface { @@ -29,6 +28,7 @@ type Block interface { Bytes() []byte initialize(bytes []byte) error + verify(chainID ids.ID) error } type SignedBlock interface { @@ -36,9 +36,10 @@ type SignedBlock interface { PChainHeight() uint64 Timestamp() time.Time - Proposer() ids.NodeID - Verify(shouldHaveProposer bool, chainID ids.ID) error + // Proposer returns the ID of the node that proposed this block. If no node + // signed this block, [ids.EmptyNodeID] will be returned. 
+ Proposer() ids.NodeID } type statelessUnsignedBlock struct { @@ -101,26 +102,12 @@ func (b *statelessBlock) initialize(bytes []byte) error { return nil } -func (b *statelessBlock) PChainHeight() uint64 { - return b.StatelessBlock.PChainHeight -} - -func (b *statelessBlock) Timestamp() time.Time { - return b.timestamp -} - -func (b *statelessBlock) Proposer() ids.NodeID { - return b.proposer -} - -func (b *statelessBlock) Verify(shouldHaveProposer bool, chainID ids.ID) error { - if !shouldHaveProposer { - if len(b.Signature) > 0 || len(b.StatelessBlock.Certificate) > 0 { - return errUnexpectedProposer +func (b *statelessBlock) verify(chainID ids.ID) error { + if len(b.StatelessBlock.Certificate) == 0 { + if len(b.Signature) > 0 { + return errUnexpectedSignature } return nil - } else if b.cert == nil { - return errMissingProposer } header, err := BuildHeader(chainID, b.StatelessBlock.ParentID, b.id) @@ -135,3 +122,15 @@ func (b *statelessBlock) Verify(shouldHaveProposer bool, chainID ids.ID) error { b.Signature, ) } + +func (b *statelessBlock) PChainHeight() uint64 { + return b.StatelessBlock.PChainHeight +} + +func (b *statelessBlock) Timestamp() time.Time { + return b.timestamp +} + +func (b *statelessBlock) Proposer() ids.NodeID { + return b.proposer +} diff --git a/vms/proposervm/block/block_test.go b/vms/proposervm/block/block_test.go index 8a8a57ae3b9d..2b8918eb902c 100644 --- a/vms/proposervm/block/block_test.go +++ b/vms/proposervm/block/block_test.go @@ -14,37 +14,22 @@ import ( "github.com/ava-labs/avalanchego/utils/units" ) -func equal(require *require.Assertions, chainID ids.ID, want, have SignedBlock) { +func equal(require *require.Assertions, want, have Block) { require.Equal(want.ID(), have.ID()) require.Equal(want.ParentID(), have.ParentID()) - require.Equal(want.PChainHeight(), have.PChainHeight()) - require.Equal(want.Timestamp(), have.Timestamp()) require.Equal(want.Block(), have.Block()) - require.Equal(want.Proposer(), have.Proposer()) 
require.Equal(want.Bytes(), have.Bytes()) - require.Equal(want.Verify(false, chainID), have.Verify(false, chainID)) - require.Equal(want.Verify(true, chainID), have.Verify(true, chainID)) -} - -func TestVerifyNoCertWithSignature(t *testing.T) { - parentID := ids.ID{1} - timestamp := time.Unix(123, 0) - pChainHeight := uint64(2) - innerBlockBytes := []byte{3} - - require := require.New(t) - - builtBlockIntf, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) - require.NoError(err) - - builtBlock := builtBlockIntf.(*statelessBlock) - builtBlock.Signature = []byte{0} - err = builtBlock.Verify(false, ids.Empty) - require.ErrorIs(err, errUnexpectedProposer) + signedWant, wantIsSigned := want.(SignedBlock) + signedHave, haveIsSigned := have.(SignedBlock) + require.Equal(wantIsSigned, haveIsSigned) + if !wantIsSigned { + return + } - err = builtBlock.Verify(true, ids.Empty) - require.ErrorIs(err, errMissingProposer) + require.Equal(signedWant.PChainHeight(), signedHave.PChainHeight()) + require.Equal(signedWant.Timestamp(), signedHave.Timestamp()) + require.Equal(signedWant.Proposer(), signedHave.Proposer()) } func TestBlockSizeLimit(t *testing.T) { diff --git a/vms/proposervm/block/build_test.go b/vms/proposervm/block/build_test.go index 5589a9ac95bb..2ed9510c696c 100644 --- a/vms/proposervm/block/build_test.go +++ b/vms/proposervm/block/build_test.go @@ -29,6 +29,7 @@ func TestBuild(t *testing.T) { cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) require.NoError(err) key := tlsCert.PrivateKey.(crypto.Signer) + nodeID := ids.NodeIDFromCert(cert) builtBlock, err := Build( parentID, @@ -45,11 +46,7 @@ func TestBuild(t *testing.T) { require.Equal(pChainHeight, builtBlock.PChainHeight()) require.Equal(timestamp, builtBlock.Timestamp()) require.Equal(innerBlockBytes, builtBlock.Block()) - - require.NoError(builtBlock.Verify(true, chainID)) - - err = builtBlock.Verify(false, chainID) - require.ErrorIs(err, errUnexpectedProposer) + 
require.Equal(nodeID, builtBlock.Proposer()) } func TestBuildUnsigned(t *testing.T) { @@ -68,11 +65,6 @@ func TestBuildUnsigned(t *testing.T) { require.Equal(timestamp, builtBlock.Timestamp()) require.Equal(innerBlockBytes, builtBlock.Block()) require.Equal(ids.EmptyNodeID, builtBlock.Proposer()) - - require.NoError(builtBlock.Verify(false, ids.Empty)) - - err = builtBlock.Verify(true, ids.Empty) - require.ErrorIs(err, errMissingProposer) } func TestBuildHeader(t *testing.T) { diff --git a/vms/proposervm/block/option.go b/vms/proposervm/block/option.go index c80651b621fc..115b6d0b9f99 100644 --- a/vms/proposervm/block/option.go +++ b/vms/proposervm/block/option.go @@ -37,3 +37,7 @@ func (b *option) initialize(bytes []byte) error { b.bytes = bytes return nil } + +func (*option) verify(ids.ID) error { + return nil +} diff --git a/vms/proposervm/block/option_test.go b/vms/proposervm/block/option_test.go deleted file mode 100644 index d5af9c100079..000000000000 --- a/vms/proposervm/block/option_test.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package block - -import "github.com/stretchr/testify/require" - -func equalOption(require *require.Assertions, want, have Block) { - require.Equal(want.ID(), have.ID()) - require.Equal(want.ParentID(), have.ParentID()) - require.Equal(want.Block(), have.Block()) - require.Equal(want.Bytes(), have.Bytes()) -} diff --git a/vms/proposervm/block/parse.go b/vms/proposervm/block/parse.go index cf275134d888..f6bc63877166 100644 --- a/vms/proposervm/block/parse.go +++ b/vms/proposervm/block/parse.go @@ -3,9 +3,25 @@ package block -import "fmt" +import ( + "fmt" -func Parse(bytes []byte) (Block, error) { + "github.com/ava-labs/avalanchego/ids" +) + +// Parse a block and verify that the signature attached to the block is valid +// for the certificate provided in the block. 
+func Parse(bytes []byte, chainID ids.ID) (Block, error) { + block, err := ParseWithoutVerification(bytes) + if err != nil { + return nil, err + } + return block, block.verify(chainID) +} + +// ParseWithoutVerification parses a block without verifying that the signature +// on the block is correct. +func ParseWithoutVerification(bytes []byte) (Block, error) { var block Block parsedVersion, err := Codec.Unmarshal(bytes, &block) if err != nil { diff --git a/vms/proposervm/block/parse_test.go b/vms/proposervm/block/parse_test.go index e894c1e7a058..ce1d5d97cbb8 100644 --- a/vms/proposervm/block/parse_test.go +++ b/vms/proposervm/block/parse_test.go @@ -17,8 +17,6 @@ import ( ) func TestParse(t *testing.T) { - require := require.New(t) - parentID := ids.ID{1} timestamp := time.Unix(123, 0) pChainHeight := uint64(2) @@ -26,13 +24,13 @@ func TestParse(t *testing.T) { chainID := ids.ID{4} tlsCert, err := staking.NewTLSCert() - require.NoError(err) + require.NoError(t, err) cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) - require.NoError(err) + require.NoError(t, err) key := tlsCert.PrivateKey.(crypto.Signer) - builtBlock, err := Build( + signedBlock, err := Build( parentID, timestamp, pChainHeight, @@ -41,27 +39,106 @@ func TestParse(t *testing.T) { chainID, key, ) - require.NoError(err) - - builtBlockBytes := builtBlock.Bytes() - parsedBlockIntf, err := Parse(builtBlockBytes) - require.NoError(err) - - parsedBlock, ok := parsedBlockIntf.(SignedBlock) - require.True(ok) - - equal(require, chainID, builtBlock, parsedBlock) + require.NoError(t, err) + + unsignedBlock, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) + require.NoError(t, err) + + signedWithoutCertBlockIntf, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) + require.NoError(t, err) + signedWithoutCertBlock := signedWithoutCertBlockIntf.(*statelessBlock) + signedWithoutCertBlock.Signature = []byte{5} + + signedWithoutCertBlock.bytes, err = 
Codec.Marshal(CodecVersion, &signedWithoutCertBlockIntf) + require.NoError(t, err) + + optionBlock, err := BuildOption(parentID, innerBlockBytes) + require.NoError(t, err) + + tests := []struct { + name string + block Block + chainID ids.ID + expectedErr error + }{ + { + name: "correct chainID", + block: signedBlock, + chainID: chainID, + expectedErr: nil, + }, + { + name: "invalid chainID", + block: signedBlock, + chainID: ids.ID{5}, + expectedErr: staking.ErrECDSAVerificationFailure, + }, + { + name: "unsigned block", + block: unsignedBlock, + chainID: chainID, + expectedErr: nil, + }, + { + name: "invalid signature", + block: signedWithoutCertBlockIntf, + chainID: chainID, + expectedErr: errUnexpectedSignature, + }, + { + name: "option block", + block: optionBlock, + chainID: chainID, + expectedErr: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + blockBytes := test.block.Bytes() + parsedBlockWithoutVerification, err := ParseWithoutVerification(blockBytes) + require.NoError(err) + equal(require, test.block, parsedBlockWithoutVerification) + + parsedBlock, err := Parse(blockBytes, test.chainID) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr == nil { + equal(require, test.block, parsedBlock) + } + }) + } } -func TestParseDuplicateExtension(t *testing.T) { - require := require.New(t) - - blockHex := 
"0000000000000100000000000000000000000000000000000000000000000000000000000000000000000000007b0000000000000002000004bd308204b9308202a1a003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313232303830333233323835335a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100c2b2de1c16924d9b9254a0d5b80a4bc5f9beaa4f4f40a0e4efb69eb9b55d7d37f8c82328c237d7c5b451f5427b487284fa3f365f9caa53c7fcfef8d7a461d743bd7d88129f2da62b877ebe9d6feabf1bd12923e6c12321382c782fc3bb6b6cb4986a937a1edc3814f4e621e1a62053deea8c7649e43edd97ab6b56315b00d9ab5026bb9c31fb042dc574ba83c54e720e0120fcba2e8a66b77839be3ece0d4a6383ef3f76aac952b49a15b65e18674cd1340c32cecbcbaf80ae45be001366cb56836575fb0ab51ea44bf7278817e99b6b180fdd110a49831a132968489822c56692161bbd372cf89d9b8ee5a734cff15303b3a960ee78d79e76662a701941d9ec084429f26707f767e9b1d43241c0e4f96655d95c1f4f4aa00add78eff6bf0a6982766a035bf0b465786632c5bb240788ca0fdf032d8815899353ea4bec5848fd30118711e5b356bde8a0da074cc25709623225e734ff5bd0cf65c40d9fd8fccf746d8f8f35145bcebcf378d2b086e57d78b11e84f47fa467c4d037f92bff6dd4e934e0189b58193f24c4222ffb72b5c06361cf68ca64345bc3e230cc0f40063ad5f45b1659c643662996328c2eeddcd760d6f7c9cbae081ccc065844f7ea78c858564a408979764de882793706acc67d88092790dff567ed914b03355330932616a0f26f994b963791f0b1dbd8df979db86d1ea490700a3120293c3c2b10bef10203010001a33c303a300e0603551d0f0101ff0404030204b030130603551d25040c300a06082b0601050507030230130603551d25040c300a06082b06010505070302300d06092a864886f70d01010b05000382020100a21a0d73ec9ef4eb39f810557ac70b0b775772b8bae5f42c98565bc50b5b2c57317aa9cb1da12f55d0aac7bb36a00cd4fd0d7384c4efa284b53520c5a3c4b8a65240b393eeab02c802ea146c0728c3481c9e8d3aaad9d4dd7607103dcfaa96da83460adbe18174ed5b71bde7b0a93d4fb52234a9ff54e3fd25c5b74790dfb090f2e59dc5907357f510cc3a0b70ccdb87aee214def794b316224f318b471ffa13b66e44b467670e881cb1628c99c048a503376d9b6d7b8eef2e7be47ff7d5c1d56221f4cf7fa2519b594cb5917815c64dc75d8d281bcc99b5a12899b08f2ca0f189857b64a1afc5963337f3dd6e79390e8
5221569f6dbbb13aadce06a3dfb5032f0cc454809627872cd7cd0cea5eba187723f07652c8abc3fc42bd62136fc66287f2cc19a7cb416923ad1862d7f820b55cacb65e43731cb6df780e2651e457a3438456aeeeb278ad9c0ad2e760f6c1cbe276eeb621c8a4e609b5f2d902beb3212e3e45df99497021ff536d0b56390c5d785a8bf7909f6b61bdc705d7d92ae22f58e7b075f164a0450d82d8286bf449072751636ab5185f59f518b845a75d112d6f7b65223479202cff67635e2ad88106bc8a0cc9352d87c5b182ac19a4680a958d814a093acf46730f87da0df6926291d02590f215041b44a0a1a32eeb3a52cddabc3d256689bace18a8d85e644cf9137cce3718f7caac1cb16ae06e874f4c701000000010300000200b8e3a4d9a4394bac714cb597f5ba1a81865185e35c782d0317e7abc0b52d49ff8e10f787bedf86f08148e3dbd2d2d478caa2a2893d31db7d5ee51339883fe84d3004440f16cb3797a7fab0f627d3ebd79217e995488e785cd6bb7b96b9d306f8109daa9cfc4162f9839f60fb965bcb3b56a5fa787549c153a4c80027398f73a617b90b7f24f437b140cd3ac832c0b75ec98b9423b275782988a9fd426937b8f82fbb0e88a622934643fb6335c1a080a4d13125544b04585d5f5295be7cd2c8be364246ea3d5df3e837b39a85074575a1fa2f4799050460110bdfb20795c8a9172a20f61b95e1c5c43eccd0c2c155b67385366142c63409cb3fb488e7aba6c8930f7f151abf1c24a54bd21c3f7a06856ea9db35beddecb30d2c61f533a3d0590bdbb438c6f2a2286dfc3c71b383354f0abad72771c2cc3687b50c2298783e53857cf26058ed78d0c1cf53786eb8d006a058ee3c85a7b2b836b5d03ef782709ce8f2725548e557b3de45a395a669a15f1d910e97015d22ac70020cab7e2531e8b1f739b023b49e742203e9e19a7fe0053826a9a2fe2e118d3b83498c2cb308573202ad41aa4a390aee4b6b5dd2164e5c5cd1b5f68b7d5632cf7dbb9a9139663c9aac53a74b2c6fc73cad80e228a186ba027f6f32f0182d62503e04fcced385f2e7d2e11c00940622ebd533b4d144689082f9777e5b16c36f9af9066e0ad6564d43" - blockBytes, err := hex.DecodeString(blockHex) - require.NoError(err) - - _, err = Parse(blockBytes) - require.NoError(err) +func TestParseBytes(t *testing.T) { + chainID := ids.ID{4} + tests := []struct { + name string + hex string + expectedErr error + }{ + { + name: "duplicate extensions in certificate", + hex: 
"0000000000000100000000000000000000000000000000000000000000000000000000000000000000000000007b0000000000000002000004bd308204b9308202a1a003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313232303830333233323835335a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100c2b2de1c16924d9b9254a0d5b80a4bc5f9beaa4f4f40a0e4efb69eb9b55d7d37f8c82328c237d7c5b451f5427b487284fa3f365f9caa53c7fcfef8d7a461d743bd7d88129f2da62b877ebe9d6feabf1bd12923e6c12321382c782fc3bb6b6cb4986a937a1edc3814f4e621e1a62053deea8c7649e43edd97ab6b56315b00d9ab5026bb9c31fb042dc574ba83c54e720e0120fcba2e8a66b77839be3ece0d4a6383ef3f76aac952b49a15b65e18674cd1340c32cecbcbaf80ae45be001366cb56836575fb0ab51ea44bf7278817e99b6b180fdd110a49831a132968489822c56692161bbd372cf89d9b8ee5a734cff15303b3a960ee78d79e76662a701941d9ec084429f26707f767e9b1d43241c0e4f96655d95c1f4f4aa00add78eff6bf0a6982766a035bf0b465786632c5bb240788ca0fdf032d8815899353ea4bec5848fd30118711e5b356bde8a0da074cc25709623225e734ff5bd0cf65c40d9fd8fccf746d8f8f35145bcebcf378d2b086e57d78b11e84f47fa467c4d037f92bff6dd4e934e0189b58193f24c4222ffb72b5c06361cf68ca64345bc3e230cc0f40063ad5f45b1659c643662996328c2eeddcd760d6f7c9cbae081ccc065844f7ea78c858564a408979764de882793706acc67d88092790dff567ed914b03355330932616a0f26f994b963791f0b1dbd8df979db86d1ea490700a3120293c3c2b10bef10203010001a33c303a300e0603551d0f0101ff0404030204b030130603551d25040c300a06082b0601050507030230130603551d25040c300a06082b06010505070302300d06092a864886f70d01010b05000382020100a21a0d73ec9ef4eb39f810557ac70b0b775772b8bae5f42c98565bc50b5b2c57317aa9cb1da12f55d0aac7bb36a00cd4fd0d7384c4efa284b53520c5a3c4b8a65240b393eeab02c802ea146c0728c3481c9e8d3aaad9d4dd7607103dcfaa96da83460adbe18174ed5b71bde7b0a93d4fb52234a9ff54e3fd25c5b74790dfb090f2e59dc5907357f510cc3a0b70ccdb87aee214def794b316224f318b471ffa13b66e44b467670e881cb1628c99c048a503376d9b6d7b8eef2e7be47ff7d5c1d56221f4cf7fa2519b594cb5917815c64dc75d8d281bcc99b5a12899b08f2ca0f189857b64a1afc5963337f3dd6e79390e8
5221569f6dbbb13aadce06a3dfb5032f0cc454809627872cd7cd0cea5eba187723f07652c8abc3fc42bd62136fc66287f2cc19a7cb416923ad1862d7f820b55cacb65e43731cb6df780e2651e457a3438456aeeeb278ad9c0ad2e760f6c1cbe276eeb621c8a4e609b5f2d902beb3212e3e45df99497021ff536d0b56390c5d785a8bf7909f6b61bdc705d7d92ae22f58e7b075f164a0450d82d8286bf449072751636ab5185f59f518b845a75d112d6f7b65223479202cff67635e2ad88106bc8a0cc9352d87c5b182ac19a4680a958d814a093acf46730f87da0df6926291d02590f215041b44a0a1a32eeb3a52cddabc3d256689bace18a8d85e644cf9137cce3718f7caac1cb16ae06e874f4c701000000010300000200b8e3a4d9a4394bac714cb597f5ba1a81865185e35c782d0317e7abc0b52d49ff8e10f787bedf86f08148e3dbd2d2d478caa2a2893d31db7d5ee51339883fe84d3004440f16cb3797a7fab0f627d3ebd79217e995488e785cd6bb7b96b9d306f8109daa9cfc4162f9839f60fb965bcb3b56a5fa787549c153a4c80027398f73a617b90b7f24f437b140cd3ac832c0b75ec98b9423b275782988a9fd426937b8f82fbb0e88a622934643fb6335c1a080a4d13125544b04585d5f5295be7cd2c8be364246ea3d5df3e837b39a85074575a1fa2f4799050460110bdfb20795c8a9172a20f61b95e1c5c43eccd0c2c155b67385366142c63409cb3fb488e7aba6c8930f7f151abf1c24a54bd21c3f7a06856ea9db35beddecb30d2c61f533a3d0590bdbb438c6f2a2286dfc3c71b383354f0abad72771c2cc3687b50c2298783e53857cf26058ed78d0c1cf53786eb8d006a058ee3c85a7b2b836b5d03ef782709ce8f2725548e557b3de45a395a669a15f1d910e97015d22ac70020cab7e2531e8b1f739b023b49e742203e9e19a7fe0053826a9a2fe2e118d3b83498c2cb308573202ad41aa4a390aee4b6b5dd2164e5c5cd1b5f68b7d5632cf7dbb9a9139663c9aac53a74b2c6fc73cad80e228a186ba027f6f32f0182d62503e04fcced385f2e7d2e11c00940622ebd533b4d144689082f9777e5b16c36f9af9066e0ad6564d43", + expectedErr: nil, + }, + { + name: "gibberish", + hex: "000102030405", + expectedErr: codec.ErrUnknownVersion, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + bytes, err := hex.DecodeString(test.hex) + require.NoError(err) + + _, err = Parse(bytes, chainID) + require.ErrorIs(err, test.expectedErr) + }) + } } func TestParseHeader(t *testing.T) 
{ @@ -85,50 +162,3 @@ func TestParseHeader(t *testing.T) { equalHeader(require, builtHeader, parsedHeader) } - -func TestParseOption(t *testing.T) { - require := require.New(t) - - parentID := ids.ID{1} - innerBlockBytes := []byte{3} - - builtOption, err := BuildOption(parentID, innerBlockBytes) - require.NoError(err) - - builtOptionBytes := builtOption.Bytes() - - parsedOption, err := Parse(builtOptionBytes) - require.NoError(err) - - equalOption(require, builtOption, parsedOption) -} - -func TestParseUnsigned(t *testing.T) { - require := require.New(t) - - parentID := ids.ID{1} - timestamp := time.Unix(123, 0) - pChainHeight := uint64(2) - innerBlockBytes := []byte{3} - - builtBlock, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) - require.NoError(err) - - builtBlockBytes := builtBlock.Bytes() - parsedBlockIntf, err := Parse(builtBlockBytes) - require.NoError(err) - - parsedBlock, ok := parsedBlockIntf.(SignedBlock) - require.True(ok) - - equal(require, ids.Empty, builtBlock, parsedBlock) -} - -func TestParseGibberish(t *testing.T) { - require := require.New(t) - - bytes := []byte{0, 1, 2, 3, 4, 5} - - _, err := Parse(bytes) - require.ErrorIs(err, codec.ErrUnknownVersion) -} diff --git a/vms/proposervm/block_test.go b/vms/proposervm/block_test.go index 1d16222f0124..d8c867058f52 100644 --- a/vms/proposervm/block_test.go +++ b/vms/proposervm/block_test.go @@ -179,10 +179,12 @@ func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay - time.Second) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(proVM.ctx.NodeID, childBlk.(*postForkBlock).Proposer()) // signed block + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(proVM.ctx.NodeID, childBlk.Proposer()) // signed 
block } { @@ -191,34 +193,41 @@ func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // signed block + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(ids.EmptyNodeID, childBlk.Proposer()) // unsigned block } { - // Set local clock among MaxVerifyDelay and MaxBuildDelay from parent timestamp - // Check that child block is unsigned + // Set local clock between MaxVerifyDelay and MaxBuildDelay from parent + // timestamp. + // Check that child block is unsigned. localTime := parentBlk.Timestamp().Add((proposer.MaxVerifyDelay + proposer.MaxBuildDelay) / 2) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(ids.EmptyNodeID, childBlk.Proposer()) // unsigned block } { - // Set local clock after MaxBuildDelay from parent timestamp - // Check that child block is unsigned + // Set local clock after MaxBuildDelay from parent timestamp. + // Check that child block is unsigned. 
localTime := parentBlk.Timestamp().Add(proposer.MaxBuildDelay) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(ids.EmptyNodeID, childBlk.Proposer()) // unsigned block } } @@ -332,10 +341,12 @@ func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { localTime := parentBlk.Timestamp().Add(proposer.MaxBuildDelay) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(ids.EmptyNodeID, childBlk.Proposer()) // unsigned block } } diff --git a/vms/proposervm/pre_fork_block.go b/vms/proposervm/pre_fork_block.go index ffb618eed59f..737659cacc05 100644 --- a/vms/proposervm/pre_fork_block.go +++ b/vms/proposervm/pre_fork_block.go @@ -5,18 +5,24 @@ package proposervm import ( "context" + "errors" "fmt" "time" "go.uber.org/zap" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -var _ Block = (*preForkBlock)(nil) +var ( + _ Block = (*preForkBlock)(nil) + + errChildOfPreForkBlockHasProposer = errors.New("child of pre-fork block has proposer") +) type preForkBlock struct { snowman.Block @@ -167,8 +173,8 @@ func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkB } 
// Verify the lack of signature on the node - if err := child.SignedBlock.Verify(false, b.vm.ctx.ChainID); err != nil { - return err + if child.SignedBlock.Proposer() != ids.EmptyNodeID { + return errChildOfPreForkBlockHasProposer } // Verify the inner block and track it as verified diff --git a/vms/proposervm/state/block_state.go b/vms/proposervm/state/block_state.go index 64a588851686..8e888332e9c2 100644 --- a/vms/proposervm/state/block_state.go +++ b/vms/proposervm/state/block_state.go @@ -109,7 +109,7 @@ func (s *blockState) GetBlock(blkID ids.ID) (block.Block, choices.Status, error) } // The key was in the database - blk, err := block.Parse(blkWrapper.Block) + blk, err := block.ParseWithoutVerification(blkWrapper.Block) if err != nil { return nil, choices.Unknown, err } diff --git a/vms/proposervm/vm.go b/vms/proposervm/vm.go index ccb07fec83f8..02af69b11dd7 100644 --- a/vms/proposervm/vm.go +++ b/vms/proposervm/vm.go @@ -513,7 +513,7 @@ func (vm *VM) setLastAcceptedMetadata(ctx context.Context) error { } func (vm *VM) parsePostForkBlock(ctx context.Context, b []byte) (PostForkBlock, error) { - statelessBlock, err := statelessblock.Parse(b) + statelessBlock, err := statelessblock.Parse(b, vm.ctx.ChainID) if err != nil { return nil, err } From 944d3db88aabe132f0c2040e0214c8305c477e28 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 23 May 2024 11:58:55 -0400 Subject: [PATCH 020/102] Update versions for v1.11.6 (#3047) Co-authored-by: Darioush Jalali --- RELEASES.md | 122 +++++++++++++++++++++++++++++++++++++ database/pebble/db.go | 37 +++++++---- go.mod | 29 +++++---- go.sum | 69 ++++++++++++--------- version/compatibility.json | 3 +- version/constants.go | 2 +- 6 files changed, 205 insertions(+), 57 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index e2bcdb7686a5..ea14d67c6df6 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,127 @@ # Release Notes +## [v1.11.6](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.6) + +This 
version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is unchanged at `35` and is compatible with versions `v1.11.3-v1.11.5`. + +### APIs + +- Updated cache metrics: + - `*_cache_put_sum` was replaced with `*_cache_put_time` + - `*_cache_get_sum` was replaced with `*_cache_get_time` + - `*_cache_hit` and `*_cache_miss` were removed and `*_cache_get_count` added a `result` label +- Updated db metrics: + - `*_db_{method}_count` were replaced with `*_db_calls` with a `method` label + - `*_db_{method}_sum` were replaced with `*_db_duration` with a `method` label + - `*_db_{method}_size_count` were deleted + - `*_db_{method}_size_sum` were replaced with `*_db_size` with a `method` label +- Updated p2p message compression metrics: + - `avalanche_network_codec_{type}_{op}_{direction}_time_count` were replaced with `avalanche_network_codec_compressed_count` with `direction`, `op`, and `type` labels +- Updated p2p message metrics: + - `avalanche_network_{op}_{io}` were replaced with `avalanche_network_msgs` with `compressed:"false"`, `io`, and `op` labels + - `avalanche_network_{op}_{io}_bytes` were replaced with `avalanche_network_msgs_bytes` with `io` and `op` labels + - `avalanche_network_{op}_compression_saved_{io}_bytes_sum` were replaced with `avalanche_network_msgs_bytes_saved` with `io` and `op` labels + - `avalanche_network_{op}_compression_saved_{io}_bytes_count` were replaced with `avalanche_network_msgs` with `compressed:"true"`, `io`, and `op` labels + - `avalanche_network_{op}_failed` were replaced with `avalanche_network_msgs_failed_to_send` with an `op` label +- Updated p2p sdk message metrics: + - `*_p2p_{op}_count` were replaced with `*_p2p_msg_count` with an `op` label + - `*_p2p_{op}_time` were replaced with `*_p2p_msg_time` with an `op` label +- Updated consensus message queue metrics: + - 
`avalanche_{chainID}_handler_unprocessed_msgs_{op}` were replaced with `avalanche_{chainID}_handler_unprocessed_msgs_count` with an `op` label + - `avalanche_{chainID}_handler_async_unprocessed_msgs_{op}` were replaced with `avalanche_{chainID}_handler_unprocessed_msgs_count` with an `op` label +- Updated consensus handler metrics: + - `avalanche_{chainID}_handler_{op}_count` were replaced with `avalanche_{chainID}_handler_messages` with an `op` label + - `avalanche_{chainID}_handler_{op}_msg_handling_count` was deleted + - `avalanche_{chainID}_handler_{op}_msg_handling_sum` were replaced with `avalanche_{chainID}_handler_message_handling_time` with an `op` label + - `avalanche_{chainID}_handler_{op}_sum` were replaced with `avalanche_{chainID}_handler_locking_time` +- Updated consensus sender metrics: + - `avalanche_{chainID}_{op}_failed_benched` were replaced with `avalanche_{chainID}_failed_benched` with an `op` label +- Updated consensus latency metrics: + - `avalanche_{chainID}_lat_{op}_count` were replaced with `avalanche_{chainID}_response_messages` with an `op` label + - `avalanche_{chainID}_lat_{op}_sum` were replaced with `avalanche_{chainID}_response_message_latencies` with an `op` label +- Updated X-chain metrics: + - `avalanche_X_vm_avalanche_{tx}_txs_accepted` were replaced with `avalanche_X_vm_avalanche_txs_accepted` with a `tx` label +- Updated P-chain metrics: + - `avalanche_P_vm_{tx}_txs_accepted` were replaced with `avalanche_P_vm_txs_accepted` with a `tx` label + - `avalanche_P_vm_{blk}_blks_accepted` were replaced with `avalanche_P_vm_blks_accepted` with a `blk` label + +### Fixes + +- Fixed performance regression while executing blocks in bootstrapping +- Fixed peer connection tracking in the P-chain and C-chain to re-enable tx pull gossip +- Fixed C-chain deadlock while executing blocks in bootstrapping after aborting state sync +- Fixed negative ETA while fetching blocks after aborting state sync +- Fixed C-chain snapshot initialization 
after state sync +- Fixed panic when running avalanchego in environments with an incorrectly implemented monotonic clock +- Fixed memory corruption when accessing keys and values from released pebbledb iterators +- Fixed prefixdb compaction when specifying a `nil` limit + +### What's Changed + +- Consolidate record poll by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2970 +- Update metercacher to use vectors by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2979 +- Reduce p2p sdk metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2980 +- Use vectors in message queue metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2985 +- Use vectors for p2p message metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2983 +- Simplify gossip metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2984 +- Use vectors for message handler metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2987 +- Use vector in message sender by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2988 +- Simplify go version maintenance by @marun in https://github.com/ava-labs/avalanchego/pull/2977 +- Use vector for router latency metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2989 +- Use vectors for accepted tx and block metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2990 +- fix: version application error by @jujube in https://github.com/ava-labs/avalanchego/pull/2995 +- Chore: fix some typos. 
by @hattizai in https://github.com/ava-labs/avalanchego/pull/2993 +- Cleanup meterdb metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2991 +- Cleanup compression metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2992 +- Fix antithesis image publication by @marun in https://github.com/ava-labs/avalanchego/pull/2998 +- Remove unused `Metadata` struct by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3001 +- prefixdb: fix bug with Compact nil limit by @a1k0n in https://github.com/ava-labs/avalanchego/pull/3000 +- Update go version to 1.21.10 by @marun in https://github.com/ava-labs/avalanchego/pull/3004 +- vms/txs/mempool: unify avm and platformvm mempool implementations by @lebdron in https://github.com/ava-labs/avalanchego/pull/2994 +- Use gauges for time metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3009 +- Chore: fix typos. 
by @cocoyeal in https://github.com/ava-labs/avalanchego/pull/3010 +- [antithesis] Refactor existing job to support xsvm test setup by @marun in https://github.com/ava-labs/avalanchego/pull/2976 +- chore: fix some function names by @cartnavoy in https://github.com/ava-labs/avalanchego/pull/3015 +- Mark nodes as connected to the P-chain networking stack by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2981 +- [antithesis] Ensure images with a prefix are pushed by @marun in https://github.com/ava-labs/avalanchego/pull/3016 +- boostrapper: compact blocks before iterating them by @a1k0n in https://github.com/ava-labs/avalanchego/pull/2997 +- Remove pre-Durango networking checks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3018 +- Repackaged upgrades times into upgrade package by @abi87 in https://github.com/ava-labs/avalanchego/pull/3019 +- Standardize peer logs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3017 +- Fix pebbledb memory corruption by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3020 +- [vms/avm] fix linter error in benchmark : Use of weak random number generator by @tsachiherman in https://github.com/ava-labs/avalanchego/pull/3023 +- Simplify sampler interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3026 +- [build] Update linter version by @tsachiherman in https://github.com/ava-labs/avalanchego/pull/3024 +- fix broken link. 
by @cocoyeal in https://github.com/ava-labs/avalanchego/pull/3028 +- `gossipping` -> `gossiping` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3033 +- [tmpnet] Ensure tmpnet compatibility with windows by @marun in https://github.com/ava-labs/avalanchego/pull/3002 +- Fix negative ETA caused by rollback in vm.SetState by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3036 +- [tmpnet] Enable single node networks by @marun in https://github.com/ava-labs/avalanchego/pull/3003 +- P-chain - introducing fees calculators by @abi87 in https://github.com/ava-labs/avalanchego/pull/2698 +- Change default staking key from RSA 4096 to secp256r1 by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3025 +- Fix ACP links by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3037 +- Prevent unnecessary bandwidth from activated ACPs by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3031 +- [antithesis] Add test setup for xsvm by @marun in https://github.com/ava-labs/avalanchego/pull/2982 +- [antithesis] Ensure node image is pushed by @marun in https://github.com/ava-labs/avalanchego/pull/3042 +- Cleanup fee config passing by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3043 +- Fix typo fix by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3044 +- Grab iterator at previously executed height by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3045 +- Verify signatures during Parse by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3046 + +### New Contributors + +- @jujube made their first contribution in https://github.com/ava-labs/avalanchego/pull/2995 +- @hattizai made their first contribution in https://github.com/ava-labs/avalanchego/pull/2993 +- @a1k0n made their first contribution in 
https://github.com/ava-labs/avalanchego/pull/3000 +- @lebdron made their first contribution in https://github.com/ava-labs/avalanchego/pull/2994 +- @cocoyeal made their first contribution in https://github.com/ava-labs/avalanchego/pull/3010 +- @cartnavoy made their first contribution in https://github.com/ava-labs/avalanchego/pull/3015 +- @tsachiherman made their first contribution in https://github.com/ava-labs/avalanchego/pull/3023 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.5...v1.11.6 + ## [v1.11.5](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.5) This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. diff --git a/database/pebble/db.go b/database/pebble/db.go index 8e99e0690b64..0acb10d12c5c 100644 --- a/database/pebble/db.go +++ b/database/pebble/db.go @@ -26,6 +26,8 @@ const ( // pebbleByteOverHead is the number of bytes of constant overhead that // should be added to a batch size per operation. pebbleByteOverHead = 8 + + defaultCacheSize = 512 * units.MiB ) var ( @@ -33,8 +35,7 @@ var ( errInvalidOperation = errors.New("invalid operation") - defaultCacheSize = 512 * units.MiB - DefaultConfig = Config{ + DefaultConfig = Config{ CacheSize: defaultCacheSize, BytesPerSync: 512 * units.KiB, WALBytesPerSync: 0, // Default to no background syncing. 
@@ -53,13 +54,13 @@ type Database struct { } type Config struct { - CacheSize int `json:"cacheSize"` - BytesPerSync int `json:"bytesPerSync"` - WALBytesPerSync int `json:"walBytesPerSync"` // 0 means no background syncing - MemTableStopWritesThreshold int `json:"memTableStopWritesThreshold"` - MemTableSize int `json:"memTableSize"` - MaxOpenFiles int `json:"maxOpenFiles"` - MaxConcurrentCompactions int `json:"maxConcurrentCompactions"` + CacheSize int64 `json:"cacheSize"` + BytesPerSync int `json:"bytesPerSync"` + WALBytesPerSync int `json:"walBytesPerSync"` // 0 means no background syncing + MemTableStopWritesThreshold int `json:"memTableStopWritesThreshold"` + MemTableSize uint64 `json:"memTableSize"` + MaxOpenFiles int `json:"maxOpenFiles"` + MaxConcurrentCompactions int `json:"maxConcurrentCompactions"` } // TODO: Add metrics @@ -72,7 +73,7 @@ func New(file string, configBytes []byte, log logging.Logger, _ string, _ promet } opts := &pebble.Options{ - Cache: pebble.NewCache(int64(cfg.CacheSize)), + Cache: pebble.NewCache(cfg.CacheSize), BytesPerSync: cfg.BytesPerSync, Comparer: pebble.DefaultComparer, WALBytesPerSync: cfg.WALBytesPerSync, @@ -193,7 +194,10 @@ func (db *Database) Compact(start []byte, end []byte) error { // keys but pebble treats a nil [limit] as a key before all keys in // Compact. Use the greatest key in the database as the [limit] to get // the desired behavior. - it := db.pebbleDB.NewIter(&pebble.IterOptions{}) + it, err := db.pebbleDB.NewIter(&pebble.IterOptions{}) + if err != nil { + return updateError(err) + } if !it.Last() { // The database is empty. 
@@ -238,9 +242,18 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database } } + it, err := db.pebbleDB.NewIter(keyRange(start, prefix)) + if err != nil { + return &iter{ + db: db, + closed: true, + err: updateError(err), + } + } + iter := &iter{ db: db, - iter: db.pebbleDB.NewIter(keyRange(start, prefix)), + iter: it, } db.openIterators.Add(iter) return iter diff --git a/go.mod b/go.mod index 57c942972d0d..3e4559e2b297 100644 --- a/go.mod +++ b/go.mod @@ -9,13 +9,13 @@ go 1.21.10 require ( github.com/DataDog/zstd v1.5.2 github.com/NYTimes/gziphandler v1.1.1 - github.com/ava-labs/coreth v0.13.4-0.20240506124912-82b6c4e91557 + github.com/ava-labs/coreth v0.13.4-rc.0 github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 github.com/btcsuite/btcd/btcutil v1.1.3 - github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/compose-spec/compose-go v1.20.2 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 - github.com/ethereum/go-ethereum v1.13.2 + github.com/ethereum/go-ethereum v1.13.8 github.com/google/btree v1.1.2 github.com/google/renameio/v2 v2.0.0 github.com/google/uuid v1.6.0 @@ -72,19 +72,21 @@ require ( require ( github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e // indirect github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec // indirect - github.com/VictoriaMetrics/fastcache v1.10.0 // indirect + github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect 
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/distribution/reference v0.5.0 // indirect @@ -92,17 +94,17 @@ require ( github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect - github.com/ethereum/c-kzg-4844 v0.3.1 // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect github.com/frankban/quicktest v1.14.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect + github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect - github.com/go-stack/stack v1.8.1 // indirect 
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -115,15 +117,15 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect - github.com/holiman/uint256 v1.2.3 // indirect + github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/klauspost/compress v1.15.15 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-runewidth v0.0.13 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect @@ -134,8 +136,9 @@ require ( github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.39.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sanity-io/litter v1.5.1 // indirect diff --git a/go.sum b/go.sum index 
735aded83e8d..88ec260be734 100644 --- a/go.sum +++ b/go.sum @@ -53,22 +53,22 @@ github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKz github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= -github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= -github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= +github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= +github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/coreth v0.13.4-0.20240506124912-82b6c4e91557 h1:92JWd4u2pqpO551gXUIZ/qDZu3l7vn8jIxX2qRyyFwM= -github.com/ava-labs/coreth v0.13.4-0.20240506124912-82b6c4e91557/go.mod h1:yMIxezDyB/5moKt8LlATlfwR/Z5cmipY3gUQ1SqHvQ0= +github.com/ava-labs/coreth v0.13.4-rc.0 h1:UU7SOlLVeviT59Y+FyDyrdt0Wvl+SOj0EyET8ZtH1ZY= +github.com/ava-labs/coreth v0.13.4-rc.0/go.mod h1:mP1QRzaQKq+y+bqyx3xMR3/K/+TpjHbPJi+revI6Y38= github.com/ava-labs/ledger-avalanche/go 
v0.0.0-20231102202641-ae2ebdaeac34 h1:mg9Uw6oZFJKytJxgxnl3uxZOs/SB8CVHg6Io4Tf99Zc= github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= @@ -98,7 +98,6 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex 
v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -114,17 +113,19 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= -github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= 
+github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/compose-spec/compose-go v1.20.2 h1:u/yfZHn4EaHGdidrZycWpxXgFffjYULlTbRfJ51ykjQ= github.com/compose-spec/compose-go v1.20.2/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM= @@ -138,8 +139,10 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= -github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= 
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -180,10 +183,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg= -github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.13.2 h1:g9mCpfPWqCA1OL4e6C98PeVttb0HadfBRuKTGvMnOvw= -github.com/ethereum/go-ethereum v1.13.2/go.mod h1:gkQ5Ygi64ZBh9M/4iXY1R8WqoNCx1Ey0CkYn2BD4/fw= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.13.8 h1:1od+thJel3tM52ZUNQwvpYOeRHlbkVFZ5S8fhi0Lgsg= +github.com/ethereum/go-ethereum v1.13.8/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= @@ -198,6 +201,8 @@ github.com/fsnotify/fsnotify 
v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= @@ -220,8 +225,6 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -289,6 +292,8 @@ 
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -340,8 +345,8 @@ github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZ github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= -github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= +github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= +github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= @@ -412,10 +417,12 @@ 
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= @@ -494,10 +501,12 @@ github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQg github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= 
-github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= @@ -819,13 +828,13 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= @@ -1048,8 +1057,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/version/compatibility.json b/version/compatibility.json index ebfe0020db8d..af88db003657 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -2,7 +2,8 @@ "35": [ "v1.11.3", "v1.11.4", - "v1.11.5" + "v1.11.5", + "v1.11.6" ], "34": [ "v1.11.2" diff --git a/version/constants.go 
b/version/constants.go index ac684cba63a5..159f83a34ac2 100644 --- a/version/constants.go +++ b/version/constants.go @@ -26,7 +26,7 @@ var ( Current = &Semantic{ Major: 1, Minor: 11, - Patch: 5, + Patch: 6, } CurrentApp = &Application{ Name: Client, From 54c4b5384d0c8287975e1ee9df3a7aa8d03a9b41 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 24 May 2024 11:56:20 -0400 Subject: [PATCH 021/102] Expose canonical warp formatting function (#3049) --- vms/platformvm/warp/validator.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/vms/platformvm/warp/validator.go b/vms/platformvm/warp/validator.go index a74c140e9ecd..0d33a9f12f26 100644 --- a/vms/platformvm/warp/validator.go +++ b/vms/platformvm/warp/validator.go @@ -58,9 +58,17 @@ func GetCanonicalValidatorSet( return nil, 0, err } + // Convert the validator set into the canonical ordering. + return FlattenValidatorSet(vdrSet) +} + +// FlattenValidatorSet converts the provided [vdrSet] into a canonical ordering. +// Also returns the total weight of the validator set. 
+func FlattenValidatorSet(vdrSet map[ids.NodeID]*validators.GetValidatorOutput) ([]*Validator, uint64, error) { var ( vdrs = make(map[string]*Validator, len(vdrSet)) totalWeight uint64 + err error ) for _, vdr := range vdrSet { totalWeight, err = math.Add64(totalWeight, vdr.Weight) From 15ac8cd3473f56d8e39b5b08564b63efe9b32c8a Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 24 May 2024 15:39:19 -0400 Subject: [PATCH 022/102] Remove subnet filter from Peer.TrackedSubnets() (#2975) --- network/metrics.go | 29 ++-- network/network.go | 14 +- network/peer/config.go | 15 +- network/peer/peer.go | 37 +++-- network/peer/peer_test.go | 340 ++++++++++++++++++-------------------- 5 files changed, 225 insertions(+), 210 deletions(-) diff --git a/network/metrics.go b/network/metrics.go index e2a3a363b403..c6b47a1360ab 100644 --- a/network/metrics.go +++ b/network/metrics.go @@ -12,11 +12,13 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" ) type metrics struct { + // trackedSubnets does not include the primary network ID + trackedSubnets set.Set[ids.ID] + numTracked prometheus.Gauge numPeers prometheus.Gauge numSubnetPeers *prometheus.GaugeVec @@ -41,8 +43,13 @@ type metrics struct { peerConnectedStartTimesSum float64 } -func newMetrics(namespace string, registerer prometheus.Registerer, initialSubnetIDs set.Set[ids.ID]) (*metrics, error) { +func newMetrics( + namespace string, + registerer prometheus.Registerer, + trackedSubnets set.Set[ids.ID], +) (*metrics, error) { m := &metrics{ + trackedSubnets: trackedSubnets, numPeers: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "peers", @@ -169,11 +176,7 @@ func newMetrics(namespace string, registerer prometheus.Registerer, initialSubne ) // init subnet tracker metrics with 
tracked subnets - for subnetID := range initialSubnetIDs { - // no need to track primary network ID - if subnetID == constants.PrimaryNetworkID { - continue - } + for subnetID := range trackedSubnets { // initialize to 0 subnetIDStr := subnetID.String() m.numSubnetPeers.WithLabelValues(subnetIDStr).Set(0) @@ -189,8 +192,10 @@ func (m *metrics) markConnected(peer peer.Peer) { m.connected.Inc() trackedSubnets := peer.TrackedSubnets() - for subnetID := range trackedSubnets { - m.numSubnetPeers.WithLabelValues(subnetID.String()).Inc() + for subnetID := range m.trackedSubnets { + if trackedSubnets.Contains(subnetID) { + m.numSubnetPeers.WithLabelValues(subnetID.String()).Inc() + } } m.lock.Lock() @@ -206,8 +211,10 @@ func (m *metrics) markDisconnected(peer peer.Peer) { m.disconnected.Inc() trackedSubnets := peer.TrackedSubnets() - for subnetID := range trackedSubnets { - m.numSubnetPeers.WithLabelValues(subnetID.String()).Dec() + for subnetID := range m.trackedSubnets { + if trackedSubnets.Contains(subnetID) { + m.numSubnetPeers.WithLabelValues(subnetID.String()).Dec() + } } m.lock.Lock() diff --git a/network/network.go b/network/network.go index a143e6202e9f..9963612c0161 100644 --- a/network/network.go +++ b/network/network.go @@ -460,8 +460,12 @@ func (n *network) Connected(nodeID ids.NodeID) { peerVersion := peer.Version() n.router.Connected(nodeID, peerVersion, constants.PrimaryNetworkID) - for subnetID := range peer.TrackedSubnets() { - n.router.Connected(nodeID, peerVersion, subnetID) + + trackedSubnets := peer.TrackedSubnets() + for subnetID := range n.peerConfig.MySubnets { + if trackedSubnets.Contains(subnetID) { + n.router.Connected(nodeID, peerVersion, subnetID) + } } } @@ -694,8 +698,7 @@ func (n *network) getPeers( continue } - trackedSubnets := peer.TrackedSubnets() - if subnetID != constants.PrimaryNetworkID && !trackedSubnets.Contains(subnetID) { + if trackedSubnets := peer.TrackedSubnets(); !trackedSubnets.Contains(subnetID) { continue } @@ -731,8 
+734,7 @@ func (n *network) samplePeers( numValidatorsToSample+config.NonValidators+config.Peers, func(p peer.Peer) bool { // Only return peers that are tracking [subnetID] - trackedSubnets := p.TrackedSubnets() - if subnetID != constants.PrimaryNetworkID && !trackedSubnets.Contains(subnetID) { + if trackedSubnets := p.TrackedSubnets(); !trackedSubnets.Contains(subnetID) { return false } diff --git a/network/peer/config.go b/network/peer/config.go index 3eb8319216d7..8aa12820cc41 100644 --- a/network/peer/config.go +++ b/network/peer/config.go @@ -33,13 +33,14 @@ type Config struct { Network Network Router router.InboundHandler VersionCompatibility version.Compatibility - MySubnets set.Set[ids.ID] - Beacons validators.Manager - Validators validators.Manager - NetworkID uint32 - PingFrequency time.Duration - PongTimeout time.Duration - MaxClockDifference time.Duration + // MySubnets does not include the primary network ID + MySubnets set.Set[ids.ID] + Beacons validators.Manager + Validators validators.Manager + NetworkID uint32 + PingFrequency time.Duration + PongTimeout time.Duration + MaxClockDifference time.Duration SupportedACPs []uint32 ObjectedACPs []uint32 diff --git a/network/peer/peer.go b/network/peer/peer.go index 4352e21a7af5..a87bca708544 100644 --- a/network/peer/peer.go +++ b/network/peer/peer.go @@ -35,6 +35,9 @@ const ( // maxBloomSaltLen restricts the allowed size of the bloom salt to prevent // excessively expensive bloom filter contains checks. maxBloomSaltLen = 32 + // maxNumTrackedSubnets limits how many subnets a peer can track to prevent + // excessive memory usage. + maxNumTrackedSubnets = 16 disconnectingLog = "disconnecting from peer" failedToCreateMessageLog = "failed to create message" @@ -139,8 +142,8 @@ type peer struct { // version is the claimed version the peer is running that we received in // the Handshake message. 
version *version.Application - // trackedSubnets is the subset of subnetIDs the peer sent us in the Handshake - // message that we are also tracking. + // trackedSubnets are the subnetIDs the peer sent us in the Handshake + // message. The primary network ID is always included. trackedSubnets set.Set[ids.ID] // options of ACPs provided in the Handshake message. supportedACPs set.Set[uint32] @@ -271,9 +274,8 @@ func (p *peer) Info() Info { publicIPStr = p.ip.IPPort.String() } - uptimes := make(map[ids.ID]json.Uint32, p.trackedSubnets.Len()) - - for subnetID := range p.trackedSubnets { + uptimes := make(map[ids.ID]json.Uint32, p.MySubnets.Len()) + for subnetID := range p.MySubnets { uptime, exist := p.ObservedUptime(subnetID) if !exist { continue @@ -851,8 +853,12 @@ func (p *peer) getUptimes() (uint32, []*p2p.SubnetUptime) { primaryUptime = 0 } - subnetUptimes := make([]*p2p.SubnetUptime, 0, p.trackedSubnets.Len()) - for subnetID := range p.trackedSubnets { + subnetUptimes := make([]*p2p.SubnetUptime, 0, p.MySubnets.Len()) + for subnetID := range p.MySubnets { + if !p.trackedSubnets.Contains(subnetID) { + continue + } + subnetUptime, err := p.UptimeCalculator.CalculateUptimePercent(p.id, subnetID) if err != nil { p.Log.Debug(failedToGetUptimeLog, @@ -951,6 +957,18 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { } // handle subnet IDs + if numTrackedSubnets := len(msg.TrackedSubnets); numTrackedSubnets > maxNumTrackedSubnets { + p.Log.Debug(malformedMessageLog, + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "trackedSubnets"), + zap.Int("numTrackedSubnets", numTrackedSubnets), + ) + p.StartClose() + return + } + + p.trackedSubnets.Add(constants.PrimaryNetworkID) for _, subnetIDBytes := range msg.TrackedSubnets { subnetID, err := ids.ToID(subnetIDBytes) if err != nil { @@ -963,10 +981,7 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { p.StartClose() return } - // add only if we also track this 
subnet - if p.MySubnets.Contains(subnetID) { - p.trackedSubnets.Add(subnetID) - } + p.trackedSubnets.Add(subnetID) } for _, acp := range msg.SupportedAcps { diff --git a/network/peer/peer_test.go b/network/peer/peer_test.go index 30dc817c517f..ffd5915aa2ce 100644 --- a/network/peer/peer_test.go +++ b/network/peer/peer_test.go @@ -39,7 +39,6 @@ type testPeer struct { type rawTestPeer struct { config *Config - conn net.Conn cert *staking.Certificate nodeID ids.NodeID inboundMsgChan <-chan message.InboundMessage @@ -60,27 +59,10 @@ func newMessageCreator(t *testing.T) message.Creator { return mc } -func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPeer, *rawTestPeer) { +func newConfig(t *testing.T) Config { t.Helper() require := require.New(t) - conn0, conn1 := net.Pipe() - - tlsCert0, err := staking.NewTLSCert() - require.NoError(err) - cert0, err := staking.ParseCertificate(tlsCert0.Leaf.Raw) - require.NoError(err) - - tlsCert1, err := staking.NewTLSCert() - require.NoError(err) - cert1, err := staking.ParseCertificate(tlsCert1.Leaf.Raw) - require.NoError(err) - - nodeID0 := ids.NodeIDFromCert(cert0) - nodeID1 := ids.NodeIDFromCert(cert1) - - mc := newMessageCreator(t) - metrics, err := NewMetrics( "", prometheus.NewRegistry(), @@ -95,14 +77,17 @@ func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPee ) require.NoError(err) - sharedConfig := Config{ + return Config{ + ReadBufferSize: constants.DefaultNetworkPeerReadBufferSize, + WriteBufferSize: constants.DefaultNetworkPeerWriteBufferSize, Metrics: metrics, - MessageCreator: mc, + MessageCreator: newMessageCreator(t), Log: logging.NoLog{}, InboundMsgThrottler: throttling.NewNoInboundThrottler(), + Network: TestNetwork, + Router: nil, VersionCompatibility: version.GetCompatibility(constants.LocalID), - MySubnets: trackedSubnets, - UptimeCalculator: uptime.NoOpCalculator, + MySubnets: nil, Beacons: validators.NewManager(), Validators: validators.NewManager(), 
NetworkID: constants.LocalID, @@ -110,141 +95,91 @@ func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPee PongTimeout: constants.DefaultPingPongTimeout, MaxClockDifference: time.Minute, ResourceTracker: resourceTracker, + UptimeCalculator: uptime.NoOpCalculator, + IPSigner: nil, } - peerConfig0 := sharedConfig - peerConfig1 := sharedConfig - - ip0 := ips.NewDynamicIPPort(net.IPv6loopback, 1) - tls0 := tlsCert0.PrivateKey.(crypto.Signer) - bls0, err := bls.NewSecretKey() - require.NoError(err) +} - peerConfig0.IPSigner = NewIPSigner(ip0, tls0, bls0) +func newRawTestPeer(t *testing.T, config Config) *rawTestPeer { + t.Helper() + require := require.New(t) - peerConfig0.Network = TestNetwork - inboundMsgChan0 := make(chan message.InboundMessage) - peerConfig0.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { - inboundMsgChan0 <- msg - }) + tlsCert, err := staking.NewTLSCert() + require.NoError(err) + cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(err) + nodeID := ids.NodeIDFromCert(cert) - ip1 := ips.NewDynamicIPPort(net.IPv6loopback, 2) - tls1 := tlsCert1.PrivateKey.(crypto.Signer) - bls1, err := bls.NewSecretKey() + ip := ips.NewDynamicIPPort(net.IPv6loopback, 1) + tls := tlsCert.PrivateKey.(crypto.Signer) + bls, err := bls.NewSecretKey() require.NoError(err) - peerConfig1.IPSigner = NewIPSigner(ip1, tls1, bls1) + config.IPSigner = NewIPSigner(ip, tls, bls) - peerConfig1.Network = TestNetwork - inboundMsgChan1 := make(chan message.InboundMessage) - peerConfig1.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { - inboundMsgChan1 <- msg + inboundMsgChan := make(chan message.InboundMessage) + config.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { + inboundMsgChan <- msg }) - peer0 := &rawTestPeer{ - config: &peerConfig0, - conn: conn0, - cert: cert0, - nodeID: nodeID0, - inboundMsgChan: 
inboundMsgChan0, - } - peer1 := &rawTestPeer{ - config: &peerConfig1, - conn: conn1, - cert: cert1, - nodeID: nodeID1, - inboundMsgChan: inboundMsgChan1, + return &rawTestPeer{ + config: &config, + cert: cert, + nodeID: nodeID, + inboundMsgChan: inboundMsgChan, } - return peer0, peer1 } -func makeTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*testPeer, *testPeer) { - rawPeer0, rawPeer1 := makeRawTestPeers(t, trackedSubnets) - - peer0 := &testPeer{ - Peer: Start( - rawPeer0.config, - rawPeer0.conn, - rawPeer1.cert, - rawPeer1.nodeID, - NewThrottledMessageQueue( - rawPeer0.config.Metrics, - rawPeer1.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ), - inboundMsgChan: rawPeer0.inboundMsgChan, - } - peer1 := &testPeer{ +func startTestPeer(self *rawTestPeer, peer *rawTestPeer, conn net.Conn) *testPeer { + return &testPeer{ Peer: Start( - rawPeer1.config, - rawPeer1.conn, - rawPeer0.cert, - rawPeer0.nodeID, + self.config, + conn, + peer.cert, + peer.nodeID, NewThrottledMessageQueue( - rawPeer1.config.Metrics, - rawPeer0.nodeID, + self.config.Metrics, + peer.nodeID, logging.NoLog{}, throttling.NewNoOutboundThrottler(), ), ), - inboundMsgChan: rawPeer1.inboundMsgChan, + inboundMsgChan: self.inboundMsgChan, } +} + +func startTestPeers(rawPeer0 *rawTestPeer, rawPeer1 *rawTestPeer) (*testPeer, *testPeer) { + conn0, conn1 := net.Pipe() + peer0 := startTestPeer(rawPeer0, rawPeer1, conn0) + peer1 := startTestPeer(rawPeer1, rawPeer0, conn1) return peer0, peer1 } -func makeReadyTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*testPeer, *testPeer) { +func awaitReady(t *testing.T, peers ...Peer) { t.Helper() require := require.New(t) - peer0, peer1 := makeTestPeers(t, trackedSubnets) - - require.NoError(peer0.AwaitReady(context.Background())) - require.True(peer0.Ready()) - - require.NoError(peer1.AwaitReady(context.Background())) - require.True(peer1.Ready()) - - return peer0, peer1 + for _, peer := range peers { + 
require.NoError(peer.AwaitReady(context.Background())) + require.True(peer.Ready()) + } } func TestReady(t *testing.T) { require := require.New(t) - rawPeer0, rawPeer1 := makeRawTestPeers(t, set.Set[ids.ID]{}) - peer0 := Start( - rawPeer0.config, - rawPeer0.conn, - rawPeer1.cert, - rawPeer1.nodeID, - NewThrottledMessageQueue( - rawPeer0.config.Metrics, - rawPeer1.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ) + config := newConfig(t) - require.False(peer0.Ready()) + rawPeer0 := newRawTestPeer(t, config) + rawPeer1 := newRawTestPeer(t, config) - peer1 := Start( - rawPeer1.config, - rawPeer1.conn, - rawPeer0.cert, - rawPeer0.nodeID, - NewThrottledMessageQueue( - rawPeer1.config.Metrics, - rawPeer0.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ) + conn0, conn1 := net.Pipe() - require.NoError(peer0.AwaitReady(context.Background())) - require.True(peer0.Ready()) + peer0 := startTestPeer(rawPeer0, rawPeer1, conn0) + require.False(peer0.Ready()) - require.NoError(peer1.AwaitReady(context.Background())) - require.True(peer1.Ready()) + peer1 := startTestPeer(rawPeer1, rawPeer0, conn1) + awaitReady(t, peer0, peer1) peer0.StartClose() require.NoError(peer0.AwaitClosed(context.Background())) @@ -254,10 +189,15 @@ func TestReady(t *testing.T) { func TestSend(t *testing.T) { require := require.New(t) - peer0, peer1 := makeReadyTestPeers(t, set.Set[ids.ID]{}) - mc := newMessageCreator(t) + sharedConfig := newConfig(t) - outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty) + rawPeer0 := newRawTestPeer(t, sharedConfig) + rawPeer1 := newRawTestPeer(t, sharedConfig) + + peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) + awaitReady(t, peer0, peer1) + + outboundGetMsg, err := sharedConfig.MessageCreator.Get(ids.Empty, 1, time.Second, ids.Empty) require.NoError(err) require.True(peer0.Send(context.Background(), outboundGetMsg)) @@ -274,9 +214,8 @@ func TestPingUptimes(t *testing.T) { trackedSubnetID := 
ids.GenerateTestID() untrackedSubnetID := ids.GenerateTestID() - trackedSubnets := set.Of(trackedSubnetID) - - mc := newMessageCreator(t) + sharedConfig := newConfig(t) + sharedConfig.MySubnets = set.Of(trackedSubnetID) testCases := []struct { name string @@ -287,10 +226,11 @@ func TestPingUptimes(t *testing.T) { { name: "primary network only", msg: func() message.OutboundMessage { - pingMsg, err := mc.Ping(1, nil) + pingMsg, err := sharedConfig.MessageCreator.Ping(1, nil) require.NoError(t, err) return pingMsg }(), + shouldClose: false, assertFn: func(require *require.Assertions, peer *testPeer) { uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID) require.True(ok) @@ -304,7 +244,7 @@ func TestPingUptimes(t *testing.T) { { name: "primary network and subnet", msg: func() message.OutboundMessage { - pingMsg, err := mc.Ping( + pingMsg, err := sharedConfig.MessageCreator.Ping( 1, []*p2p.SubnetUptime{ { @@ -316,6 +256,7 @@ func TestPingUptimes(t *testing.T) { require.NoError(t, err) return pingMsg }(), + shouldClose: false, assertFn: func(require *require.Assertions, peer *testPeer) { uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID) require.True(ok) @@ -329,7 +270,7 @@ func TestPingUptimes(t *testing.T) { { name: "primary network and non tracked subnet", msg: func() message.OutboundMessage { - pingMsg, err := mc.Ping( + pingMsg, err := sharedConfig.MessageCreator.Ping( 1, []*p2p.SubnetUptime{ { @@ -348,27 +289,30 @@ func TestPingUptimes(t *testing.T) { return pingMsg }(), shouldClose: true, + assertFn: nil, }, } - // Note: we reuse peers across tests because makeReadyTestPeers takes awhile - // to run. - peer0, peer1 := makeReadyTestPeers(t, trackedSubnets) - defer func() { - peer1.StartClose() - peer0.StartClose() - require.NoError(t, peer0.AwaitClosed(context.Background())) - require.NoError(t, peer1.AwaitClosed(context.Background())) - }() + // The raw peers are generated outside of the test cases to avoid generating + // many TLS keys. 
+ rawPeer0 := newRawTestPeer(t, sharedConfig) + rawPeer1 := newRawTestPeer(t, sharedConfig) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { require := require.New(t) + peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) + awaitReady(t, peer0, peer1) + defer func() { + peer1.StartClose() + peer0.StartClose() + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) + }() + require.True(peer0.Send(context.Background(), tc.msg)) - // Note: shouldClose can only be `true` for the last test because - // we reuse peers across tests. if tc.shouldClose { require.NoError(peer1.AwaitClosed(context.Background())) return @@ -385,11 +329,85 @@ func TestPingUptimes(t *testing.T) { } } +func TestTrackedSubnets(t *testing.T) { + sharedConfig := newConfig(t) + rawPeer0 := newRawTestPeer(t, sharedConfig) + rawPeer1 := newRawTestPeer(t, sharedConfig) + + makeSubnetIDs := func(numSubnets int) []ids.ID { + subnetIDs := make([]ids.ID, numSubnets) + for i := range subnetIDs { + subnetIDs[i] = ids.GenerateTestID() + } + return subnetIDs + } + + tests := []struct { + name string + trackedSubnets []ids.ID + shouldDisconnect bool + }{ + { + name: "primary network only", + trackedSubnets: makeSubnetIDs(0), + shouldDisconnect: false, + }, + { + name: "single subnet", + trackedSubnets: makeSubnetIDs(1), + shouldDisconnect: false, + }, + { + name: "max subnets", + trackedSubnets: makeSubnetIDs(maxNumTrackedSubnets), + shouldDisconnect: false, + }, + { + name: "too many subnets", + trackedSubnets: makeSubnetIDs(maxNumTrackedSubnets + 1), + shouldDisconnect: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + rawPeer0.config.MySubnets = set.Of(test.trackedSubnets...) 
+ peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) + if test.shouldDisconnect { + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) + return + } + + defer func() { + peer1.StartClose() + peer0.StartClose() + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) + }() + + awaitReady(t, peer0, peer1) + + require.Equal(set.Of(constants.PrimaryNetworkID), peer0.TrackedSubnets()) + + expectedTrackedSubnets := set.Of(test.trackedSubnets...) + expectedTrackedSubnets.Add(constants.PrimaryNetworkID) + require.Equal(expectedTrackedSubnets, peer1.TrackedSubnets()) + }) + } +} + // Test that a peer using the wrong BLS key is disconnected from. func TestInvalidBLSKeyDisconnects(t *testing.T) { require := require.New(t) - rawPeer0, rawPeer1 := makeRawTestPeers(t, nil) + sharedConfig := newConfig(t) + + rawPeer0 := newRawTestPeer(t, sharedConfig) + rawPeer1 := newRawTestPeer(t, sharedConfig) + require.NoError(rawPeer0.config.Validators.AddStaker( constants.PrimaryNetworkID, rawPeer1.nodeID, @@ -407,36 +425,8 @@ func TestInvalidBLSKeyDisconnects(t *testing.T) { ids.GenerateTestID(), 1, )) - peer0 := &testPeer{ - Peer: Start( - rawPeer0.config, - rawPeer0.conn, - rawPeer1.cert, - rawPeer1.nodeID, - NewThrottledMessageQueue( - rawPeer0.config.Metrics, - rawPeer1.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ), - inboundMsgChan: rawPeer0.inboundMsgChan, - } - peer1 := &testPeer{ - Peer: Start( - rawPeer1.config, - rawPeer1.conn, - rawPeer0.cert, - rawPeer0.nodeID, - NewThrottledMessageQueue( - rawPeer1.config.Metrics, - rawPeer0.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ), - inboundMsgChan: rawPeer1.inboundMsgChan, - } + + peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) // Because peer1 thinks that peer0 is using the wrong BLS key, they should // disconnect from each other. 
From cf7b7a2c8771b2a2082a96d2f8a68035bfc417cc Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 27 May 2024 13:42:20 -0400 Subject: [PATCH 023/102] Remove optional gatherer (#3052) --- api/metrics/multi_gatherer_test.go | 5 +- api/metrics/optional_gatherer.go | 61 ----------------------- api/metrics/optional_gatherer_test.go | 71 --------------------------- chains/linearizable_vm.go | 2 +- chains/manager.go | 15 ++---- go.mod | 2 +- go.sum | 4 +- snow/context.go | 2 +- snow/snowtest/snowtest.go | 2 +- vms/avm/vm.go | 2 +- vms/metervm/block_vm.go | 10 ++-- vms/metervm/vertex_vm.go | 10 ++-- vms/platformvm/vm.go | 2 +- vms/proposervm/vm.go | 13 ++--- vms/rpcchainvm/vm_client.go | 15 ++---- vms/rpcchainvm/vm_server.go | 2 +- 16 files changed, 32 insertions(+), 186 deletions(-) delete mode 100644 api/metrics/optional_gatherer.go delete mode 100644 api/metrics/optional_gatherer_test.go diff --git a/api/metrics/multi_gatherer_test.go b/api/metrics/multi_gatherer_test.go index 033e3e88b1e6..51b548d18a68 100644 --- a/api/metrics/multi_gatherer_test.go +++ b/api/metrics/multi_gatherer_test.go @@ -4,8 +4,10 @@ package metrics import ( + "errors" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" dto "github.com/prometheus/client_model/go" @@ -25,7 +27,7 @@ func TestMultiGathererDuplicatedPrefix(t *testing.T) { require := require.New(t) g := NewMultiGatherer() - og := NewOptionalGatherer() + og := prometheus.NewRegistry() require.NoError(g.Register("", og)) @@ -40,6 +42,7 @@ func TestMultiGathererAddedError(t *testing.T) { g := NewMultiGatherer() + errTest := errors.New("non-nil error") tg := &testGatherer{ err: errTest, } diff --git a/api/metrics/optional_gatherer.go b/api/metrics/optional_gatherer.go deleted file mode 100644 index 686856efcc86..000000000000 --- a/api/metrics/optional_gatherer.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package metrics - -import ( - "fmt" - "sync" - - "github.com/prometheus/client_golang/prometheus" - - dto "github.com/prometheus/client_model/go" -) - -var _ OptionalGatherer = (*optionalGatherer)(nil) - -// OptionalGatherer extends the Gatherer interface by allowing the optional -// registration of a single gatherer. If no gatherer is registered, Gather will -// return no metrics and no error. If a gatherer is registered, Gather will -// return the results of calling Gather on the provided gatherer. -type OptionalGatherer interface { - prometheus.Gatherer - - // Register the provided gatherer. If a gatherer was previously registered, - // an error will be returned. - Register(gatherer prometheus.Gatherer) error -} - -type optionalGatherer struct { - lock sync.RWMutex - gatherer prometheus.Gatherer -} - -func NewOptionalGatherer() OptionalGatherer { - return &optionalGatherer{} -} - -func (g *optionalGatherer) Gather() ([]*dto.MetricFamily, error) { - g.lock.RLock() - defer g.lock.RUnlock() - - if g.gatherer == nil { - return nil, nil - } - return g.gatherer.Gather() -} - -func (g *optionalGatherer) Register(gatherer prometheus.Gatherer) error { - g.lock.Lock() - defer g.lock.Unlock() - - if g.gatherer != nil { - return fmt.Errorf("%w; existing: %#v; new: %#v", - errReregisterGatherer, - g.gatherer, - gatherer, - ) - } - g.gatherer = gatherer - return nil -} diff --git a/api/metrics/optional_gatherer_test.go b/api/metrics/optional_gatherer_test.go deleted file mode 100644 index 201750701313..000000000000 --- a/api/metrics/optional_gatherer_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package metrics - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" - - dto "github.com/prometheus/client_model/go" -) - -var errTest = errors.New("non-nil error") - -func TestOptionalGathererEmptyGather(t *testing.T) { - require := require.New(t) - - g := NewOptionalGatherer() - - mfs, err := g.Gather() - require.NoError(err) - require.Empty(mfs) -} - -func TestOptionalGathererDuplicated(t *testing.T) { - require := require.New(t) - - g := NewOptionalGatherer() - og := NewOptionalGatherer() - - require.NoError(g.Register(og)) - err := g.Register(og) - require.ErrorIs(err, errReregisterGatherer) -} - -func TestOptionalGathererAddedError(t *testing.T) { - require := require.New(t) - - g := NewOptionalGatherer() - - tg := &testGatherer{ - err: errTest, - } - - require.NoError(g.Register(tg)) - - mfs, err := g.Gather() - require.ErrorIs(err, errTest) - require.Empty(mfs) -} - -func TestMultiGathererAdded(t *testing.T) { - require := require.New(t) - - g := NewOptionalGatherer() - - tg := &testGatherer{ - mfs: []*dto.MetricFamily{{ - Name: &hello, - }}, - } - - require.NoError(g.Register(tg)) - - mfs, err := g.Gather() - require.NoError(err) - require.Len(mfs, 1) - require.Equal(&hello, mfs[0].Name) -} diff --git a/chains/linearizable_vm.go b/chains/linearizable_vm.go index 97fe9eb4d1f4..0521e418667f 100644 --- a/chains/linearizable_vm.go +++ b/chains/linearizable_vm.go @@ -29,7 +29,7 @@ type initializeOnLinearizeVM struct { vmToInitialize common.VM vmToLinearize *linearizeOnInitializeVM - registerer metrics.OptionalGatherer + registerer metrics.MultiGatherer ctx *snow.Context db database.Database genesisBytes []byte diff --git a/chains/manager.go b/chains/manager.go index 5c8d21d20387..c5b79dd470e1 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -434,7 +434,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c return nil, fmt.Errorf("error while registering DAG metrics %w", err) } - 
vmMetrics := metrics.NewOptionalGatherer() + vmMetrics := metrics.NewMultiGatherer() vmNamespace := metric.AppendNamespace(chainNamespace, "vm") if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil { return nil, fmt.Errorf("error while registering vm's metrics %w", err) @@ -642,17 +642,12 @@ func (m *manager) createAvalancheChain( }, ) - avalancheRegisterer := metrics.NewOptionalGatherer() - snowmanRegisterer := metrics.NewOptionalGatherer() - - registerer := metrics.NewMultiGatherer() - if err := registerer.Register("avalanche", avalancheRegisterer); err != nil { - return nil, err - } - if err := registerer.Register("", snowmanRegisterer); err != nil { + avalancheRegisterer := metrics.NewMultiGatherer() + snowmanRegisterer := metrics.NewMultiGatherer() + if err := ctx.Context.Metrics.Register("avalanche", avalancheRegisterer); err != nil { return nil, err } - if err := ctx.Context.Metrics.Register(registerer); err != nil { + if err := ctx.Context.Metrics.Register("", snowmanRegisterer); err != nil { return nil, err } diff --git a/go.mod b/go.mod index 3e4559e2b297..a1df1b0537e4 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ go 1.21.10 require ( github.com/DataDog/zstd v1.5.2 github.com/NYTimes/gziphandler v1.1.1 - github.com/ava-labs/coreth v0.13.4-rc.0 + github.com/ava-labs/coreth v0.13.5-remove-optional-gatherer.2 github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 diff --git a/go.sum b/go.sum index 88ec260be734..7b1761abed8a 100644 --- a/go.sum +++ b/go.sum @@ -60,8 +60,8 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod 
h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/coreth v0.13.4-rc.0 h1:UU7SOlLVeviT59Y+FyDyrdt0Wvl+SOj0EyET8ZtH1ZY= -github.com/ava-labs/coreth v0.13.4-rc.0/go.mod h1:mP1QRzaQKq+y+bqyx3xMR3/K/+TpjHbPJi+revI6Y38= +github.com/ava-labs/coreth v0.13.5-remove-optional-gatherer.2 h1:RX9DcvgWxq42B2aiGzk77Y8w2bcB7ApO/Cdj9hA6QoE= +github.com/ava-labs/coreth v0.13.5-remove-optional-gatherer.2/go.mod h1:cm5c12xo5NiTgtbmeduv8i2nYdzgkczz9Wm3yiwwTRU= github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 h1:mg9Uw6oZFJKytJxgxnl3uxZOs/SB8CVHg6Io4Tf99Zc= github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/snow/context.go b/snow/context.go index 2cbbedb38b47..f610adca9998 100644 --- a/snow/context.go +++ b/snow/context.go @@ -46,7 +46,7 @@ type Context struct { Keystore keystore.BlockchainKeystore SharedMemory atomic.SharedMemory BCLookup ids.AliaserReader - Metrics metrics.OptionalGatherer + Metrics metrics.MultiGatherer WarpSigner warp.Signer diff --git a/snow/snowtest/snowtest.go b/snow/snowtest/snowtest.go index 9879b726955c..0ddee75707ab 100644 --- a/snow/snowtest/snowtest.go +++ b/snow/snowtest/snowtest.go @@ -90,7 +90,7 @@ func Context(tb testing.TB, chainID ids.ID) *snow.Context { Log: logging.NoLog{}, BCLookup: aliaser, - Metrics: metrics.NewOptionalGatherer(), + Metrics: metrics.NewMultiGatherer(), ValidatorState: validatorState, ChainDataDir: "", diff --git a/vms/avm/vm.go b/vms/avm/vm.go index c2dce8d27051..b8fe322ef959 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -174,7 +174,7 @@ func (vm *VM) Initialize( ) registerer := 
prometheus.NewRegistry() - if err := ctx.Metrics.Register(registerer); err != nil { + if err := ctx.Metrics.Register("", registerer); err != nil { return err } vm.registerer = registerer diff --git a/vms/metervm/block_vm.go b/vms/metervm/block_vm.go index 7c93b9078577..0ecb982c4742 100644 --- a/vms/metervm/block_vm.go +++ b/vms/metervm/block_vm.go @@ -70,18 +70,14 @@ func (vm *blockVM) Initialize( return err } - optionalGatherer := metrics.NewOptionalGatherer() multiGatherer := metrics.NewMultiGatherer() - if err := multiGatherer.Register("metervm", registerer); err != nil { + if err := chainCtx.Metrics.Register("metervm", registerer); err != nil { return err } - if err := multiGatherer.Register("", optionalGatherer); err != nil { + if err := chainCtx.Metrics.Register("", multiGatherer); err != nil { return err } - if err := chainCtx.Metrics.Register(multiGatherer); err != nil { - return err - } - chainCtx.Metrics = optionalGatherer + chainCtx.Metrics = multiGatherer return vm.ChainVM.Initialize(ctx, chainCtx, db, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) } diff --git a/vms/metervm/vertex_vm.go b/vms/metervm/vertex_vm.go index 8992b4863283..7cd112ffde24 100644 --- a/vms/metervm/vertex_vm.go +++ b/vms/metervm/vertex_vm.go @@ -50,18 +50,14 @@ func (vm *vertexVM) Initialize( return err } - optionalGatherer := metrics.NewOptionalGatherer() multiGatherer := metrics.NewMultiGatherer() - if err := multiGatherer.Register("metervm", registerer); err != nil { + if err := chainCtx.Metrics.Register("metervm", registerer); err != nil { return err } - if err := multiGatherer.Register("", optionalGatherer); err != nil { + if err := chainCtx.Metrics.Register("", multiGatherer); err != nil { return err } - if err := chainCtx.Metrics.Register(multiGatherer); err != nil { - return err - } - chainCtx.Metrics = optionalGatherer + chainCtx.Metrics = multiGatherer return vm.LinearizableVMWithEngine.Initialize( ctx, diff --git a/vms/platformvm/vm.go 
b/vms/platformvm/vm.go index 3358bed51974..f33451b18d48 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -114,7 +114,7 @@ func (vm *VM) Initialize( chainCtx.Log.Info("using VM execution config", zap.Reflect("config", execConfig)) registerer := prometheus.NewRegistry() - if err := chainCtx.Metrics.Register(registerer); err != nil { + if err := chainCtx.Metrics.Register("", registerer); err != nil { return err } diff --git a/vms/proposervm/vm.go b/vms/proposervm/vm.go index 02af69b11dd7..dfff407a03d2 100644 --- a/vms/proposervm/vm.go +++ b/vms/proposervm/vm.go @@ -132,20 +132,15 @@ func (vm *VM) Initialize( ) error { // TODO: Add a helper for this metrics override, it is performed in multiple // places. - multiGatherer := metrics.NewMultiGatherer() registerer := prometheus.NewRegistry() - if err := multiGatherer.Register("proposervm", registerer); err != nil { + if err := chainCtx.Metrics.Register("proposervm", registerer); err != nil { return err } - - optionalGatherer := metrics.NewOptionalGatherer() - if err := multiGatherer.Register("", optionalGatherer); err != nil { - return err - } - if err := chainCtx.Metrics.Register(multiGatherer); err != nil { + multiGatherer := metrics.NewMultiGatherer() + if err := chainCtx.Metrics.Register("", multiGatherer); err != nil { return err } - chainCtx.Metrics = optionalGatherer + chainCtx.Metrics = multiGatherer vm.ctx = chainCtx vm.db = versiondb.New(prefixdb.New(dbPrefix, db)) diff --git a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go index 04a3c8fd308f..038a728c0ffe 100644 --- a/vms/rpcchainvm/vm_client.go +++ b/vms/rpcchainvm/vm_client.go @@ -18,7 +18,6 @@ import ( "google.golang.org/protobuf/types/known/emptypb" "github.com/ava-labs/avalanchego/api/keystore/gkeystore" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/chains/atomic/gsharedmemory" "github.com/ava-labs/avalanchego/database" 
"github.com/ava-labs/avalanchego/database/rpcdb" @@ -137,15 +136,14 @@ func (vm *VMClient) Initialize( // Register metrics registerer := prometheus.NewRegistry() - multiGatherer := metrics.NewMultiGatherer() vm.grpcServerMetrics = grpc_prometheus.NewServerMetrics() if err := registerer.Register(vm.grpcServerMetrics); err != nil { return err } - if err := multiGatherer.Register("rpcchainvm", registerer); err != nil { + if err := chainCtx.Metrics.Register("rpcchainvm", registerer); err != nil { return err } - if err := multiGatherer.Register("", vm); err != nil { + if err := chainCtx.Metrics.Register("", vm); err != nil { return err } @@ -226,7 +224,7 @@ func (vm *VMClient) Initialize( time: time, } - chainState, err := chain.NewMeteredState( + vm.State, err = chain.NewMeteredState( registerer, &chain.Config{ DecidedCacheSize: decidedCacheSize, @@ -241,12 +239,7 @@ func (vm *VMClient) Initialize( BuildBlockWithContext: vm.buildBlockWithContext, }, ) - if err != nil { - return err - } - vm.State = chainState - - return chainCtx.Metrics.Register(multiGatherer) + return err } func (vm *VMClient) newDBServer(db database.Database) *grpc.Server { diff --git a/vms/rpcchainvm/vm_server.go b/vms/rpcchainvm/vm_server.go index 96132792725d..67a55187426a 100644 --- a/vms/rpcchainvm/vm_server.go +++ b/vms/rpcchainvm/vm_server.go @@ -225,7 +225,7 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) Keystore: keystoreClient, SharedMemory: sharedMemoryClient, BCLookup: bcLookupClient, - Metrics: metrics.NewOptionalGatherer(), + Metrics: metrics.NewMultiGatherer(), // Signs warp messages WarpSigner: warpSignerClient, From 75b9564891113362a58ac13fbafcc8fde479fc59 Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 29 May 2024 13:20:24 -0400 Subject: [PATCH 024/102] [vms/platformvm] Return the correct owner in `platform.GetSubnets` after transfer (#3054) --- vms/platformvm/service.go | 18 
++++++++---- vms/platformvm/service_test.go | 50 ++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index dfe970a88b0a..fe874c2d1d05 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -544,15 +544,23 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge continue } - unsignedTx := subnet.Unsigned.(*txs.CreateSubnetTx) - owner := unsignedTx.Owner.(*secp256k1fx.OutputOwners) - controlAddrs := []string{} - for _, controlKeyID := range owner.Addrs { + subnetOwner, err := s.vm.state.GetSubnetOwner(subnetID) + if err != nil { + return err + } + + owner, ok := subnetOwner.(*secp256k1fx.OutputOwners) + if !ok { + return fmt.Errorf("expected *secp256k1fx.OutputOwners but got %T", subnetOwner) + } + + controlAddrs := make([]string, len(owner.Addrs)) + for i, controlKeyID := range owner.Addrs { addr, err := s.addrManager.FormatLocalAddress(controlKeyID) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } - controlAddrs = append(controlAddrs, addr) + controlAddrs[i] = addr } response.Subnets[i] = APISubnet{ ID: subnetID, diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 95e2f98c3228..d72e11f6379c 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -1041,3 +1041,53 @@ func TestServiceGetBlockByHeight(t *testing.T) { }) } } + +func TestServiceGetSubnets(t *testing.T) { + require := require.New(t) + service, _, _ := defaultService(t) + + testSubnet1ID := testSubnet1.ID() + + var response GetSubnetsResponse + require.NoError(service.GetSubnets(nil, &GetSubnetsArgs{}, &response)) + require.Equal([]APISubnet{ + { + ID: testSubnet1ID, + ControlKeys: []string{ + "P-testing1d6kkj0qh4wcmus3tk59npwt3rluc6en72ngurd", + "P-testing17fpqs358de5lgu7a5ftpw2t8axf0pm33983krk", + "P-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e", + }, + Threshold: 2, + }, + { 
+ ID: constants.PrimaryNetworkID, + ControlKeys: []string{}, + Threshold: 0, + }, + }, response.Subnets) + + newOwnerIDStr := "P-testing1t73fa4p4dypa4s3kgufuvr6hmprjclw66mgqgm" + newOwnerID, err := service.addrManager.ParseLocalAddress(newOwnerIDStr) + require.NoError(err) + service.vm.state.SetSubnetOwner(testSubnet1ID, &secp256k1fx.OutputOwners{ + Addrs: []ids.ShortID{newOwnerID}, + Threshold: 1, + }) + + require.NoError(service.GetSubnets(nil, &GetSubnetsArgs{}, &response)) + require.Equal([]APISubnet{ + { + ID: testSubnet1ID, + ControlKeys: []string{ + newOwnerIDStr, + }, + Threshold: 1, + }, + { + ID: constants.PrimaryNetworkID, + ControlKeys: []string{}, + Threshold: 0, + }, + }, response.Subnets) +} From 6f7e78aa10be2e8f88dc0e10af60d46d3c7926e2 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 29 May 2024 14:38:33 -0400 Subject: [PATCH 025/102] Add metrics client (#3057) Signed-off-by: Stephen Buttolph Co-authored-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> --- api/metrics/client.go | 68 ++++++++++++++++++++++++ go.mod | 2 +- tests/e2e/x/transfer/virtuous.go | 33 +++++++----- tests/http.go | 89 ++++++++------------------------ 4 files changed, 111 insertions(+), 81 deletions(-) create mode 100644 api/metrics/client.go diff --git a/api/metrics/client.go b/api/metrics/client.go new file mode 100644 index 000000000000..0b402622cff5 --- /dev/null +++ b/api/metrics/client.go @@ -0,0 +1,68 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package metrics + +import ( + "bytes" + "context" + "fmt" + "net/http" + "net/url" + + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" +) + +// Client for requesting metrics from a remote AvalancheGo instance +type Client struct { + uri string +} + +// NewClient returns a new Metrics API Client +func NewClient(uri string) *Client { + return &Client{ + uri: uri + "/ext/metrics", + } +} + +// GetMetrics returns the metrics from the connected node. The metrics are +// returned as a map of metric family name to the metric family. +func (c *Client) GetMetrics(ctx context.Context) (map[string]*dto.MetricFamily, error) { + uri, err := url.Parse(c.uri) + if err != nil { + return nil, err + } + + request, err := http.NewRequestWithContext( + ctx, + http.MethodGet, + uri.String(), + bytes.NewReader(nil), + ) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + resp, err := http.DefaultClient.Do(request) + if err != nil { + return nil, fmt.Errorf("failed to issue request: %w", err) + } + + // Return an error for any non successful status code + if resp.StatusCode < 200 || resp.StatusCode > 299 { + // Drop any error during close to report the original error + _ = resp.Body.Close() + return nil, fmt.Errorf("received status code: %d", resp.StatusCode) + } + + var parser expfmt.TextParser + metrics, err := parser.TextToMetricFamilies(resp.Body) + if err != nil { + // Drop any error during close to report the original error + _ = resp.Body.Close() + return nil, err + } + return metrics, resp.Body.Close() +} diff --git a/go.mod b/go.mod index a1df1b0537e4..5ea4ce4f6408 100644 --- a/go.mod +++ b/go.mod @@ -35,6 +35,7 @@ require ( github.com/pires/go-proxyproto v0.6.2 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 + github.com/prometheus/common v0.42.0 github.com/rs/cors v1.7.0 github.com/shirou/gopsutil 
v3.21.11+incompatible github.com/spf13/cast v1.5.0 @@ -136,7 +137,6 @@ require ( github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index 886f7bbe834e..994c6a845059 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -28,8 +28,8 @@ import ( const ( totalRounds = 50 - metricBlksProcessing = "avalanche_X_blks_processing" - metricBlksAccepted = "avalanche_X_blks_accepted_count" + xBlksProcessingMetric = "avalanche_X_blks_processing" + xBlksAcceptedMetric = "avalanche_X_blks_accepted_count" ) // This test requires that the network not have ongoing blocks and @@ -48,10 +48,15 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { // test avoids the case of a previous test having initiated block // processing but not having completed it. e2e.Eventually(func() bool { - allNodeMetrics, err := tests.GetNodesMetrics(rpcEps, metricBlksProcessing) + allNodeMetrics, err := tests.GetNodesMetrics( + e2e.DefaultContext(), + rpcEps, + ) require.NoError(err) + for _, metrics := range allNodeMetrics { - if metrics[metricBlksProcessing] > 0 { + xBlksProcessing, ok := tests.GetFirstMetricValue(metrics, xBlksProcessingMetric) + if !ok || xBlksProcessing > 0 { return false } } @@ -62,11 +67,6 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { "The cluster is generating ongoing blocks. 
Is this test being run in parallel?", ) - allMetrics := []string{ - metricBlksProcessing, - metricBlksAccepted, - } - // Ensure the same set of 10 keys is used for all tests // by retrieving them outside of runFunc. testKeys := e2e.Env.AllocatePreFundedKeys(10) @@ -102,7 +102,10 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { ) } - metricsBeforeTx, err := tests.GetNodesMetrics(rpcEps, allMetrics...) + metricsBeforeTx, err := tests.GetNodesMetrics( + e2e.DefaultContext(), + rpcEps, + ) require.NoError(err) for _, uri := range rpcEps { tests.Outf("{{green}}metrics at %q:{{/}} %v\n", uri, metricsBeforeTx[uri]) @@ -238,17 +241,21 @@ RECEIVER NEW BALANCE (AFTER) : %21d AVAX require.NoError(err) require.Equal(choices.Accepted, status) - mm, err := tests.GetNodeMetrics(u, allMetrics...) + mm, err := tests.GetNodeMetrics(e2e.DefaultContext(), u) require.NoError(err) prev := metricsBeforeTx[u] // +0 since X-chain tx must have been processed and accepted // by now - require.Equal(mm[metricBlksProcessing], prev[metricBlksProcessing]) + currentXBlksProcessing, _ := tests.GetFirstMetricValue(mm, xBlksProcessingMetric) + previousXBlksProcessing, _ := tests.GetFirstMetricValue(prev, xBlksProcessingMetric) + require.Equal(currentXBlksProcessing, previousXBlksProcessing) // +1 since X-chain tx must have been accepted by now - require.Equal(mm[metricBlksAccepted], prev[metricBlksAccepted]+1) + currentXBlksAccepted, _ := tests.GetFirstMetricValue(mm, xBlksAcceptedMetric) + previousXBlksAccepted, _ := tests.GetFirstMetricValue(prev, xBlksAcceptedMetric) + require.Equal(currentXBlksAccepted, previousXBlksAccepted+1) metricsBeforeTx[u] = mm } diff --git a/tests/http.go b/tests/http.go index 073b6d2df126..b5a7b0ffe097 100644 --- a/tests/http.go +++ b/tests/http.go @@ -4,33 +4,32 @@ package tests import ( - "bufio" "context" "fmt" - "io" - "net/http" - "strconv" - "strings" + + "github.com/ava-labs/avalanchego/api/metrics" + + dto 
"github.com/prometheus/client_model/go" ) // "metric name" -> "metric value" -type NodeMetrics map[string]float64 +type NodeMetrics map[string]*dto.MetricFamily // URI -> "metric name" -> "metric value" type NodesMetrics map[string]NodeMetrics // GetNodeMetrics retrieves the specified metrics the provided node URI. -func GetNodeMetrics(nodeURI string, metricNames ...string) (NodeMetrics, error) { - uri := nodeURI + "/ext/metrics" - return GetMetricsValue(uri, metricNames...) +func GetNodeMetrics(ctx context.Context, nodeURI string) (NodeMetrics, error) { + client := metrics.NewClient(nodeURI) + return client.GetMetrics(ctx) } // GetNodesMetrics retrieves the specified metrics for the provided node URIs. -func GetNodesMetrics(nodeURIs []string, metricNames ...string) (NodesMetrics, error) { +func GetNodesMetrics(ctx context.Context, nodeURIs []string) (NodesMetrics, error) { metrics := make(NodesMetrics, len(nodeURIs)) for _, u := range nodeURIs { var err error - metrics[u], err = GetNodeMetrics(u, metricNames...) 
+ metrics[u], err = GetNodeMetrics(ctx, u) if err != nil { return nil, fmt.Errorf("failed to retrieve metrics for %s: %w", u, err) } @@ -38,63 +37,19 @@ func GetNodesMetrics(nodeURIs []string, metricNames ...string) (NodesMetrics, er return metrics, nil } -func GetMetricsValue(url string, metrics ...string) (map[string]float64, error) { - lines, err := getHTTPLines(url) - if err != nil { - return nil, err - } - mm := make(map[string]float64, len(metrics)) - for _, line := range lines { - if strings.HasPrefix(line, "# ") { - continue - } - found, name := false, "" - for _, name = range metrics { - if !strings.HasPrefix(line, name) { - continue - } - found = true - break - } - if !found || name == "" { // no matched metric found - continue - } - ll := strings.Split(line, " ") - if len(ll) != 2 { - continue - } - fv, err := strconv.ParseFloat(ll[1], 64) - if err != nil { - return nil, fmt.Errorf("failed to parse %q (%w)", ll, err) - } - mm[name] = fv - } - return mm, nil -} - -func getHTTPLines(url string) ([]string, error) { - req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil) - if err != nil { - return nil, err +func GetFirstMetricValue(metrics NodeMetrics, name string) (float64, bool) { + metricFamily, ok := metrics[name] + if !ok || len(metricFamily.Metric) < 1 { + return 0, false } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - - rd := bufio.NewReader(resp.Body) - lines := []string{} - for { - line, err := rd.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - _ = resp.Body.Close() - return nil, err - } - lines = append(lines, strings.TrimSpace(line)) + metric := metricFamily.Metric[0] + switch { + case metric.Gauge != nil: + return metric.Gauge.GetValue(), true + case metric.Counter != nil: + return metric.Counter.GetValue(), true + default: + return 0, false } - return lines, resp.Body.Close() } From e3d889cbfc8a9614767ac90fd97b06bd37691723 Mon Sep 17 00:00:00 2001 From: Dhruba 
Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 29 May 2024 19:45:11 -0400 Subject: [PATCH 026/102] [vms/platformvm] Replace `GetSubnets` with `GetSubnetIDs` in `State` (#3055) --- vms/platformvm/service.go | 14 +++--- vms/platformvm/state/diff.go | 10 ++-- vms/platformvm/state/diff_test.go | 26 +++++------ vms/platformvm/state/mock_state.go | 36 +++++++-------- vms/platformvm/state/state.go | 46 ++++++++----------- vms/platformvm/state/state_test.go | 2 +- .../txs/executor/standard_tx_executor.go | 2 +- vms/platformvm/vm.go | 6 +-- vms/platformvm/vm_test.go | 25 ++++------ 9 files changed, 76 insertions(+), 91 deletions(-) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index fe874c2d1d05..d1bdf60a6529 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -527,14 +527,13 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge getAll := len(args.IDs) == 0 if getAll { - subnets, err := s.vm.state.GetSubnets() // all subnets + subnetIDs, err := s.vm.state.GetSubnetIDs() // all subnets if err != nil { return fmt.Errorf("error getting subnets from database: %w", err) } - response.Subnets = make([]APISubnet, len(subnets)+1) - for i, subnet := range subnets { - subnetID := subnet.ID() + response.Subnets = make([]APISubnet, len(subnetIDs)+1) + for i, subnetID := range subnetIDs { if _, err := s.vm.state.GetSubnetTransformation(subnetID); err == nil { response.Subnets[i] = APISubnet{ ID: subnetID, @@ -569,7 +568,7 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge } } // Include primary network - response.Subnets[len(subnets)] = APISubnet{ + response.Subnets[len(subnetIDs)] = APISubnet{ ID: constants.PrimaryNetworkID, ControlKeys: []string{}, Threshold: avajson.Uint32(0), @@ -1234,14 +1233,13 @@ func (s *Service) GetBlockchains(_ *http.Request, _ *struct{}, response *GetBloc s.vm.ctx.Lock.Lock() defer s.vm.ctx.Lock.Unlock() - subnets, err := 
s.vm.state.GetSubnets() + subnetIDs, err := s.vm.state.GetSubnetIDs() if err != nil { return fmt.Errorf("couldn't retrieve subnets: %w", err) } response.Blockchains = []APIBlockchain{} - for _, subnet := range subnets { - subnetID := subnet.ID() + for _, subnetID := range subnetIDs { chains, err := s.vm.state.GetChains(subnetID) if err != nil { return fmt.Errorf( diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 907c3c56ef7d..91fb01d08fc9 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -43,7 +43,7 @@ type diff struct { modifiedDelegateeRewards map[ids.ID]map[ids.NodeID]uint64 pendingStakerDiffs diffStakers - addedSubnets []*txs.Tx + addedSubnetIDs []ids.ID // Subnet ID --> Owner of the subnet subnetOwners map[ids.ID]fx.Owner // Subnet ID --> Tx that transforms the subnet @@ -272,8 +272,8 @@ func (d *diff) GetPendingStakerIterator() (StakerIterator, error) { return d.pendingStakerDiffs.GetStakerIterator(parentIterator), nil } -func (d *diff) AddSubnet(createSubnetTx *txs.Tx) { - d.addedSubnets = append(d.addedSubnets, createSubnetTx) +func (d *diff) AddSubnet(subnetID ids.ID) { + d.addedSubnetIDs = append(d.addedSubnetIDs, subnetID) } func (d *diff) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { @@ -451,8 +451,8 @@ func (d *diff) Apply(baseState Chain) error { } } } - for _, subnet := range d.addedSubnets { - baseState.AddSubnet(subnet) + for _, subnetID := range d.addedSubnetIDs { + baseState.AddSubnet(subnetID) } for _, tx := range d.transformedSubnets { baseState.AddSubnetTransformation(tx) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 87fd59714029..9b8fe1e6486b 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -257,14 +257,14 @@ func TestDiffSubnet(t *testing.T) { Owner: fx.NewMockOwner(ctrl), }, } - state.AddSubnet(parentStateCreateSubnetTx) + state.AddSubnet(parentStateCreateSubnetTx.ID()) // Verify parent returns 
one subnet - subnets, err := state.GetSubnets() + subnetIDs, err := state.GetSubnetIDs() require.NoError(err) - require.Equal([]*txs.Tx{ - parentStateCreateSubnetTx, - }, subnets) + require.Equal([]ids.ID{ + parentStateCreateSubnetTx.ID(), + }, subnetIDs) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -279,18 +279,18 @@ func TestDiffSubnet(t *testing.T) { Owner: fx.NewMockOwner(ctrl), }, } - diff.AddSubnet(createSubnetTx) + diff.AddSubnet(createSubnetTx.ID()) // Apply diff to parent state require.NoError(diff.Apply(state)) // Verify parent now returns two subnets - subnets, err = state.GetSubnets() + subnetIDs, err = state.GetSubnetIDs() require.NoError(err) - require.Equal([]*txs.Tx{ - parentStateCreateSubnetTx, - createSubnetTx, - }, subnets) + require.Equal([]ids.ID{ + parentStateCreateSubnetTx.ID(), + createSubnetTx.ID(), + }, subnetIDs) } func TestDiffChain(t *testing.T) { @@ -547,7 +547,7 @@ func TestDiffSubnetOwner(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) require.Nil(owner) - state.AddSubnet(createSubnetTx) + state.AddSubnet(subnetID) state.SetSubnetOwner(subnetID, owner1) owner, err = state.GetSubnetOwner(subnetID) @@ -610,7 +610,7 @@ func TestDiffStacking(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) require.Nil(owner) - state.AddSubnet(createSubnetTx) + state.AddSubnet(subnetID) state.SetSubnetOwner(subnetID, owner1) owner, err = state.GetSubnetOwner(subnetID) diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 201c5ef7c5fd..c1321567e6a9 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -75,7 +75,7 @@ func (mr *MockChainMockRecorder) AddRewardUTXO(arg0, arg1 any) *gomock.Call { } // AddSubnet mocks base method. 
-func (m *MockChain) AddSubnet(arg0 *txs.Tx) { +func (m *MockChain) AddSubnet(arg0 ids.ID) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddSubnet", arg0) } @@ -523,7 +523,7 @@ func (mr *MockDiffMockRecorder) AddRewardUTXO(arg0, arg1 any) *gomock.Call { } // AddSubnet mocks base method. -func (m *MockDiff) AddSubnet(arg0 *txs.Tx) { +func (m *MockDiff) AddSubnet(arg0 ids.ID) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddSubnet", arg0) } @@ -1009,7 +1009,7 @@ func (mr *MockStateMockRecorder) AddStatelessBlock(arg0 any) *gomock.Call { } // AddSubnet mocks base method. -func (m *MockState) AddSubnet(arg0 *txs.Tx) { +func (m *MockState) AddSubnet(arg0 ids.ID) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddSubnet", arg0) } @@ -1410,6 +1410,21 @@ func (mr *MockStateMockRecorder) GetStatelessBlock(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockState)(nil).GetStatelessBlock), arg0) } +// GetSubnetIDs mocks base method. +func (m *MockState) GetSubnetIDs() ([]ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetIDs") + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetIDs indicates an expected call of GetSubnetIDs. +func (mr *MockStateMockRecorder) GetSubnetIDs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetIDs", reflect.TypeOf((*MockState)(nil).GetSubnetIDs)) +} + // GetSubnetOwner mocks base method. func (m *MockState) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() @@ -1440,21 +1455,6 @@ func (mr *MockStateMockRecorder) GetSubnetTransformation(arg0 any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockState)(nil).GetSubnetTransformation), arg0) } -// GetSubnets mocks base method. 
-func (m *MockState) GetSubnets() ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnets") - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnets indicates an expected call of GetSubnets. -func (mr *MockStateMockRecorder) GetSubnets() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockState)(nil).GetSubnets)) -} - // GetTimestamp mocks base method. func (m *MockState) GetTimestamp() time.Time { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 7f06f02e86d6..295e67c52ff3 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -104,7 +104,7 @@ type Chain interface { AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) - AddSubnet(createSubnetTx *txs.Tx) + AddSubnet(subnetID ids.ID) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) @@ -134,7 +134,7 @@ type State interface { GetBlockIDAtHeight(height uint64) (ids.ID, error) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) - GetSubnets() ([]*txs.Tx, error) + GetSubnetIDs() ([]ids.ID, error) GetChains(subnetID ids.ID) ([]*txs.Tx, error) // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis @@ -330,10 +330,10 @@ type state struct { utxoDB database.Database utxoState avax.UTXOState - cachedSubnets []*txs.Tx // nil if the subnets haven't been loaded - addedSubnets []*txs.Tx - subnetBaseDB database.Database - subnetDB linkeddb.LinkedDB + cachedSubnetIDs []ids.ID // nil if the subnets haven't been loaded + addedSubnetIDs []ids.ID + subnetBaseDB database.Database + subnetDB linkeddb.LinkedDB // Subnet ID --> Owner of the subnet subnetOwners map[ids.ID]fx.Owner @@ -728,39 +728,35 @@ func (s *state) doneInit() error { return s.singletonDB.Put(InitializedKey, nil) } -func (s *state) GetSubnets() ([]*txs.Tx, error) { - if s.cachedSubnets != nil { - 
return s.cachedSubnets, nil +func (s *state) GetSubnetIDs() ([]ids.ID, error) { + if s.cachedSubnetIDs != nil { + return s.cachedSubnetIDs, nil } subnetDBIt := s.subnetDB.NewIterator() defer subnetDBIt.Release() - txs := []*txs.Tx(nil) + subnetIDs := []ids.ID{} for subnetDBIt.Next() { subnetIDBytes := subnetDBIt.Key() subnetID, err := ids.ToID(subnetIDBytes) if err != nil { return nil, err } - subnetTx, _, err := s.GetTx(subnetID) - if err != nil { - return nil, err - } - txs = append(txs, subnetTx) + subnetIDs = append(subnetIDs, subnetID) } if err := subnetDBIt.Error(); err != nil { return nil, err } - txs = append(txs, s.addedSubnets...) - s.cachedSubnets = txs - return txs, nil + subnetIDs = append(subnetIDs, s.addedSubnetIDs...) + s.cachedSubnetIDs = subnetIDs + return subnetIDs, nil } -func (s *state) AddSubnet(createSubnetTx *txs.Tx) { - s.addedSubnets = append(s.addedSubnets, createSubnetTx) - if s.cachedSubnets != nil { - s.cachedSubnets = append(s.cachedSubnets, createSubnetTx) +func (s *state) AddSubnet(subnetID ids.ID) { + s.addedSubnetIDs = append(s.addedSubnetIDs, subnetID) + if s.cachedSubnetIDs != nil { + s.cachedSubnetIDs = append(s.cachedSubnetIDs, subnetID) } } @@ -2185,14 +2181,12 @@ func (s *state) writeUTXOs() error { } func (s *state) writeSubnets() error { - for _, subnet := range s.addedSubnets { - subnetID := subnet.ID() - + for _, subnetID := range s.addedSubnetIDs { if err := s.subnetDB.Put(subnetID[:], nil); err != nil { return fmt.Errorf("failed to write subnet: %w", err) } } - s.addedSubnets = nil + s.addedSubnetIDs = nil return nil } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index abfd1f34feee..c6241ddc8cc4 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1388,7 +1388,7 @@ func TestStateSubnetOwner(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) require.Nil(owner) - state.AddSubnet(createSubnetTx) + state.AddSubnet(subnetID) 
state.SetSubnetOwner(subnetID, owner1) owner, err = state.GetSubnetOwner(subnetID) diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index 725f1aaff814..2de7d3392ba8 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -140,7 +140,7 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { // Produce the UTXOS avax.Produce(e.State, txID, tx.Outs) // Add the new subnet to the database - e.State.AddSubnet(e.Tx) + e.State.AddSubnet(txID) e.State.SetSubnetOwner(txID, tx.Owner) return nil } diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index f33451b18d48..b6417bbd0c45 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -302,12 +302,12 @@ func (vm *VM) initBlockchains() error { } } } else { - subnets, err := vm.state.GetSubnets() + subnetIDs, err := vm.state.GetSubnetIDs() if err != nil { return err } - for _, subnet := range subnets { - if err := vm.createSubnet(subnet.ID()); err != nil { + for _, subnetID := range subnetIDs { + if err := vm.createSubnet(subnetID); err != nil { return err } } diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 13802ad4dae2..dbd766b2a6e1 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -941,6 +941,7 @@ func TestCreateSubnet(t *testing.T) { }), ) require.NoError(err) + subnetID := createSubnetTx.ID() vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(createSubnetTx)) @@ -954,21 +955,13 @@ func TestCreateSubnet(t *testing.T) { require.NoError(blk.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - _, txStatus, err := vm.state.GetTx(createSubnetTx.ID()) + _, txStatus, err := vm.state.GetTx(subnetID) require.NoError(err) require.Equal(status.Committed, txStatus) - subnets, err := vm.state.GetSubnets() + subnetIDs, err := vm.state.GetSubnetIDs() 
require.NoError(err) - - found := false - for _, subnet := range subnets { - if subnet.ID() == createSubnetTx.ID() { - found = true - break - } - } - require.True(found) + require.Contains(subnetIDs, subnetID) // Now that we've created a new subnet, add a validator to that subnet startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) @@ -982,7 +975,7 @@ func TestCreateSubnet(t *testing.T) { End: uint64(endTime.Unix()), Wght: defaultWeight, }, - Subnet: createSubnetTx.ID(), + Subnet: subnetID, }, []*secp256k1.PrivateKey{keys[0]}, ) @@ -1004,10 +997,10 @@ func TestCreateSubnet(t *testing.T) { require.NoError(err) require.Equal(status.Committed, txStatus) - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) + _, err = vm.state.GetPendingValidator(subnetID, nodeID) require.ErrorIs(err, database.ErrNotFound) - _, err = vm.state.GetCurrentValidator(createSubnetTx.ID(), nodeID) + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) require.NoError(err) // fast forward clock to time validator should stop validating @@ -1017,10 +1010,10 @@ func TestCreateSubnet(t *testing.T) { require.NoError(blk.Verify(context.Background())) require.NoError(blk.Accept(context.Background())) // remove validator from current validator set - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) + _, err = vm.state.GetPendingValidator(subnetID, nodeID) require.ErrorIs(err, database.ErrNotFound) - _, err = vm.state.GetCurrentValidator(createSubnetTx.ID(), nodeID) + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) require.ErrorIs(err, database.ErrNotFound) } From 3ccc4cb176658a0738e78c842db8c3368ec49725 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 30 May 2024 10:33:23 -0400 Subject: [PATCH 027/102] Implement `constants.VMName` (#3058) --- tests/fixture/subnet/xsvm.go | 4 +-- utils/constants/vm_ids.go | 29 +++++++++++++++++++ vms/example/xsvm/cmd/chain/create/cmd.go | 4 +-- vms/example/xsvm/cmd/version/cmd.go | 5 ++-- 
vms/example/xsvm/constants.go | 21 ++++---------- vms/example/xsvm/vm.go | 3 +- .../primary/examples/create-chain/main.go | 4 +-- 7 files changed, 46 insertions(+), 24 deletions(-) diff --git a/tests/fixture/subnet/xsvm.go b/tests/fixture/subnet/xsvm.go index 28fb017da5a9..c5bb03bc2026 100644 --- a/tests/fixture/subnet/xsvm.go +++ b/tests/fixture/subnet/xsvm.go @@ -8,8 +8,8 @@ import ( "time" "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/example/xsvm" "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" ) @@ -35,7 +35,7 @@ func NewXSVMOrPanic(name string, key *secp256k1.PrivateKey, nodes ...*tmpnet.Nod Name: name, Chains: []*tmpnet.Chain{ { - VMID: xsvm.ID, + VMID: constants.XSVMID, Genesis: genesisBytes, PreFundedKey: key, }, diff --git a/utils/constants/vm_ids.go b/utils/constants/vm_ids.go index 9fda498f1f31..c0c4773590a5 100644 --- a/utils/constants/vm_ids.go +++ b/utils/constants/vm_ids.go @@ -5,8 +5,37 @@ package constants import "github.com/ava-labs/avalanchego/ids" +const ( + PlatformVMName = "platformvm" + AVMName = "avm" + EVMName = "evm" + SubnetEVMName = "subnetevm" + XSVMName = "xsvm" +) + var ( PlatformVMID = ids.ID{'p', 'l', 'a', 't', 'f', 'o', 'r', 'm', 'v', 'm'} AVMID = ids.ID{'a', 'v', 'm'} EVMID = ids.ID{'e', 'v', 'm'} + SubnetEVMID = ids.ID{'s', 'u', 'b', 'n', 'e', 't', 'e', 'v', 'm'} + XSVMID = ids.ID{'x', 's', 'v', 'm'} ) + +// VMName returns the name of the VM with the provided ID. If a human readable +// name isn't known, then the formatted ID is returned. 
+func VMName(vmID ids.ID) string { + switch vmID { + case PlatformVMID: + return PlatformVMName + case AVMID: + return AVMName + case EVMID: + return EVMName + case SubnetEVMID: + return SubnetEVMName + case XSVMID: + return XSVMName + default: + return vmID.String() + } +} diff --git a/vms/example/xsvm/cmd/chain/create/cmd.go b/vms/example/xsvm/cmd/chain/create/cmd.go index 984ff45df8b0..a08edf507117 100644 --- a/vms/example/xsvm/cmd/chain/create/cmd.go +++ b/vms/example/xsvm/cmd/chain/create/cmd.go @@ -9,8 +9,8 @@ import ( "github.com/spf13/cobra" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/example/xsvm" "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" @@ -72,7 +72,7 @@ func createFunc(c *cobra.Command, args []string) error { createChainTxID, err := pWallet.IssueCreateChainTx( config.SubnetID, genesisBytes, - xsvm.ID, + constants.XSVMID, nil, config.Name, common.WithContext(ctx), diff --git a/vms/example/xsvm/cmd/version/cmd.go b/vms/example/xsvm/cmd/version/cmd.go index 1c956c6a9b00..471ccfd10aa6 100644 --- a/vms/example/xsvm/cmd/version/cmd.go +++ b/vms/example/xsvm/cmd/version/cmd.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/example/xsvm" ) @@ -29,8 +30,8 @@ func Command() *cobra.Command { func versionFunc(*cobra.Command, []string) error { fmt.Printf( format, - xsvm.Name, - xsvm.ID, + constants.XSVMName, + constants.XSVMID, xsvm.Version, version.RPCChainVMProtocol, ) diff --git a/vms/example/xsvm/constants.go b/vms/example/xsvm/constants.go index eb2199211ef7..7628cc56b176 100644 --- a/vms/example/xsvm/constants.go +++ 
b/vms/example/xsvm/constants.go @@ -3,19 +3,10 @@ package xsvm -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/version" -) +import "github.com/ava-labs/avalanchego/version" -const Name = "xsvm" - -var ( - ID = ids.ID{'x', 's', 'v', 'm'} - - Version = &version.Semantic{ - Major: 1, - Minor: 0, - Patch: 4, - } -) +var Version = &version.Semantic{ + Major: 1, + Minor: 0, + Patch: 4, +} diff --git a/vms/example/xsvm/vm.go b/vms/example/xsvm/vm.go index ef59e6c51657..526fc47c499d 100644 --- a/vms/example/xsvm/vm.go +++ b/vms/example/xsvm/vm.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/example/xsvm/api" @@ -124,7 +125,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { ) return map[string]http.Handler{ "": server, - }, server.RegisterService(api, Name) + }, server.RegisterService(api, constants.XSVMName) } func (*VM) HealthCheck(context.Context) (interface{}, error) { diff --git a/wallet/subnet/primary/examples/create-chain/main.go b/wallet/subnet/primary/examples/create-chain/main.go index 0382b8bb815f..c626086bce89 100644 --- a/wallet/subnet/primary/examples/create-chain/main.go +++ b/wallet/subnet/primary/examples/create-chain/main.go @@ -11,8 +11,8 @@ import ( "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/example/xsvm" "github.com/ava-labs/avalanchego/vms/secp256k1fx" 
"github.com/ava-labs/avalanchego/wallet/subnet/primary" @@ -33,7 +33,7 @@ func main() { }, }, } - vmID := xsvm.ID + vmID := constants.XSVMID name := "let there" subnetID, err := ids.FromString(subnetIDStr) From ae4f88464556c31685d5a5886a69da02a72c0af6 Mon Sep 17 00:00:00 2001 From: marun Date: Thu, 30 May 2024 19:59:13 +0200 Subject: [PATCH 028/102] [testing] Remove superfluous gomega dep (#3063) --- go.mod | 2 -- tests/e2e/e2e_test.go | 3 --- tests/upgrade/upgrade_test.go | 2 -- 3 files changed, 7 deletions(-) diff --git a/go.mod b/go.mod index 5ea4ce4f6408..63a9d5ec5152 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,6 @@ require ( github.com/mr-tron/base58 v1.2.0 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d github.com/onsi/ginkgo/v2 v2.13.1 - github.com/onsi/gomega v1.29.0 github.com/pires/go-proxyproto v0.6.2 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 @@ -110,7 +109,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 9d235bc363da..f33a3524d2a8 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -6,8 +6,6 @@ package e2e_test import ( "testing" - "github.com/onsi/gomega" - // ensure test packages are scanned by ginkgo _ "github.com/ava-labs/avalanchego/tests/e2e/banff" _ "github.com/ava-labs/avalanchego/tests/e2e/c" @@ -24,7 +22,6 @@ import ( ) func TestE2E(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) ginkgo.RunSpecs(t, 
"e2e test suites") } diff --git a/tests/upgrade/upgrade_test.go b/tests/upgrade/upgrade_test.go index d3632853bc31..c885a0821b8c 100644 --- a/tests/upgrade/upgrade_test.go +++ b/tests/upgrade/upgrade_test.go @@ -9,7 +9,6 @@ import ( "testing" "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/tests/fixture/e2e" @@ -17,7 +16,6 @@ import ( ) func TestUpgrade(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) ginkgo.RunSpecs(t, "upgrade test suites") } From 8d18b618be290b0e0fedbbedc76398afba68e6ec Mon Sep 17 00:00:00 2001 From: marun Date: Thu, 30 May 2024 21:45:52 +0200 Subject: [PATCH 029/102] [antithesis] Enable workload instrumentation (#3059) Signed-off-by: marun Co-authored-by: Stephen Buttolph --- go.mod | 3 +- go.sum | 8 +- scripts/build_antithesis_images.sh | 28 ++++- scripts/build_test.sh | 6 + tests/antithesis/README.md | 15 +++ .../Dockerfile.builder-instrumented | 46 +++++++ .../Dockerfile.builder-uninstrumented | 17 +++ tests/antithesis/avalanchego/Dockerfile.node | 43 +------ .../avalanchego/Dockerfile.workload | 22 ++-- tests/antithesis/avalanchego/main.go | 117 +++++++++++++++++- tests/antithesis/xsvm/Dockerfile.node | 50 ++------ tests/antithesis/xsvm/Dockerfile.workload | 22 ++-- tests/antithesis/xsvm/main.go | 18 +++ 13 files changed, 282 insertions(+), 113 deletions(-) create mode 100644 tests/antithesis/avalanchego/Dockerfile.builder-instrumented create mode 100644 tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented diff --git a/go.mod b/go.mod index 63a9d5ec5152..d7965b58658f 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ go 1.21.10 require ( github.com/DataDog/zstd v1.5.2 github.com/NYTimes/gziphandler v1.1.1 + github.com/antithesishq/antithesis-sdk-go v0.3.8 github.com/ava-labs/coreth v0.13.5-remove-optional-gatherer.2 github.com/ava-labs/ledger-avalanche/go 
v0.0.0-20231102202641-ae2ebdaeac34 github.com/btcsuite/btcd/btcutil v1.1.3 @@ -158,7 +159,7 @@ require ( go.uber.org/multierr v1.10.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.16.0 // indirect + golang.org/x/tools v0.17.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 7b1761abed8a..da0883a3048d 100644 --- a/go.sum +++ b/go.sum @@ -59,6 +59,8 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBA github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= +github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/coreth v0.13.5-remove-optional-gatherer.2 h1:RX9DcvgWxq42B2aiGzk77Y8w2bcB7ApO/Cdj9hA6QoE= github.com/ava-labs/coreth v0.13.5-remove-optional-gatherer.2/go.mod h1:cm5c12xo5NiTgtbmeduv8i2nYdzgkczz9Wm3yiwwTRU= @@ -700,8 +702,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.14.0 
h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -916,8 +916,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/scripts/build_antithesis_images.sh b/scripts/build_antithesis_images.sh index 8e49d1bcb700..964b966cf177 100755 --- a/scripts/build_antithesis_images.sh +++ b/scripts/build_antithesis_images.sh @@ -41,19 +41,32 @@ function build_images { local node_image_name="${base_image_name}-node:${TAG}" local workload_image_name="${base_image_name}-workload:${TAG}" local config_image_name="${base_image_name}-config:${TAG}" + # The same builder image is used to build node and workload images for all test + # setups. 
It is not intended to be pushed. + local builder_image_name="antithesis-avalanchego-builder:${TAG}" # Define dockerfiles local base_dockerfile="${AVALANCHE_PATH}/tests/antithesis/${test_setup}/Dockerfile" + local builder_dockerfile="${base_dockerfile}.builder-instrumented" local node_dockerfile="${base_dockerfile}.node" + # Working directory for instrumented builds + local builder_workdir="/avalanchego_instrumented/customer" if [[ "$(go env GOARCH)" == "arm64" ]]; then - # Antithesis instrumentation is only supported on amd64. On apple silicon (arm64), the - # uninstrumented Dockerfile will be used to build the node image to enable local test - # development. + # Antithesis instrumentation is only supported on amd64. On apple silicon (arm64), + # uninstrumented Dockerfiles will be used to enable local test development. + builder_dockerfile="${base_dockerfile}.builder-uninstrumented" node_dockerfile="${uninstrumented_node_dockerfile}" + # Working directory for uninstrumented builds + builder_workdir="/build" fi # Define default build command - local docker_cmd="docker buildx build --build-arg GO_VERSION=${GO_VERSION} --build-arg NODE_IMAGE=${node_image_name}" + local docker_cmd="docker buildx build\ + --build-arg GO_VERSION=${GO_VERSION}\ + --build-arg NODE_IMAGE=${node_image_name}\ + --build-arg BUILDER_IMAGE=${builder_image_name}\ + --build-arg BUILDER_WORKDIR=${builder_workdir}\ + --build-arg TAG=${TAG}" if [[ "${test_setup}" == "xsvm" ]]; then # The xsvm node image is built on the avalanchego node image, which is assumed to have already been @@ -69,6 +82,13 @@ function build_images { docker_cmd="${docker_cmd} --push" fi + if [[ "${test_setup}" == "avalanchego" ]]; then + # Build the image that enables compiling golang binaries for the node and workload + # image builds. The builder image is intended to enable building instrumented binaries + # if built on amd64 and non-instrumented binaries if built on arm64. 
+ ${docker_cmd} -t "${builder_image_name}" -f "${builder_dockerfile}" "${AVALANCHE_PATH}" + fi + # Build node image first to allow the workload image to use it. ${docker_cmd} -t "${node_image_name}" -f "${node_dockerfile}" "${AVALANCHE_PATH}" diff --git a/scripts/build_test.sh b/scripts/build_test.sh index cfff1a2fc9fd..c0c9b72e3230 100755 --- a/scripts/build_test.sh +++ b/scripts/build_test.sh @@ -9,6 +9,12 @@ source "$AVALANCHE_PATH"/scripts/constants.sh EXCLUDED_TARGETS="| grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade" +if [[ "$(go env GOOS)" == "windows" ]]; then + # Test discovery for the antithesis test setups is broken due to + # their dependence on the linux-only Antithesis SDK. + EXCLUDED_TARGETS="${EXCLUDED_TARGETS} | grep -v tests/antithesis" +fi + TEST_TARGETS="$(eval "go list ./... ${EXCLUDED_TARGETS}")" # shellcheck disable=SC2086 diff --git a/tests/antithesis/README.md b/tests/antithesis/README.md index c838b64780a4..3acb7746104a 100644 --- a/tests/antithesis/README.md +++ b/tests/antithesis/README.md @@ -49,6 +49,21 @@ In addition, github workflows are suggested to ensure `scripts/tests.build_antithesis_images.sh` runs against PRs and `scripts/build_antithesis_images.sh` runs against pushes. +### Use of a builder image + +To simplify building instrumented (for running in CI) and +non-instrumented (for running locally) versions of the workload and +node images, a common builder image is used. If on an amd64 host, +`tests/antithesis/avalanchego/Dockerfile.builder-instrumented` is used +to create an instrumented builder. On an arm64 host, +`tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented` is +used to create an uninstrumented builder. In both cases, the builder +image is based on the default golang image and will include the source +code necessary to build the node and workload binaries. 
The +alternative would require duplicating builder setup for instrumented +and non-instrumented builds for the workload and node images of each +test setup. + ## Troubleshooting a test setup ### Running a workload directly diff --git a/tests/antithesis/avalanchego/Dockerfile.builder-instrumented b/tests/antithesis/avalanchego/Dockerfile.builder-instrumented new file mode 100644 index 000000000000..ffc05256e30e --- /dev/null +++ b/tests/antithesis/avalanchego/Dockerfile.builder-instrumented @@ -0,0 +1,46 @@ +# The version is supplied as a build argument rather than hard-coded +# to minimize the cost of version changes. +ARG GO_VERSION + +# Antithesis: Getting the Antithesis golang instrumentation library +FROM docker.io/antithesishq/go-instrumentor AS instrumentor + +# ============= Instrumentation Stage ================ +FROM golang:$GO_VERSION-bullseye + +WORKDIR /build +# Copy and download avalanche dependencies using go mod +COPY go.mod . +COPY go.sum . +RUN go mod download + +# Copy the code into the container +COPY . . + +# Ensure pre-existing builds are not available for inclusion in the final image +RUN [ -d ./build ] && rm -rf ./build/* || true + +# Keep the commit hash to easily verify the exact version that is running +RUN git rev-parse HEAD > ./commit_hash.txt + +# Copy the instrumentor and supporting files to their correct locations +COPY --from=instrumentor /opt/antithesis /opt/antithesis +COPY --from=instrumentor /opt/antithesis/lib /lib + +# Create the destination output directory for the instrumented code +RUN mkdir -p /avalanchego_instrumented + +# Park the .git file in a safe location +RUN mkdir -p /opt/tmp/ +RUN cp -r .git /opt/tmp/ + +# Instrument avalanchego +RUN /opt/antithesis/bin/goinstrumentor \ + -stderrthreshold=INFO \ + -antithesis /opt/antithesis/instrumentation \ + . 
\ + /avalanchego_instrumented + +WORKDIR /avalanchego_instrumented/customer +RUN go mod download +RUN ln -s /opt/tmp/.git .git diff --git a/tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented b/tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented new file mode 100644 index 000000000000..07d3fe8b882c --- /dev/null +++ b/tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented @@ -0,0 +1,17 @@ +# The version is supplied as a build argument rather than hard-coded +# to minimize the cost of version changes. +ARG GO_VERSION + +FROM golang:$GO_VERSION-bullseye + +WORKDIR /build +# Copy and download avalanche dependencies using go mod +COPY go.mod . +COPY go.sum . +RUN go mod download + +# Copy the code into the container +COPY . . + +# Ensure pre-existing builds are not available for inclusion in the final image +RUN [ -d ./build ] && rm -rf ./build/* || true diff --git a/tests/antithesis/avalanchego/Dockerfile.node b/tests/antithesis/avalanchego/Dockerfile.node index 6dc1cb782cef..2b19adbb93c1 100644 --- a/tests/antithesis/avalanchego/Dockerfile.node +++ b/tests/antithesis/avalanchego/Dockerfile.node @@ -1,46 +1,11 @@ -# The version is supplied as a build argument rather than hard-coded -# to minimize the cost of version changes. -ARG GO_VERSION - -# Antithesis: Getting the Antithesis golang instrumentation library -FROM docker.io/antithesishq/go-instrumentor AS instrumentor +# TAG should identify the builder image +ARG TAG # ============= Compilation Stage ================ -FROM golang:$GO_VERSION-bullseye AS builder - -WORKDIR /build -# Copy and download avalanche dependencies using go mod -COPY go.mod . -COPY go.sum . -RUN go mod download - -# Copy the code into the container -COPY . . 
- -# Keep the commit hash to easily verify the exact version that is running -RUN git rev-parse HEAD > ./commit_hash.txt - -# Copy the instrumentor and supporting files to their correct locations -COPY --from=instrumentor /opt/antithesis /opt/antithesis -COPY --from=instrumentor /opt/antithesis/lib /lib - -# Create the destination output directory for the instrumented code -RUN mkdir -p /avalanchego_instrumented - -# Park the .git file in a safe location -RUN mkdir -p /opt/tmp/ -RUN cp -r .git /opt/tmp/ - -# Instrument avalanchego -RUN /opt/antithesis/bin/goinstrumentor \ - -stderrthreshold=INFO \ - -antithesis /opt/antithesis/instrumentation \ - . \ - /avalanchego_instrumented +FROM antithesis-avalanchego-builder:$TAG AS builder +# The workdir is hard-coded since this Dockerfile is only intended for instrumented builds. WORKDIR /avalanchego_instrumented/customer -RUN go mod download -RUN ln -s /opt/tmp/.git .git # Build avalanchego with race detection (-r) enabled. RUN ./scripts/build.sh -r diff --git a/tests/antithesis/avalanchego/Dockerfile.workload b/tests/antithesis/avalanchego/Dockerfile.workload index c5f31243dd9b..4cd37123773e 100644 --- a/tests/antithesis/avalanchego/Dockerfile.workload +++ b/tests/antithesis/avalanchego/Dockerfile.workload @@ -1,21 +1,16 @@ -# The version is supplied as a build argument rather than hard-coded -# to minimize the cost of version changes. -ARG GO_VERSION +# TAG should identify the builder image +ARG TAG # NODE_IMAGE needs to identify an existing node image and should include the tag ARG NODE_IMAGE # ============= Compilation Stage ================ -FROM golang:$GO_VERSION-bullseye AS builder +FROM antithesis-avalanchego-builder:$TAG AS builder -WORKDIR /build -# Copy and download avalanche dependencies using go mod -COPY go.mod . -COPY go.sum . -RUN go mod download +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR -# Copy the code into the container -COPY . . 
+WORKDIR $BUILDER_WORKDIR # Build the workload RUN ./scripts/build_antithesis_avalanchego_workload.sh @@ -24,7 +19,10 @@ RUN ./scripts/build_antithesis_avalanchego_workload.sh # Base the workflow on the node image to support bootstrap testing FROM $NODE_IMAGE AS execution +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR + # Copy the executable into the container -COPY --from=builder /build/build/antithesis-avalanchego-workload ./workload +COPY --from=builder $BUILDER_WORKDIR/build/antithesis-avalanchego-workload ./workload CMD [ "./workload" ] diff --git a/tests/antithesis/avalanchego/main.go b/tests/antithesis/avalanchego/main.go index 57b12a51b6c2..26a29fab749b 100644 --- a/tests/antithesis/avalanchego/main.go +++ b/tests/antithesis/avalanchego/main.go @@ -11,6 +11,9 @@ import ( "os" "time" + "github.com/antithesishq/antithesis-sdk-go/assert" + "github.com/antithesishq/antithesis-sdk-go/lifecycle" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" @@ -128,6 +131,11 @@ func main() { } } + lifecycle.SetupComplete(map[string]any{ + "msg": "initialized workers", + "numWorkers": NumKeys, + }) + for _, w := range workloads[1:] { go w.run(ctx) } @@ -168,6 +176,11 @@ func (w *workload) run(ctx context.Context) { pAVAX = pBalances[avaxAssetID] ) log.Printf("wallet starting with %d X-chain nAVAX and %d P-chain nAVAX", xAVAX, pAVAX) + assert.Reachable("wallet starting", map[string]any{ + "worker": w.id, + "xBalance": xAVAX, + "pBalance": pAVAX, + }) for { val, err := rand.Int(rand.Reader, big.NewInt(5)) @@ -212,6 +225,10 @@ func (w *workload) issueXChainBaseTx(ctx context.Context) { balances, err := xBuilder.GetFTBalance() if err != nil { log.Printf("failed to fetch X-chain balances: %s", err) + assert.Unreachable("failed to fetch X-chain balances", map[string]any{ + "worker": w.id, + "err": err, + }) 
return } @@ -246,6 +263,10 @@ func (w *workload) issueXChainBaseTx(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain baseTx: %s", err) + assert.Unreachable("failed to issue X-chain baseTx", map[string]any{ + "worker": w.id, + "err": err, + }) return } log.Printf("issued new X-chain baseTx %s in %s", baseTx.ID(), time.Since(baseStartTime)) @@ -262,6 +283,10 @@ func (w *workload) issueXChainCreateAssetTx(ctx context.Context) { balances, err := xBuilder.GetFTBalance() if err != nil { log.Printf("failed to fetch X-chain balances: %s", err) + assert.Unreachable("failed to fetch X-chain balances", map[string]any{ + "worker": w.id, + "err": err, + }) return } @@ -295,6 +320,10 @@ func (w *workload) issueXChainCreateAssetTx(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain create asset transaction: %s", err) + assert.Unreachable("failed to issue X-chain create asset transaction", map[string]any{ + "worker": w.id, + "err": err, + }) return } log.Printf("created new X-chain asset %s in %s", createAssetTx.ID(), time.Since(createAssetStartTime)) @@ -311,6 +340,10 @@ func (w *workload) issueXChainOperationTx(ctx context.Context) { balances, err := xBuilder.GetFTBalance() if err != nil { log.Printf("failed to fetch X-chain balances: %s", err) + assert.Unreachable("failed to fetch X-chain balances", map[string]any{ + "worker": w.id, + "err": err, + }) return } @@ -345,6 +378,10 @@ func (w *workload) issueXChainOperationTx(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain create asset transaction: %s", err) + assert.Unreachable("failed to issue X-chain create asset transaction", map[string]any{ + "worker": w.id, + "err": err, + }) return } log.Printf("created new X-chain asset %s in %s", createAssetTx.ID(), time.Since(createAssetStartTime)) @@ -356,6 +393,10 @@ func (w *workload) issueXChainOperationTx(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain operation transaction: 
%s", err) + assert.Unreachable("failed to issue X-chain operation transaction", map[string]any{ + "worker": w.id, + "err": err, + }) return } log.Printf("issued X-chain operation tx %s in %s", operationTx.ID(), time.Since(operationStartTime)) @@ -375,6 +416,10 @@ func (w *workload) issueXToPTransfer(ctx context.Context) { balances, err := xBuilder.GetFTBalance() if err != nil { log.Printf("failed to fetch X-chain balances: %s", err) + assert.Unreachable("failed to fetch X-chain balances", map[string]any{ + "worker": w.id, + "err": err, + }) return } @@ -411,6 +456,10 @@ func (w *workload) issueXToPTransfer(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain export transaction: %s", err) + assert.Unreachable("failed to issue X-chain export transaction", map[string]any{ + "worker": w.id, + "err": err, + }) return } log.Printf("created X-chain export transaction %s in %s", exportTx.ID(), time.Since(exportStartTime)) @@ -425,6 +474,10 @@ func (w *workload) issueXToPTransfer(ctx context.Context) { ) if err != nil { log.Printf("failed to issue P-chain import transaction: %s", err) + assert.Unreachable("failed to issue P-chain import transaction", map[string]any{ + "worker": w.id, + "err": err, + }) return } log.Printf("created P-chain import transaction %s in %s", importTx.ID(), time.Since(importStartTime)) @@ -445,6 +498,10 @@ func (w *workload) issuePToXTransfer(ctx context.Context) { balances, err := pBuilder.GetBalance() if err != nil { log.Printf("failed to fetch P-chain balances: %s", err) + assert.Unreachable("failed to fetch P-chain balances", map[string]any{ + "worker": w.id, + "err": err, + }) return } @@ -481,6 +538,10 @@ func (w *workload) issuePToXTransfer(ctx context.Context) { ) if err != nil { log.Printf("failed to issue P-chain export transaction: %s", err) + assert.Unreachable("failed to issue P-chain export transaction", map[string]any{ + "worker": w.id, + "err": err, + }) return } log.Printf("created P-chain export transaction 
%s in %s", exportTx.ID(), time.Since(exportStartTime)) @@ -492,6 +553,10 @@ func (w *workload) issuePToXTransfer(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain import transaction: %s", err) + assert.Unreachable("failed to issue X-chain import transaction", map[string]any{ + "worker": w.id, + "err": err, + }) return } log.Printf("created X-chain import transaction %s in %s", importTx.ID(), time.Since(importStartTime)) @@ -519,10 +584,22 @@ func (w *workload) confirmXChainTx(ctx context.Context, tx *xtxs.Tx) { status, err := client.ConfirmTx(ctx, txID, 100*time.Millisecond) if err != nil { log.Printf("failed to confirm X-chain transaction %s on %s: %s", txID, uri, err) + assert.Unreachable("failed to determine the status of an X-chain transaction", map[string]any{ + "worker": w.id, + "txID": txID, + "uri": uri, + "err": err, + }) return } if status != choices.Accepted { log.Printf("failed to confirm X-chain transaction %s on %s: status == %s", txID, uri, status) + assert.Unreachable("failed to confirm an X-chain transaction", map[string]any{ + "worker": w.id, + "txID": txID, + "uri": uri, + "status": status, + }) return } log.Printf("confirmed X-chain transaction %s on %s", txID, uri) @@ -536,11 +613,23 @@ func (w *workload) confirmPChainTx(ctx context.Context, tx *ptxs.Tx) { client := platformvm.NewClient(uri) s, err := client.AwaitTxDecided(ctx, txID, 100*time.Millisecond) if err != nil { - log.Printf("failed to confirm P-chain transaction %s on %s: %s", txID, uri, err) + log.Printf("failed to determine the status of a P-chain transaction %s on %s: %s", txID, uri, err) + assert.Unreachable("failed to determine the status of a P-chain transaction", map[string]any{ + "worker": w.id, + "txID": txID, + "uri": uri, + "err": err, + }) return } if s.Status != status.Committed { log.Printf("failed to confirm P-chain transaction %s on %s: status == %s", txID, uri, s.Status) + assert.Unreachable("failed to confirm a P-chain transaction", 
map[string]any{ + "worker": w.id, + "txID": txID, + "uri": uri, + "status": s.Status, + }) return } log.Printf("confirmed P-chain transaction %s on %s", txID, uri) @@ -566,6 +655,12 @@ func (w *workload) verifyXChainTxConsumedUTXOs(ctx context.Context, tx *xtxs.Tx) ) if err != nil { log.Printf("failed to fetch X-chain UTXOs on %s: %s", uri, err) + assert.Unreachable("failed to fetch X-chain UTXOs", map[string]any{ + "worker": w.id, + "txID": txID, + "uri": uri, + "err": err, + }) return } @@ -574,6 +669,13 @@ func (w *workload) verifyXChainTxConsumedUTXOs(ctx context.Context, tx *xtxs.Tx) _, err := utxos.GetUTXO(ctx, chainID, chainID, input) if err != database.ErrNotFound { log.Printf("failed to verify that X-chain UTXO %s was deleted on %s after %s", input, uri, txID) + assert.Unreachable("failed to verify that X-chain UTXO was deleted", map[string]any{ + "worker": w.id, + "uri": uri, + "txID": txID, + "utxoID": input, + "err": err, + }) return } } @@ -599,6 +701,12 @@ func (w *workload) verifyPChainTxConsumedUTXOs(ctx context.Context, tx *ptxs.Tx) ) if err != nil { log.Printf("failed to fetch P-chain UTXOs on %s: %s", uri, err) + assert.Unreachable("failed to fetch P-chain UTXOs", map[string]any{ + "worker": w.id, + "uri": uri, + "txID": txID, + "err": err, + }) return } @@ -607,6 +715,13 @@ func (w *workload) verifyPChainTxConsumedUTXOs(ctx context.Context, tx *ptxs.Tx) _, err := utxos.GetUTXO(ctx, constants.PlatformChainID, constants.PlatformChainID, input) if err != database.ErrNotFound { log.Printf("failed to verify that P-chain UTXO %s was deleted on %s after %s", input, uri, txID) + assert.Unreachable("failed to verify that P-chain UTXO was deleted", map[string]any{ + "worker": w.id, + "uri": uri, + "txID": txID, + "utxoID": input, + "err": err, + }) return } } diff --git a/tests/antithesis/xsvm/Dockerfile.node b/tests/antithesis/xsvm/Dockerfile.node index 1d8d673026d8..67a1aa01fca9 100644 --- a/tests/antithesis/xsvm/Dockerfile.node +++ 
b/tests/antithesis/xsvm/Dockerfile.node @@ -1,49 +1,16 @@ -# The version is supplied as a build argument rather than hard-coded -# to minimize the cost of version changes. -ARG GO_VERSION +# TAG should identify the builder image +ARG TAG # AVALANCHEGO_NODE_IMAGE needs to identify an existing avalanchego node image and should include the tag ARG AVALANCHEGO_NODE_IMAGE -# Antithesis: Getting the Antithesis golang instrumentation library -FROM docker.io/antithesishq/go-instrumentor AS instrumentor - # ============= Compilation Stage ================ -FROM golang:$GO_VERSION-bullseye AS builder - -WORKDIR /build -# Copy and download avalanche dependencies using go mod -COPY go.mod . -COPY go.sum . -RUN go mod download - -# Copy the code into the container -COPY . . - -# Keep the commit hash to easily verify the exact version that is running -RUN git rev-parse HEAD > ./commit_hash.txt +FROM antithesis-avalanchego-builder:$TAG AS builder -# Copy the instrumentor and supporting files to their correct locations -COPY --from=instrumentor /opt/antithesis /opt/antithesis -COPY --from=instrumentor /opt/antithesis/lib /lib +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR -# Create the destination output directory for the instrumented code -RUN mkdir -p /avalanchego_instrumented - -# Park the .git file in a safe location -RUN mkdir -p /opt/tmp/ -RUN cp -r .git /opt/tmp/ - -# Instrument avalanchego -RUN /opt/antithesis/bin/goinstrumentor \ - -stderrthreshold=INFO \ - -antithesis /opt/antithesis/instrumentation \ - . \ - /avalanchego_instrumented - -WORKDIR /avalanchego_instrumented/customer -RUN go mod download -RUN ln -s /opt/tmp/.git .git +WORKDIR $BUILDER_WORKDIR # Build xsvm VM RUN ./scripts/build_xsvm.sh @@ -53,9 +20,12 @@ FROM $AVALANCHEGO_NODE_IMAGE AS execution # The commit hash and antithesis dependencies should be part of the base image. 
+# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR + # Copy the executable into the container RUN mkdir -p /root/.avalanchego/plugins -COPY --from=builder /avalanchego_instrumented/customer/build/xsvm \ +COPY --from=builder $BUILDER_WORKDIR/build/xsvm \ /root/.avalanchego/plugins/v3m4wPxaHpvGr8qfMeyK6PRW3idZrPHmYcMTt7oXdK47yurVH # The node image's entrypoint will be reused. diff --git a/tests/antithesis/xsvm/Dockerfile.workload b/tests/antithesis/xsvm/Dockerfile.workload index f9da9009fb05..1ca2f56b862a 100644 --- a/tests/antithesis/xsvm/Dockerfile.workload +++ b/tests/antithesis/xsvm/Dockerfile.workload @@ -1,21 +1,16 @@ -# The version is supplied as a build argument rather than hard-coded -# to minimize the cost of version changes. -ARG GO_VERSION +# TAG should identify the builder image +ARG TAG # NODE_IMAGE needs to identify an existing node image and should include the tag ARG NODE_IMAGE # ============= Compilation Stage ================ -FROM golang:$GO_VERSION-bullseye AS builder +FROM antithesis-avalanchego-builder:$TAG AS builder -WORKDIR /build -# Copy and download avalanche dependencies using go mod -COPY go.mod . -COPY go.sum . -RUN go mod download +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR -# Copy the code into the container -COPY . . 
+WORKDIR $BUILDER_WORKDIR # Build the workload RUN ./scripts/build_antithesis_xsvm_workload.sh @@ -24,7 +19,10 @@ RUN ./scripts/build_antithesis_xsvm_workload.sh # Base the workflow on the node image to support bootstrap testing FROM $NODE_IMAGE AS execution +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR + # Copy the executable into the container -COPY --from=builder /build/build/antithesis-xsvm-workload ./workload +COPY --from=builder $BUILDER_WORKDIR/build/antithesis-xsvm-workload ./workload CMD [ "./workload" ] diff --git a/tests/antithesis/xsvm/main.go b/tests/antithesis/xsvm/main.go index 84371752d4ef..ecda411fc5ad 100644 --- a/tests/antithesis/xsvm/main.go +++ b/tests/antithesis/xsvm/main.go @@ -11,6 +11,9 @@ import ( "os" "time" + "github.com/antithesishq/antithesis-sdk-go/assert" + "github.com/antithesishq/antithesis-sdk-go/lifecycle" + "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests/antithesis" @@ -92,6 +95,11 @@ func main() { } } + lifecycle.SetupComplete(map[string]any{ + "msg": "initialized workers", + "numWorkers": NumKeys, + }) + for _, w := range workloads[1:] { go w.run(ctx) } @@ -120,6 +128,10 @@ func (w *workload) run(ctx context.Context) { log.Fatalf("failed to fetch balance: %s", err) } log.Printf("worker %d starting with a balance of %d", w.id, balance) + assert.Reachable("worker starting", map[string]any{ + "worker": w.id, + "balance": balance, + }) for { log.Printf("worker %d executing transfer", w.id) @@ -161,6 +173,12 @@ func (w *workload) confirmTransferTx(ctx context.Context, tx *status.TxIssuance) client := api.NewClient(uri, w.chainID.String()) if err := api.WaitForAcceptance(ctx, client, w.key.Address(), tx.Nonce); err != nil { log.Printf("worker %d failed to confirm transaction %s on %s: %s", w.id, tx.TxID, uri, err) + assert.Unreachable("failed to confirm 
transaction", map[string]any{ + "worker": w.id, + "txID": tx.TxID, + "uri": uri, + "err": err, + }) return } } From 349b25a549827feb3cd0cc18d0e8fa53ec3cc579 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 30 May 2024 16:57:35 -0400 Subject: [PATCH 030/102] Add pebbledb to docs (#3061) --- config/config.md | 2 +- config/flags.go | 4 ++-- database/{pebble => pebbledb}/batch.go | 2 +- database/{pebble => pebbledb}/batch_test.go | 2 +- database/{pebble => pebbledb}/db.go | 4 ++-- database/{pebble => pebbledb}/db_test.go | 2 +- database/{pebble => pebbledb}/iterator.go | 2 +- node/node.go | 14 +++++++------- 8 files changed, 16 insertions(+), 16 deletions(-) rename database/{pebble => pebbledb}/batch.go (99%) rename database/{pebble => pebbledb}/batch_test.go (98%) rename database/{pebble => pebbledb}/db.go (99%) rename database/{pebble => pebbledb}/db_test.go (99%) rename database/{pebble => pebbledb}/iterator.go (99%) diff --git a/config/config.md b/config/config.md index e21c65aea195..8eb419024292 100644 --- a/config/config.md +++ b/config/config.md @@ -272,7 +272,7 @@ Specifies the directory to which the database is persisted. Defaults to `"$HOME/ ##### `--db-type` (string) -Specifies the type of database to use. Must be one of `LevelDB` or `memdb`. +Specifies the type of database to use. Must be one of `leveldb`, `memdb`, or `pebbledb`. `memdb` is an in-memory, non-persisted database. 
:::note diff --git a/config/flags.go b/config/flags.go index 3dae87125ee6..661eb9e84e66 100644 --- a/config/flags.go +++ b/config/flags.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/pebble" + "github.com/ava-labs/avalanchego/database/pebbledb" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/trace" @@ -109,7 +109,7 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Uint64(AddSubnetDelegatorFeeKey, genesis.LocalParams.AddSubnetDelegatorFee, "Transaction fee, in nAVAX, for transactions that add new subnet delegators") // Database - fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. Must be one of {%s, %s, %s}", leveldb.Name, memdb.Name, pebble.Name)) + fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. Must be one of {%s, %s, %s}", leveldb.Name, memdb.Name, pebbledb.Name)) fs.Bool(DBReadOnlyKey, false, "If true, database writes are to memory and never persisted. May still initialize database directory/files on disk if they don't exist") fs.String(DBPathKey, defaultDBDir, "Path to database directory") fs.String(DBConfigFileKey, "", fmt.Sprintf("Path to database config file. Ignored if %s is specified", DBConfigContentKey)) diff --git a/database/pebble/batch.go b/database/pebbledb/batch.go similarity index 99% rename from database/pebble/batch.go rename to database/pebbledb/batch.go index 8778a9473960..a14666749bc9 100644 --- a/database/pebble/batch.go +++ b/database/pebbledb/batch.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package pebble +package pebbledb import ( "fmt" diff --git a/database/pebble/batch_test.go b/database/pebbledb/batch_test.go similarity index 98% rename from database/pebble/batch_test.go rename to database/pebbledb/batch_test.go index 4fcc537d1e84..3a0ad63b7e43 100644 --- a/database/pebble/batch_test.go +++ b/database/pebbledb/batch_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package pebble +package pebbledb import ( "testing" diff --git a/database/pebble/db.go b/database/pebbledb/db.go similarity index 99% rename from database/pebble/db.go rename to database/pebbledb/db.go index 0acb10d12c5c..ca048ec5f636 100644 --- a/database/pebble/db.go +++ b/database/pebbledb/db.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package pebble +package pebbledb import ( "context" @@ -21,7 +21,7 @@ import ( ) const ( - Name = "pebble" + Name = "pebbledb" // pebbleByteOverHead is the number of bytes of constant overhead that // should be added to a batch size per operation. diff --git a/database/pebble/db_test.go b/database/pebbledb/db_test.go similarity index 99% rename from database/pebble/db_test.go rename to database/pebbledb/db_test.go index 3b37d9362d92..506221dce5cd 100644 --- a/database/pebble/db_test.go +++ b/database/pebbledb/db_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package pebble +package pebbledb import ( "fmt" diff --git a/database/pebble/iterator.go b/database/pebbledb/iterator.go similarity index 99% rename from database/pebble/iterator.go rename to database/pebbledb/iterator.go index 40654dc41d98..5e9786a318c8 100644 --- a/database/pebble/iterator.go +++ b/database/pebbledb/iterator.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. -package pebble +package pebbledb import ( "errors" diff --git a/node/node.go b/node/node.go index 3138f00c1c38..2103762f658a 100644 --- a/node/node.go +++ b/node/node.go @@ -36,7 +36,7 @@ import ( "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/meterdb" - "github.com/ava-labs/avalanchego/database/pebble" + "github.com/ava-labs/avalanchego/database/pebbledb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/genesis" @@ -729,16 +729,16 @@ func (n *Node) initDatabase() error { var err error n.DB, err = leveldb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) if err != nil { - return fmt.Errorf("couldn't create leveldb at %s: %w", dbPath, err) + return fmt.Errorf("couldn't create %s at %s: %w", leveldb.Name, dbPath, err) } case memdb.Name: n.DB = memdb.New() - case pebble.Name: - dbPath := filepath.Join(n.Config.DatabaseConfig.Path, pebble.Name) + case pebbledb.Name: + dbPath := filepath.Join(n.Config.DatabaseConfig.Path, "pebble") var err error - n.DB, err = pebble.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) + n.DB, err = pebbledb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) if err != nil { - return fmt.Errorf("couldn't create pebbledb at %s: %w", dbPath, err) + return fmt.Errorf("couldn't create %s at %s: %w", pebbledb.Name, dbPath, err) } default: return fmt.Errorf( @@ -746,7 +746,7 @@ func (n *Node) initDatabase() error { n.Config.DatabaseConfig.Name, leveldb.Name, memdb.Name, - pebble.Name, + pebbledb.Name, ) } From 928d484f3bba95c959efeb81faff7884ab5713ad Mon Sep 17 00:00:00 2001 From: marun Date: Fri, 31 May 2024 15:57:45 +0200 Subject: [PATCH 031/102] 
[ci] Remove perpetually failing govulncheck job (#3069) --- .github/workflows/ci.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9c6ed3123212..223fe2866d85 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -285,13 +285,3 @@ jobs: run: bash -x scripts/tests.build_antithesis_images.sh env: TEST_SETUP: xsvm - govulncheck: - runs-on: ubuntu-latest - name: govulncheck - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/set-go-version-in-env - - id: govulncheck - uses: golang/govulncheck-action@v1 - with: - go-version-input: ${{ env.GO_VERSION }} From b45e136382c29152d47ef08380a44a4e89b792a0 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 31 May 2024 10:01:49 -0400 Subject: [PATCH 032/102] Remove api namespace (#3066) --- api/metrics/multi_gatherer.go | 8 ++++++++ api/server/metrics.go | 17 +++++++---------- api/server/server.go | 3 +-- node/node.go | 14 ++++++++++++-- utils/metric/namespace.go | 7 ++++++- 5 files changed, 34 insertions(+), 15 deletions(-) diff --git a/api/metrics/multi_gatherer.go b/api/metrics/multi_gatherer.go index 4bd0900a0227..d8d4d93d2d76 100644 --- a/api/metrics/multi_gatherer.go +++ b/api/metrics/multi_gatherer.go @@ -93,3 +93,11 @@ func sortMetrics(m []*dto.MetricFamily) { return cmp.Compare(*i.Name, *j.Name) }) } + +func MakeAndRegister(gatherer MultiGatherer, name string) (*prometheus.Registry, error) { + reg := prometheus.NewRegistry() + if err := gatherer.Register(name, reg); err != nil { + return nil, fmt.Errorf("couldn't register %q metrics: %w", name, err) + } + return reg, nil +} diff --git a/api/server/metrics.go b/api/server/metrics.go index e3b2d76c83ea..9734f36eeaa1 100644 --- a/api/server/metrics.go +++ b/api/server/metrics.go @@ -18,29 +18,26 @@ type metrics struct { totalDuration *prometheus.GaugeVec } -func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { +func 
newMetrics(registerer prometheus.Registerer) (*metrics, error) { m := &metrics{ numProcessing: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "calls_processing", - Help: "The number of calls this API is currently processing", + Name: "calls_processing", + Help: "The number of calls this API is currently processing", }, []string{"base"}, ), numCalls: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "calls", - Help: "The number of calls this API has processed", + Name: "calls", + Help: "The number of calls this API has processed", }, []string{"base"}, ), totalDuration: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "calls_duration", - Help: "The total amount of time, in nanoseconds, spent handling API calls", + Name: "calls_duration", + Help: "The total amount of time, in nanoseconds, spent handling API calls", }, []string{"base"}, ), diff --git a/api/server/server.go b/api/server/server.go index cd712ec88b98..8af570d09bdd 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -108,12 +108,11 @@ func New( nodeID ids.NodeID, tracingEnabled bool, tracer trace.Tracer, - namespace string, registerer prometheus.Registerer, httpConfig HTTPConfig, allowedHosts []string, ) (Server, error) { - m, err := newMetrics(namespace, registerer) + m, err := newMetrics(registerer) if err != nil { return nil, err } diff --git a/node/node.go b/node/node.go index 2103762f658a..5445c626d1d6 100644 --- a/node/node.go +++ b/node/node.go @@ -66,6 +66,7 @@ import ( "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/resource" @@ -89,6 +90,8 @@ const ( httpPortName = 
constants.AppName + "-http" ipResolutionTimeout = 30 * time.Second + + apiNamespace = constants.PlatformName + metric.NamespaceSeparator + "api" ) var ( @@ -967,6 +970,14 @@ func (n *Node) initAPIServer() error { } n.apiURI = fmt.Sprintf("%s://%s", protocol, listener.Addr()) + apiRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + apiNamespace, + ) + if err != nil { + return err + } + n.APIServer, err = server.New( n.Log, n.LogFactory, @@ -976,8 +987,7 @@ func (n *Node) initAPIServer() error { n.ID, n.Config.TraceConfig.Enabled, n.tracer, - "api", - n.MetricsRegisterer, + apiRegisterer, n.Config.HTTPConfig.HTTPConfig, n.Config.HTTPAllowedHosts, ) diff --git a/utils/metric/namespace.go b/utils/metric/namespace.go index 4371bb1dc077..8d80a86266f4 100644 --- a/utils/metric/namespace.go +++ b/utils/metric/namespace.go @@ -5,6 +5,11 @@ package metric import "strings" +const ( + NamespaceSeparatorByte = '_' + NamespaceSeparator = string(NamespaceSeparatorByte) +) + func AppendNamespace(prefix, suffix string) string { switch { case len(prefix) == 0: @@ -12,6 +17,6 @@ func AppendNamespace(prefix, suffix string) string { case len(suffix) == 0: return prefix default: - return strings.Join([]string{prefix, suffix}, "_") + return strings.Join([]string{prefix, suffix}, NamespaceSeparator) } } From e8c43f554e4e01354e728df8ae2a28b9916a6e3a Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 31 May 2024 10:10:03 -0400 Subject: [PATCH 033/102] Remove unused metrics namespaces (#3062) --- chains/manager.go | 4 +- snow/consensus/snowman/metrics.go | 68 +++++---------- snow/consensus/snowman/poll/set.go | 8 +- snow/consensus/snowman/poll/set_test.go | 30 +++---- snow/consensus/snowman/topological.go | 1 - .../avalanche/bootstrap/bootstrapper.go | 2 +- snow/engine/avalanche/bootstrap/metrics.go | 25 ++---- snow/engine/avalanche/getter/getter.go | 4 +- snow/engine/common/tracker/peers.go | 17 ++-- snow/engine/snowman/bootstrap/bootstrapper.go | 2 +- 
snow/engine/snowman/bootstrap/metrics.go | 12 ++- snow/engine/snowman/getter/getter.go | 4 +- snow/engine/snowman/metrics.go | 83 ++++++++----------- snow/engine/snowman/transitive.go | 3 +- snow/engine/snowman/transitive_test.go | 2 +- snow/networking/sender/sender.go | 5 +- utils/metric/api_interceptor.go | 15 ++-- vms/avm/block/builder/builder_test.go | 2 +- vms/avm/metrics/metrics.go | 24 ++---- vms/avm/metrics/tx_metrics.go | 10 +-- vms/avm/vm.go | 2 +- vms/metervm/block_metrics.go | 55 ++++++------ vms/metervm/block_vm.go | 1 - vms/metervm/metrics.go | 4 +- vms/metervm/vertex_metrics.go | 17 ++-- vms/metervm/vertex_vm.go | 2 +- vms/platformvm/block/builder/helpers_test.go | 2 +- vms/platformvm/metrics/block_metrics.go | 12 +-- vms/platformvm/metrics/metrics.go | 49 +++++------ vms/platformvm/metrics/tx_metrics.go | 10 +-- .../validators/manager_benchmark_test.go | 2 +- vms/platformvm/vm.go | 2 +- 32 files changed, 188 insertions(+), 291 deletions(-) diff --git a/chains/manager.go b/chains/manager.go index c5b79dd470e1..7fee70b8f816 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -753,7 +753,7 @@ func (m *manager) createAvalancheChain( sampleK = int(bootstrapWeight) } - connectedValidators, err := tracker.NewMeteredPeers("", ctx.Registerer) + connectedValidators, err := tracker.NewMeteredPeers(ctx.Registerer) if err != nil { return nil, fmt.Errorf("error creating peer tracker: %w", err) } @@ -1098,7 +1098,7 @@ func (m *manager) createSnowmanChain( sampleK = int(bootstrapWeight) } - connectedValidators, err := tracker.NewMeteredPeers("", ctx.Registerer) + connectedValidators, err := tracker.NewMeteredPeers(ctx.Registerer) if err != nil { return nil, fmt.Errorf("error creating peer tracker: %w", err) } diff --git a/snow/consensus/snowman/metrics.go b/snow/consensus/snowman/metrics.go index 6b48e868aaab..1db1bdbc1c53 100644 --- a/snow/consensus/snowman/metrics.go +++ b/snow/consensus/snowman/metrics.go @@ -65,7 +65,6 @@ type metrics struct { func 
newMetrics( log logging.Logger, - namespace string, reg prometheus.Registerer, lastAcceptedHeight uint64, lastAcceptedTime time.Time, @@ -75,82 +74,61 @@ func newMetrics( log: log, currentMaxVerifiedHeight: lastAcceptedHeight, maxVerifiedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "max_verified_height", - Help: "highest verified height", + Name: "max_verified_height", + Help: "highest verified height", }), lastAcceptedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "last_accepted_height", - Help: "last height accepted", + Name: "last_accepted_height", + Help: "last height accepted", }), lastAcceptedTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "last_accepted_timestamp", - Help: "timestamp of the last accepted block in unix seconds", + Name: "last_accepted_timestamp", + Help: "timestamp of the last accepted block in unix seconds", }), processingBlocks: linked.NewHashmap[ids.ID, processingStart](), - // e.g., - // "avalanche_X_blks_processing" reports how many blocks are currently processing numProcessing: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blks_processing", - Help: "number of currently processing blocks", + Name: "blks_processing", + Help: "number of currently processing blocks", }), blockSizeAcceptedSum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blks_accepted_container_size_sum", - Help: "cumulative size of all accepted blocks", + Name: "blks_accepted_container_size_sum", + Help: "cumulative size of all accepted blocks", }), pollsAccepted: metric.NewAveragerWithErrs( - namespace, + "", "blks_polls_accepted", "number of polls from the issuance of a block to its acceptance", reg, &errs, ), - // e.g., - // "avalanche_C_blks_accepted_count" reports how many times "Observe" has been called which is the total number of blocks accepted - // "avalanche_C_blks_accepted_sum" reports the 
cumulative sum of all block acceptance latencies in nanoseconds - // "avalanche_C_blks_accepted_sum / avalanche_C_blks_accepted_count" is the average block acceptance latency in nanoseconds - // "avalanche_C_blks_accepted_container_size_sum" reports the cumulative sum of all accepted blocks' sizes in bytes - // "avalanche_C_blks_accepted_container_size_sum / avalanche_C_blks_accepted_count" is the average accepted block size in bytes latAccepted: metric.NewAveragerWithErrs( - namespace, + "", "blks_accepted", "time (in ns) from the issuance of a block to its acceptance", reg, &errs, ), buildLatencyAccepted: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blks_build_accept_latency", - Help: "time (in ns) from the timestamp of a block to the time it was accepted", + Name: "blks_build_accept_latency", + Help: "time (in ns) from the timestamp of a block to the time it was accepted", }), blockSizeRejectedSum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blks_rejected_container_size_sum", - Help: "cumulative size of all rejected blocks", + Name: "blks_rejected_container_size_sum", + Help: "cumulative size of all rejected blocks", }), pollsRejected: metric.NewAveragerWithErrs( - namespace, + "", "blks_polls_rejected", "number of polls from the issuance of a block to its rejection", reg, &errs, ), - // e.g., - // "avalanche_P_blks_rejected_count" reports how many times "Observe" has been called which is the total number of blocks rejected - // "avalanche_P_blks_rejected_sum" reports the cumulative sum of all block rejection latencies in nanoseconds - // "avalanche_P_blks_rejected_sum / avalanche_P_blks_rejected_count" is the average block rejection latency in nanoseconds - // "avalanche_P_blks_rejected_container_size_sum" reports the cumulative sum of all rejected blocks' sizes in bytes - // "avalanche_P_blks_rejected_container_size_sum / avalanche_P_blks_rejected_count" is the average rejected block size in bytes 
latRejected: metric.NewAveragerWithErrs( - namespace, + "", "blks_rejected", "time (in ns) from the issuance of a block to its rejection", reg, @@ -158,14 +136,12 @@ func newMetrics( ), numSuccessfulPolls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls_successful", - Help: "number of successful polls", + Name: "polls_successful", + Help: "number of successful polls", }), numFailedPolls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls_failed", - Help: "number of failed polls", + Name: "polls_failed", + Help: "number of failed polls", }), } diff --git a/snow/consensus/snowman/poll/set.go b/snow/consensus/snowman/poll/set.go index 87a751584c74..7ef519ea7f50 100644 --- a/snow/consensus/snowman/poll/set.go +++ b/snow/consensus/snowman/poll/set.go @@ -55,20 +55,18 @@ type set struct { func NewSet( factory Factory, log logging.Logger, - namespace string, reg prometheus.Registerer, ) (Set, error) { numPolls := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "polls", - Help: "Number of pending network polls", + Name: "polls", + Help: "Number of pending network polls", }) if err := reg.Register(numPolls); err != nil { return nil, fmt.Errorf("%w: %w", errFailedPollsMetric, err) } durPolls, err := metric.NewAverager( - namespace, + "", "poll_duration", "time (in ns) this poll took to complete", reg, diff --git a/snow/consensus/snowman/poll/set_test.go b/snow/consensus/snowman/poll/set_test.go index 0717242060d9..97166e0e9379 100644 --- a/snow/consensus/snowman/poll/set_test.go +++ b/snow/consensus/snowman/poll/set_test.go @@ -32,15 +32,13 @@ func TestNewSetErrorOnPollsMetrics(t *testing.T) { factory := NewEarlyTermNoTraversalFactory(1, 1) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls", + Name: "polls", }))) - _, err := 
NewSet(factory, log, namespace, registerer) + _, err := NewSet(factory, log, registerer) require.ErrorIs(err, errFailedPollsMetric) } @@ -49,15 +47,13 @@ func TestNewSetErrorOnPollDurationMetrics(t *testing.T) { factory := NewEarlyTermNoTraversalFactory(1, 1) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "poll_duration_count", + Name: "poll_duration_count", }))) - _, err := NewSet(factory, log, namespace, registerer) + _, err := NewSet(factory, log, registerer) require.ErrorIs(err, errFailedPollDurationMetrics) } @@ -69,9 +65,8 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { factory := NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) // create two polls for the two blocks @@ -106,9 +101,8 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { factory := NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) // create two polls for the two blocks @@ -143,9 +137,8 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { factory := NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) // create three polls for the two blocks @@ -188,9 +181,8 @@ func TestCreateAndFinishSuccessfulPoll(t *testing.T) { factory := NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} - 
namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) require.Zero(s.Len()) @@ -221,9 +213,8 @@ func TestCreateAndFinishFailedPoll(t *testing.T) { factory := NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) require.Zero(s.Len()) @@ -251,9 +242,8 @@ func TestSetString(t *testing.T) { factory := NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) expected := `current polls: (Size = 1) diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index 0eb8b55c1f6f..f2ef015654c7 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -111,7 +111,6 @@ func (ts *Topological) Initialize( ts.metrics, err = newMetrics( ctx.Log, - "", ctx.Registerer, lastAcceptedHeight, lastAcceptedTime, diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index b79c9c8cb5ff..55e3307e9337 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -66,7 +66,7 @@ func New( processedCache: &cache.LRU[ids.ID, struct{}]{Size: cacheSize}, onFinished: onFinished, } - return b, b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer) + return b, b.metrics.Initialize(config.Ctx.AvalancheRegisterer) } // Note: To align with the Snowman invariant, it should be guaranteed the VM is diff --git a/snow/engine/avalanche/bootstrap/metrics.go b/snow/engine/avalanche/bootstrap/metrics.go index 
5ad1b3713647..fdf68f5ecff2 100644 --- a/snow/engine/avalanche/bootstrap/metrics.go +++ b/snow/engine/avalanche/bootstrap/metrics.go @@ -14,30 +14,23 @@ type metrics struct { numFetchedTxs, numAcceptedTxs prometheus.Counter } -func (m *metrics) Initialize( - namespace string, - registerer prometheus.Registerer, -) error { +func (m *metrics) Initialize(registerer prometheus.Registerer) error { m.numFetchedVts = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "fetched_vts", - Help: "Number of vertices fetched during bootstrapping", + Name: "bs_fetched_vts", + Help: "Number of vertices fetched during bootstrapping", }) m.numAcceptedVts = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "accepted_vts", - Help: "Number of vertices accepted during bootstrapping", + Name: "bs_accepted_vts", + Help: "Number of vertices accepted during bootstrapping", }) m.numFetchedTxs = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "fetched_txs", - Help: "Number of transactions fetched during bootstrapping", + Name: "bs_fetched_txs", + Help: "Number of transactions fetched during bootstrapping", }) m.numAcceptedTxs = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "accepted_txs", - Help: "Number of transactions accepted during bootstrapping", + Name: "bs_accepted_txs", + Help: "Number of transactions accepted during bootstrapping", }) return utils.Err( diff --git a/snow/engine/avalanche/getter/getter.go b/snow/engine/avalanche/getter/getter.go index 796cade92fac..6866e7a54ba0 100644 --- a/snow/engine/avalanche/getter/getter.go +++ b/snow/engine/avalanche/getter/getter.go @@ -44,8 +44,8 @@ func New( var err error gh.getAncestorsVtxs, err = metric.NewAverager( - "bs", - "get_ancestors_vtxs", + "", + "bs_get_ancestors_vtxs", "vertices fetched in a call to GetAncestors", reg, ) diff --git a/snow/engine/common/tracker/peers.go b/snow/engine/common/tracker/peers.go index 
1e76d42f4268..94ed46764785 100644 --- a/snow/engine/common/tracker/peers.go +++ b/snow/engine/common/tracker/peers.go @@ -113,21 +113,18 @@ type meteredPeers struct { totalWeight prometheus.Gauge } -func NewMeteredPeers(namespace string, reg prometheus.Registerer) (Peers, error) { +func NewMeteredPeers(reg prometheus.Registerer) (Peers, error) { percentConnected := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "percent_connected", - Help: "Percent of connected stake", + Name: "percent_connected", + Help: "Percent of connected stake", }) totalWeight := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "total_weight", - Help: "Total stake", + Name: "total_weight", + Help: "Total stake", }) numValidators := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_validators", - Help: "Total number of validators", + Name: "num_validators", + Help: "Total number of validators", }) err := utils.Err( reg.Register(percentConnected), diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index 095ba4e63b17..6b8462f83f63 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -116,7 +116,7 @@ type Bootstrapper struct { } func New(config Config, onFinished func(ctx context.Context, lastReqID uint32) error) (*Bootstrapper, error) { - metrics, err := newMetrics("bs", config.Ctx.Registerer) + metrics, err := newMetrics(config.Ctx.Registerer) return &Bootstrapper{ Config: config, metrics: metrics, diff --git a/snow/engine/snowman/bootstrap/metrics.go b/snow/engine/snowman/bootstrap/metrics.go index 311ed05f136d..7b28b8b969b7 100644 --- a/snow/engine/snowman/bootstrap/metrics.go +++ b/snow/engine/snowman/bootstrap/metrics.go @@ -13,17 +13,15 @@ type metrics struct { numFetched, numAccepted prometheus.Counter } -func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { 
+func newMetrics(registerer prometheus.Registerer) (*metrics, error) { m := &metrics{ numFetched: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "fetched", - Help: "Number of blocks fetched during bootstrapping", + Name: "bs_fetched", + Help: "Number of blocks fetched during bootstrapping", }), numAccepted: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "accepted", - Help: "Number of blocks accepted during bootstrapping", + Name: "bs_accepted", + Help: "Number of blocks accepted during bootstrapping", }), } diff --git a/snow/engine/snowman/getter/getter.go b/snow/engine/snowman/getter/getter.go index b58d7eb87428..aed51298cd22 100644 --- a/snow/engine/snowman/getter/getter.go +++ b/snow/engine/snowman/getter/getter.go @@ -43,8 +43,8 @@ func New( var err error gh.getAncestorsBlks, err = metric.NewAverager( - "bs", - "get_ancestors_blks", + "", + "bs_get_ancestors_blks", "blocks fetched in a call to GetAncestors", reg, ) diff --git a/snow/engine/snowman/metrics.go b/snow/engine/snowman/metrics.go index 193b067a14cd..bd46eb002fcf 100644 --- a/snow/engine/snowman/metrics.go +++ b/snow/engine/snowman/metrics.go @@ -38,104 +38,89 @@ type metrics struct { issued *prometheus.CounterVec } -func newMetrics(namespace string, reg prometheus.Registerer) (*metrics, error) { +func newMetrics(reg prometheus.Registerer) (*metrics, error) { errs := wrappers.Errs{} m := &metrics{ bootstrapFinished: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bootstrap_finished", - Help: "Whether or not bootstrap process has completed. 1 is success, 0 is fail or ongoing.", + Name: "bootstrap_finished", + Help: "Whether or not bootstrap process has completed. 
1 is success, 0 is fail or ongoing.", }), numRequests: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "requests", - Help: "Number of outstanding block requests", + Name: "requests", + Help: "Number of outstanding block requests", }), numBlocked: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blocked", - Help: "Number of blocks that are pending issuance", + Name: "blocked", + Help: "Number of blocks that are pending issuance", }), numBlockers: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blockers", - Help: "Number of blocks that are blocking other blocks from being issued because they haven't been issued", + Name: "blockers", + Help: "Number of blocks that are blocking other blocks from being issued because they haven't been issued", }), numNonVerifieds: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "non_verified_blks", - Help: "Number of non-verified blocks in the memory", + Name: "non_verified_blks", + Help: "Number of non-verified blocks in the memory", }), numBuilt: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "blks_built", - Help: "Number of blocks that have been built locally", + Name: "blks_built", + Help: "Number of blocks that have been built locally", }), numBuildsFailed: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "blk_builds_failed", - Help: "Number of BuildBlock calls that have failed", + Name: "blk_builds_failed", + Help: "Number of BuildBlock calls that have failed", }), numUselessPutBytes: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_useless_put_bytes", - Help: "Amount of useless bytes received in Put messages", + Name: "num_useless_put_bytes", + Help: "Amount of useless bytes received in Put messages", }), numUselessPushQueryBytes: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: 
"num_useless_push_query_bytes", - Help: "Amount of useless bytes received in PushQuery messages", + Name: "num_useless_push_query_bytes", + Help: "Amount of useless bytes received in PushQuery messages", }), numMissingAcceptedBlocks: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_missing_accepted_blocks", - Help: "Number of times an accepted block height was referenced and it wasn't locally available", + Name: "num_missing_accepted_blocks", + Help: "Number of times an accepted block height was referenced and it wasn't locally available", }), numProcessingAncestorFetchesFailed: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_processing_ancestor_fetches_failed", - Help: "Number of votes that were dropped due to unknown blocks", + Name: "num_processing_ancestor_fetches_failed", + Help: "Number of votes that were dropped due to unknown blocks", }), numProcessingAncestorFetchesDropped: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_processing_ancestor_fetches_dropped", - Help: "Number of votes that were dropped due to decided blocks", + Name: "num_processing_ancestor_fetches_dropped", + Help: "Number of votes that were dropped due to decided blocks", }), numProcessingAncestorFetchesSucceeded: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_processing_ancestor_fetches_succeeded", - Help: "Number of votes that were applied to ancestor blocks", + Name: "num_processing_ancestor_fetches_succeeded", + Help: "Number of votes that were applied to ancestor blocks", }), numProcessingAncestorFetchesUnneeded: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_processing_ancestor_fetches_unneeded", - Help: "Number of votes that were directly applied to blocks", + Name: "num_processing_ancestor_fetches_unneeded", + Help: "Number of votes that were directly applied to blocks", }), getAncestorsBlks: 
metric.NewAveragerWithErrs( - namespace, + "", "get_ancestors_blks", "blocks fetched in a call to GetAncestors", reg, &errs, ), selectedVoteIndex: metric.NewAveragerWithErrs( - namespace, + "", "selected_vote_index", "index of the voteID that was passed into consensus", reg, &errs, ), issuerStake: metric.NewAveragerWithErrs( - namespace, + "", "issuer_stake", "stake weight of the peer who provided a block that was issued into consensus", reg, &errs, ), issued: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "blks_issued", - Help: "number of blocks that have been issued into consensus by discovery mechanism", + Name: "blks_issued", + Help: "number of blocks that have been issued into consensus by discovery mechanism", }, []string{"source"}), } diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 32bf4ac5d5a4..9e89fedd22b2 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -118,14 +118,13 @@ func New(config Config) (*Transitive, error) { polls, err := poll.NewSet( factory, config.Ctx.Log, - "", config.Ctx.Registerer, ) if err != nil { return nil, err } - metrics, err := newMetrics("", config.Ctx.Registerer) + metrics, err := newMetrics(config.Ctx.Registerer) if err != nil { return nil, err } diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 75040d9a1f53..2961b018c8ce 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -2866,7 +2866,7 @@ func TestGetProcessingAncestor(t *testing.T) { unissuedBlock = snowmantest.BuildChild(issuedBlock) ) - metrics, err := newMetrics("", prometheus.NewRegistry()) + metrics, err := newMetrics(prometheus.NewRegistry()) require.NoError(t, err) c := &snowman.Topological{} diff --git a/snow/networking/sender/sender.go b/snow/networking/sender/sender.go index c13b9e22682a..37076972fe12 100644 --- a/snow/networking/sender/sender.go +++ 
b/snow/networking/sender/sender.go @@ -67,9 +67,8 @@ func New( timeouts: timeouts, failedDueToBench: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: "", - Name: "failed_benched", - Help: "requests dropped because a node was benched", + Name: "failed_benched", + Help: "requests dropped because a node was benched", }, opLabels, ), diff --git a/utils/metric/api_interceptor.go b/utils/metric/api_interceptor.go index 7d970b22b833..50027fde1478 100644 --- a/utils/metric/api_interceptor.go +++ b/utils/metric/api_interceptor.go @@ -29,27 +29,24 @@ type apiInterceptor struct { requestErrors *prometheus.CounterVec } -func NewAPIInterceptor(namespace string, registerer prometheus.Registerer) (APIInterceptor, error) { +func NewAPIInterceptor(registerer prometheus.Registerer) (APIInterceptor, error) { requestDurationCount := prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "request_duration_count", - Help: "Number of times this type of request was made", + Name: "request_duration_count", + Help: "Number of times this type of request was made", }, []string{"method"}, ) requestDurationSum := prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "request_duration_sum", - Help: "Amount of time in nanoseconds that has been spent handling this type of request", + Name: "request_duration_sum", + Help: "Amount of time in nanoseconds that has been spent handling this type of request", }, []string{"method"}, ) requestErrors := prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "request_error_count", + Name: "request_error_count", }, []string{"method"}, ) diff --git a/vms/avm/block/builder/builder_test.go b/vms/avm/block/builder/builder_test.go index 89f043844b54..36159598b706 100644 --- a/vms/avm/block/builder/builder_test.go +++ b/vms/avm/block/builder/builder_test.go @@ -544,7 +544,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { state.AddBlock(parentBlk) 
state.SetLastAccepted(parentBlk.ID()) - metrics, err := metrics.New("", registerer) + metrics, err := metrics.New(registerer) require.NoError(err) manager := blkexecutor.NewManager(mempool, metrics, state, backend, clk, onAccept) diff --git a/vms/avm/metrics/metrics.go b/vms/avm/metrics/metrics.go index 9e4053e1fcc6..7d122ce6e42e 100644 --- a/vms/avm/metrics/metrics.go +++ b/vms/avm/metrics/metrics.go @@ -66,32 +66,26 @@ func (m *metrics) MarkTxAccepted(tx *txs.Tx) error { return tx.Unsigned.Visit(m.txMetrics) } -func New( - namespace string, - registerer prometheus.Registerer, -) (Metrics, error) { - txMetrics, err := newTxMetrics(namespace, registerer) +func New(registerer prometheus.Registerer) (Metrics, error) { + txMetrics, err := newTxMetrics(registerer) errs := wrappers.Errs{Err: err} m := &metrics{txMetrics: txMetrics} m.numTxRefreshes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_refreshes", - Help: "Number of times unique txs have been refreshed", + Name: "tx_refreshes", + Help: "Number of times unique txs have been refreshed", }) m.numTxRefreshHits = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_refresh_hits", - Help: "Number of times unique txs have not been unique, but were cached", + Name: "tx_refresh_hits", + Help: "Number of times unique txs have not been unique, but were cached", }) m.numTxRefreshMisses = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_refresh_misses", - Help: "Number of times unique txs have not been unique and weren't cached", + Name: "tx_refresh_misses", + Help: "Number of times unique txs have not been unique and weren't cached", }) - apiRequestMetric, err := metric.NewAPIInterceptor(namespace, registerer) + apiRequestMetric, err := metric.NewAPIInterceptor(registerer) m.APIInterceptor = apiRequestMetric errs.Add( err, diff --git a/vms/avm/metrics/tx_metrics.go b/vms/avm/metrics/tx_metrics.go index 
8b9bf2c0ed47..3c8d1bac79ad 100644 --- a/vms/avm/metrics/tx_metrics.go +++ b/vms/avm/metrics/tx_metrics.go @@ -21,16 +21,12 @@ type txMetrics struct { numTxs *prometheus.CounterVec } -func newTxMetrics( - namespace string, - registerer prometheus.Registerer, -) (*txMetrics, error) { +func newTxMetrics(registerer prometheus.Registerer) (*txMetrics, error) { m := &txMetrics{ numTxs: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "txs_accepted", - Help: "number of transactions accepted", + Name: "txs_accepted", + Help: "number of transactions accepted", }, txLabels, ), diff --git a/vms/avm/vm.go b/vms/avm/vm.go index b8fe322ef959..ab05b053b393 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -182,7 +182,7 @@ func (vm *VM) Initialize( vm.connectedPeers = make(map[ids.NodeID]*version.Application) // Initialize metrics as soon as possible - vm.metrics, err = metrics.New("", registerer) + vm.metrics, err = metrics.New(registerer) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) } diff --git a/vms/metervm/block_metrics.go b/vms/metervm/block_metrics.go index 0a6473f617bd..2bdc247c4062 100644 --- a/vms/metervm/block_metrics.go +++ b/vms/metervm/block_metrics.go @@ -49,43 +49,42 @@ func (m *blockMetrics) Initialize( supportsBlockBuildingWithContext bool, supportsBatchedFetching bool, supportsStateSync bool, - namespace string, reg prometheus.Registerer, ) error { errs := wrappers.Errs{} - m.buildBlock = newAverager(namespace, "build_block", reg, &errs) - m.buildBlockErr = newAverager(namespace, "build_block_err", reg, &errs) - m.parseBlock = newAverager(namespace, "parse_block", reg, &errs) - m.parseBlockErr = newAverager(namespace, "parse_block_err", reg, &errs) - m.getBlock = newAverager(namespace, "get_block", reg, &errs) - m.getBlockErr = newAverager(namespace, "get_block_err", reg, &errs) - m.setPreference = newAverager(namespace, "set_preference", reg, &errs) - m.lastAccepted = newAverager(namespace, 
"last_accepted", reg, &errs) - m.verify = newAverager(namespace, "verify", reg, &errs) - m.verifyErr = newAverager(namespace, "verify_err", reg, &errs) - m.accept = newAverager(namespace, "accept", reg, &errs) - m.reject = newAverager(namespace, "reject", reg, &errs) - m.shouldVerifyWithContext = newAverager(namespace, "should_verify_with_context", reg, &errs) - m.verifyWithContext = newAverager(namespace, "verify_with_context", reg, &errs) - m.verifyWithContextErr = newAverager(namespace, "verify_with_context_err", reg, &errs) - m.getBlockIDAtHeight = newAverager(namespace, "get_block_id_at_height", reg, &errs) + m.buildBlock = newAverager("build_block", reg, &errs) + m.buildBlockErr = newAverager("build_block_err", reg, &errs) + m.parseBlock = newAverager("parse_block", reg, &errs) + m.parseBlockErr = newAverager("parse_block_err", reg, &errs) + m.getBlock = newAverager("get_block", reg, &errs) + m.getBlockErr = newAverager("get_block_err", reg, &errs) + m.setPreference = newAverager("set_preference", reg, &errs) + m.lastAccepted = newAverager("last_accepted", reg, &errs) + m.verify = newAverager("verify", reg, &errs) + m.verifyErr = newAverager("verify_err", reg, &errs) + m.accept = newAverager("accept", reg, &errs) + m.reject = newAverager("reject", reg, &errs) + m.shouldVerifyWithContext = newAverager("should_verify_with_context", reg, &errs) + m.verifyWithContext = newAverager("verify_with_context", reg, &errs) + m.verifyWithContextErr = newAverager("verify_with_context_err", reg, &errs) + m.getBlockIDAtHeight = newAverager("get_block_id_at_height", reg, &errs) if supportsBlockBuildingWithContext { - m.buildBlockWithContext = newAverager(namespace, "build_block_with_context", reg, &errs) - m.buildBlockWithContextErr = newAverager(namespace, "build_block_with_context_err", reg, &errs) + m.buildBlockWithContext = newAverager("build_block_with_context", reg, &errs) + m.buildBlockWithContextErr = newAverager("build_block_with_context_err", reg, &errs) } if 
supportsBatchedFetching { - m.getAncestors = newAverager(namespace, "get_ancestors", reg, &errs) - m.batchedParseBlock = newAverager(namespace, "batched_parse_block", reg, &errs) + m.getAncestors = newAverager("get_ancestors", reg, &errs) + m.batchedParseBlock = newAverager("batched_parse_block", reg, &errs) } if supportsStateSync { - m.stateSyncEnabled = newAverager(namespace, "state_sync_enabled", reg, &errs) - m.getOngoingSyncStateSummary = newAverager(namespace, "get_ongoing_state_sync_summary", reg, &errs) - m.getLastStateSummary = newAverager(namespace, "get_last_state_summary", reg, &errs) - m.parseStateSummary = newAverager(namespace, "parse_state_summary", reg, &errs) - m.parseStateSummaryErr = newAverager(namespace, "parse_state_summary_err", reg, &errs) - m.getStateSummary = newAverager(namespace, "get_state_summary", reg, &errs) - m.getStateSummaryErr = newAverager(namespace, "get_state_summary_err", reg, &errs) + m.stateSyncEnabled = newAverager("state_sync_enabled", reg, &errs) + m.getOngoingSyncStateSummary = newAverager("get_ongoing_state_sync_summary", reg, &errs) + m.getLastStateSummary = newAverager("get_last_state_summary", reg, &errs) + m.parseStateSummary = newAverager("parse_state_summary", reg, &errs) + m.parseStateSummaryErr = newAverager("parse_state_summary_err", reg, &errs) + m.getStateSummary = newAverager("get_state_summary", reg, &errs) + m.getStateSummaryErr = newAverager("get_state_summary_err", reg, &errs) } return errs.Err } diff --git a/vms/metervm/block_vm.go b/vms/metervm/block_vm.go index 0ecb982c4742..6d951f344b2b 100644 --- a/vms/metervm/block_vm.go +++ b/vms/metervm/block_vm.go @@ -63,7 +63,6 @@ func (vm *blockVM) Initialize( vm.buildBlockVM != nil, vm.batchedVM != nil, vm.ssVM != nil, - "", registerer, ) if err != nil { diff --git a/vms/metervm/metrics.go b/vms/metervm/metrics.go index d4c9304e7696..09d85a770585 100644 --- a/vms/metervm/metrics.go +++ b/vms/metervm/metrics.go @@ -10,9 +10,9 @@ import ( 
"github.com/ava-labs/avalanchego/utils/wrappers" ) -func newAverager(namespace, name string, reg prometheus.Registerer, errs *wrappers.Errs) metric.Averager { +func newAverager(name string, reg prometheus.Registerer, errs *wrappers.Errs) metric.Averager { return metric.NewAveragerWithErrs( - namespace, + "", name, "time (in ns) of a "+name, reg, diff --git a/vms/metervm/vertex_metrics.go b/vms/metervm/vertex_metrics.go index 67caa50b610e..04096f2ae035 100644 --- a/vms/metervm/vertex_metrics.go +++ b/vms/metervm/vertex_metrics.go @@ -19,16 +19,13 @@ type vertexMetrics struct { reject metric.Averager } -func (m *vertexMetrics) Initialize( - namespace string, - reg prometheus.Registerer, -) error { +func (m *vertexMetrics) Initialize(reg prometheus.Registerer) error { errs := wrappers.Errs{} - m.parse = newAverager(namespace, "parse_tx", reg, &errs) - m.parseErr = newAverager(namespace, "parse_tx_err", reg, &errs) - m.verify = newAverager(namespace, "verify_tx", reg, &errs) - m.verifyErr = newAverager(namespace, "verify_tx_err", reg, &errs) - m.accept = newAverager(namespace, "accept", reg, &errs) - m.reject = newAverager(namespace, "reject", reg, &errs) + m.parse = newAverager("parse_tx", reg, &errs) + m.parseErr = newAverager("parse_tx_err", reg, &errs) + m.verify = newAverager("verify_tx", reg, &errs) + m.verifyErr = newAverager("verify_tx_err", reg, &errs) + m.accept = newAverager("accept", reg, &errs) + m.reject = newAverager("reject", reg, &errs) return errs.Err } diff --git a/vms/metervm/vertex_vm.go b/vms/metervm/vertex_vm.go index 7cd112ffde24..7cbd47a67475 100644 --- a/vms/metervm/vertex_vm.go +++ b/vms/metervm/vertex_vm.go @@ -46,7 +46,7 @@ func (vm *vertexVM) Initialize( appSender common.AppSender, ) error { registerer := prometheus.NewRegistry() - if err := vm.vertexMetrics.Initialize("", registerer); err != nil { + if err := vm.vertexMetrics.Initialize(registerer); err != nil { return err } diff --git 
a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index 0108162649d6..909d7adefe5e 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -177,7 +177,7 @@ func newEnvironment(t *testing.T, f fork) *environment { //nolint:unparam return nil } - metrics, err := metrics.New("", registerer) + metrics, err := metrics.New(registerer) require.NoError(err) res.mempool, err = mempool.New("mempool", registerer, nil) diff --git a/vms/platformvm/metrics/block_metrics.go b/vms/platformvm/metrics/block_metrics.go index cc1acd7eb862..bf05de8e8dd8 100644 --- a/vms/platformvm/metrics/block_metrics.go +++ b/vms/platformvm/metrics/block_metrics.go @@ -22,11 +22,8 @@ type blockMetrics struct { numBlocks *prometheus.CounterVec } -func newBlockMetrics( - namespace string, - registerer prometheus.Registerer, -) (*blockMetrics, error) { - txMetrics, err := newTxMetrics(namespace, registerer) +func newBlockMetrics(registerer prometheus.Registerer) (*blockMetrics, error) { + txMetrics, err := newTxMetrics(registerer) if err != nil { return nil, err } @@ -35,9 +32,8 @@ func newBlockMetrics( txMetrics: txMetrics, numBlocks: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "blks_accepted", - Help: "number of blocks accepted", + Name: "blks_accepted", + Help: "number of blocks accepted", }, blkLabels, ), diff --git a/vms/platformvm/metrics/metrics.go b/vms/platformvm/metrics/metrics.go index 98b611a017ed..82b51dc8c34c 100644 --- a/vms/platformvm/metrics/metrics.go +++ b/vms/platformvm/metrics/metrics.go @@ -40,61 +40,50 @@ type Metrics interface { SetTimeUntilSubnetUnstake(subnetID ids.ID, timeUntilUnstake time.Duration) } -func New( - namespace string, - registerer prometheus.Registerer, -) (Metrics, error) { - blockMetrics, err := newBlockMetrics(namespace, registerer) +func New(registerer prometheus.Registerer) (Metrics, error) { + blockMetrics, err := 
newBlockMetrics(registerer) m := &metrics{ blockMetrics: blockMetrics, timeUntilUnstake: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "time_until_unstake", - Help: "Time (in ns) until this node leaves the Primary Network's validator set", + Name: "time_until_unstake", + Help: "Time (in ns) until this node leaves the Primary Network's validator set", }), timeUntilSubnetUnstake: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "time_until_unstake_subnet", - Help: "Time (in ns) until this node leaves the subnet's validator set", + Name: "time_until_unstake_subnet", + Help: "Time (in ns) until this node leaves the subnet's validator set", }, []string{"subnetID"}, ), localStake: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "local_staked", - Help: "Amount (in nAVAX) of AVAX staked on this node", + Name: "local_staked", + Help: "Amount (in nAVAX) of AVAX staked on this node", }), totalStake: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "total_staked", - Help: "Amount (in nAVAX) of AVAX staked on the Primary Network", + Name: "total_staked", + Help: "Amount (in nAVAX) of AVAX staked on the Primary Network", }), validatorSetsCached: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "validator_sets_cached", - Help: "Total number of validator sets cached", + Name: "validator_sets_cached", + Help: "Total number of validator sets cached", }), validatorSetsCreated: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "validator_sets_created", - Help: "Total number of validator sets created from applying difflayers", + Name: "validator_sets_created", + Help: "Total number of validator sets created from applying difflayers", }), validatorSetsHeightDiff: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "validator_sets_height_diff_sum", - Help: "Total number of validator sets diffs applied 
for generating validator sets", + Name: "validator_sets_height_diff_sum", + Help: "Total number of validator sets diffs applied for generating validator sets", }), validatorSetsDuration: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "validator_sets_duration_sum", - Help: "Total amount of time generating validator sets in nanoseconds", + Name: "validator_sets_duration_sum", + Help: "Total amount of time generating validator sets in nanoseconds", }), } errs := wrappers.Errs{Err: err} - apiRequestMetrics, err := metric.NewAPIInterceptor(namespace, registerer) + apiRequestMetrics, err := metric.NewAPIInterceptor(registerer) errs.Add(err) m.APIInterceptor = apiRequestMetrics errs.Add( diff --git a/vms/platformvm/metrics/tx_metrics.go b/vms/platformvm/metrics/tx_metrics.go index 5526a6a0be57..02f45f011624 100644 --- a/vms/platformvm/metrics/tx_metrics.go +++ b/vms/platformvm/metrics/tx_metrics.go @@ -21,16 +21,12 @@ type txMetrics struct { numTxs *prometheus.CounterVec } -func newTxMetrics( - namespace string, - registerer prometheus.Registerer, -) (*txMetrics, error) { +func newTxMetrics(registerer prometheus.Registerer) (*txMetrics, error) { m := &txMetrics{ numTxs: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "txs_accepted", - Help: "number of transactions accepted", + Name: "txs_accepted", + Help: "number of transactions accepted", }, txLabels, ), diff --git a/vms/platformvm/validators/manager_benchmark_test.go b/vms/platformvm/validators/manager_benchmark_test.go index 912f3619e3e0..3a756f73c6af 100644 --- a/vms/platformvm/validators/manager_benchmark_test.go +++ b/vms/platformvm/validators/manager_benchmark_test.go @@ -104,7 +104,7 @@ func BenchmarkGetValidatorSet(b *testing.B) { execConfig, err := config.GetExecutionConfig(nil) require.NoError(err) - metrics, err := metrics.New("", prometheus.NewRegistry()) + metrics, err := metrics.New(prometheus.NewRegistry()) require.NoError(err) s, err := 
state.New( diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index b6417bbd0c45..565960cff599 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -119,7 +119,7 @@ func (vm *VM) Initialize( } // Initialize metrics as soon as possible - vm.metrics, err = metrics.New("", registerer) + vm.metrics, err = metrics.New(registerer) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) } From 1d7ba7a6b0993e763f335d85ec2da7eb2646c6bf Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 31 May 2024 11:10:52 -0400 Subject: [PATCH 034/102] Only compact after executing a large number of blocks (#3065) --- snow/engine/snowman/bootstrap/storage.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/snow/engine/snowman/bootstrap/storage.go b/snow/engine/snowman/bootstrap/storage.go index 53c2e735a572..7dafc3a40225 100644 --- a/snow/engine/snowman/bootstrap/storage.go +++ b/snow/engine/snowman/bootstrap/storage.go @@ -135,7 +135,7 @@ func execute( lastAcceptedHeight uint64, ) error { totalNumberToProcess := tree.Len() - if totalNumberToProcess > minBlocksToCompact { + if totalNumberToProcess >= minBlocksToCompact { log("compacting database before executing blocks...") if err := db.Compact(nil, nil); err != nil { // Not a fatal error, log and move on. @@ -170,8 +170,11 @@ func execute( defer func() { iterator.Release() - halted := haltable.Halted() - if !halted { + var ( + numProcessed = totalNumberToProcess - tree.Len() + halted = haltable.Halted() + ) + if numProcessed >= minBlocksToCompact && !halted { log("compacting database after executing blocks...") if err := db.Compact(nil, nil); err != nil { // Not a fatal error, log and move on. 
@@ -181,7 +184,6 @@ func execute( } } - numProcessed := totalNumberToProcess - tree.Len() log("executed blocks", zap.Uint64("numExecuted", numProcessed), zap.Uint64("numToExecute", totalNumberToProcess), From cb2a1772b7f3d68a8696037b24ae4acb1c80c97f Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 31 May 2024 11:25:50 -0400 Subject: [PATCH 035/102] Remove network namespace (#3067) --- message/creator.go | 4 - message/inbound_msg_builder_test.go | 2 - message/messages.go | 11 +-- message/messages_benchmark_test.go | 4 +- message/messages_test.go | 4 - message/outbound_msg_builder_test.go | 1 - network/config.go | 1 - network/ip_tracker.go | 15 +-- network/ip_tracker_test.go | 2 +- network/metrics.go | 91 ++++++++----------- network/network.go | 8 +- network/network_test.go | 2 - network/peer/metrics.go | 40 +++----- network/peer/peer_test.go | 6 +- network/peer/test_peer.go | 6 +- network/test_network.go | 1 - network/throttling/bandwidth_throttler.go | 8 +- .../throttling/bandwidth_throttler_test.go | 2 +- .../inbound_msg_buffer_throttler.go | 12 +-- .../inbound_msg_buffer_throttler_test.go | 4 +- .../throttling/inbound_msg_byte_throttler.go | 27 +++--- .../inbound_msg_byte_throttler_test.go | 5 - network/throttling/inbound_msg_throttler.go | 9 +- network/throttling/outbound_msg_throttler.go | 30 +++--- .../throttling/outbound_msg_throttler_test.go | 3 - node/node.go | 26 ++++-- snow/networking/sender/sender_test.go | 3 - vms/platformvm/vm_test.go | 2 +- 28 files changed, 121 insertions(+), 208 deletions(-) diff --git a/message/creator.go b/message/creator.go index 8040bccb1861..86c9af1f1076 100644 --- a/message/creator.go +++ b/message/creator.go @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" ) var _ Creator = (*creator)(nil) @@ -28,14 +27,11 @@ type creator struct { func NewCreator( log logging.Logger, 
metrics prometheus.Registerer, - parentNamespace string, compressionType compression.Type, maxMessageTimeout time.Duration, ) (Creator, error) { - namespace := metric.AppendNamespace(parentNamespace, "codec") builder, err := newMsgBuilder( log, - namespace, metrics, maxMessageTimeout, ) diff --git a/message/inbound_msg_builder_test.go b/message/inbound_msg_builder_test.go index 09269d1e2ad0..92d18b6836be 100644 --- a/message/inbound_msg_builder_test.go +++ b/message/inbound_msg_builder_test.go @@ -23,7 +23,6 @@ func Test_newMsgBuilder(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 10*time.Second, ) @@ -393,7 +392,6 @@ func TestAppError(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "", prometheus.NewRegistry(), time.Second, ) diff --git a/message/messages.go b/message/messages.go index 0362ba070e43..06ef3125d69a 100644 --- a/message/messages.go +++ b/message/messages.go @@ -149,7 +149,6 @@ type msgBuilder struct { func newMsgBuilder( log logging.Logger, - namespace string, metrics prometheus.Registerer, maxMessageTimeout time.Duration, ) (*msgBuilder, error) { @@ -164,17 +163,15 @@ func newMsgBuilder( zstdCompressor: zstdCompressor, count: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "compressed_count", - Help: "number of compressed messages", + Name: "codec_compressed_count", + Help: "number of compressed messages", }, metricLabels, ), duration: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "compressed_duration", - Help: "time spent handling compressed messages", + Name: "codec_compressed_duration", + Help: "time spent handling compressed messages", }, metricLabels, ), diff --git a/message/messages_benchmark_test.go b/message/messages_benchmark_test.go index 6abb80b9235d..9a96f1f41911 100644 --- a/message/messages_benchmark_test.go +++ b/message/messages_benchmark_test.go @@ -62,7 +62,7 @@ func BenchmarkMarshalHandshake(b *testing.B) { 
useBuilder := os.Getenv("USE_BUILDER") != "" - codec, err := newMsgBuilder(logging.NoLog{}, "", prometheus.NewRegistry(), 10*time.Second) + codec, err := newMsgBuilder(logging.NoLog{}, prometheus.NewRegistry(), 10*time.Second) require.NoError(err) b.Logf("proto length %d-byte (use builder %v)", msgLen, useBuilder) @@ -119,7 +119,7 @@ func BenchmarkUnmarshalHandshake(b *testing.B) { require.NoError(err) useBuilder := os.Getenv("USE_BUILDER") != "" - codec, err := newMsgBuilder(logging.NoLog{}, "", prometheus.NewRegistry(), 10*time.Second) + codec, err := newMsgBuilder(logging.NoLog{}, prometheus.NewRegistry(), 10*time.Second) require.NoError(err) b.StartTimer() diff --git a/message/messages_test.go b/message/messages_test.go index bbe8f2377acf..583f26533d13 100644 --- a/message/messages_test.go +++ b/message/messages_test.go @@ -25,7 +25,6 @@ func TestMessage(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 5*time.Second, ) @@ -668,7 +667,6 @@ func TestInboundMessageToString(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 5*time.Second, ) @@ -699,7 +697,6 @@ func TestEmptyInboundMessage(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 5*time.Second, ) @@ -720,7 +717,6 @@ func TestNilInboundMessage(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 5*time.Second, ) diff --git a/message/outbound_msg_builder_test.go b/message/outbound_msg_builder_test.go index 02e46ef166a5..1f7187cdd437 100644 --- a/message/outbound_msg_builder_test.go +++ b/message/outbound_msg_builder_test.go @@ -20,7 +20,6 @@ func Test_newOutboundBuilder(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 10*time.Second, ) diff --git a/network/config.go b/network/config.go index ed82ea507e8e..3004a12bdc5b 100644 --- a/network/config.go +++ b/network/config.go @@ -110,7 +110,6 
@@ type Config struct { TLSKeyLogFile string `json:"tlsKeyLogFile"` - Namespace string `json:"namespace"` MyNodeID ids.NodeID `json:"myNodeID"` MyIPPort ips.DynamicIPPort `json:"myIP"` NetworkID uint32 `json:"networkID"` diff --git a/network/ip_tracker.go b/network/ip_tracker.go index 03040b15337e..370c7d47da92 100644 --- a/network/ip_tracker.go +++ b/network/ip_tracker.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" ) @@ -42,25 +41,21 @@ var _ validators.SetCallbackListener = (*ipTracker)(nil) func newIPTracker( log logging.Logger, - namespace string, registerer prometheus.Registerer, ) (*ipTracker, error) { - bloomNamespace := metric.AppendNamespace(namespace, "ip_bloom") - bloomMetrics, err := bloom.NewMetrics(bloomNamespace, registerer) + bloomMetrics, err := bloom.NewMetrics("ip_bloom", registerer) if err != nil { return nil, err } tracker := &ipTracker{ log: log, numTrackedIPs: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "tracked_ips", - Help: "Number of IPs this node is willing to dial", + Name: "tracked_ips", + Help: "Number of IPs this node is willing to dial", }), numGossipableIPs: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "gossipable_ips", - Help: "Number of IPs this node is willing to gossip", + Name: "gossipable_ips", + Help: "Number of IPs this node is willing to gossip", }), bloomMetrics: bloomMetrics, mostRecentTrackedIPs: make(map[ids.NodeID]*ips.ClaimedIPPort), diff --git a/network/ip_tracker_test.go b/network/ip_tracker_test.go index be9ea59a51ee..edae70de5b98 100644 --- a/network/ip_tracker_test.go +++ b/network/ip_tracker_test.go @@ -17,7 +17,7 @@ import ( ) func 
newTestIPTracker(t *testing.T) *ipTracker { - tracker, err := newIPTracker(logging.NoLog{}, "", prometheus.NewRegistry()) + tracker, err := newIPTracker(logging.NoLog{}, prometheus.NewRegistry()) require.NoError(t, err) return tracker } diff --git a/network/metrics.go b/network/metrics.go index c6b47a1360ab..8cc5155ec102 100644 --- a/network/metrics.go +++ b/network/metrics.go @@ -44,111 +44,92 @@ type metrics struct { } func newMetrics( - namespace string, registerer prometheus.Registerer, trackedSubnets set.Set[ids.ID], ) (*metrics, error) { m := &metrics{ trackedSubnets: trackedSubnets, numPeers: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "peers", - Help: "Number of network peers", + Name: "peers", + Help: "Number of network peers", }), numTracked: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "tracked", - Help: "Number of currently tracked IPs attempting to be connected to", + Name: "tracked", + Help: "Number of currently tracked IPs attempting to be connected to", }), numSubnetPeers: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "peers_subnet", - Help: "Number of peers that are validating a particular subnet", + Name: "peers_subnet", + Help: "Number of peers that are validating a particular subnet", }, []string{"subnetID"}, ), timeSinceLastMsgReceived: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "time_since_last_msg_received", - Help: "Time (in ns) since the last msg was received", + Name: "time_since_last_msg_received", + Help: "Time (in ns) since the last msg was received", }), timeSinceLastMsgSent: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "time_since_last_msg_sent", - Help: "Time (in ns) since the last msg was sent", + Name: "time_since_last_msg_sent", + Help: "Time (in ns) since the last msg was sent", }), sendFailRate: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: 
"send_fail_rate", - Help: "Portion of messages that recently failed to be sent over the network", + Name: "send_fail_rate", + Help: "Portion of messages that recently failed to be sent over the network", }), connected: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "times_connected", - Help: "Times this node successfully completed a handshake with a peer", + Name: "times_connected", + Help: "Times this node successfully completed a handshake with a peer", }), disconnected: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "times_disconnected", - Help: "Times this node disconnected from a peer it had completed a handshake with", + Name: "times_disconnected", + Help: "Times this node disconnected from a peer it had completed a handshake with", }), acceptFailed: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "accept_failed", - Help: "Times this node's listener failed to accept an inbound connection", + Name: "accept_failed", + Help: "Times this node's listener failed to accept an inbound connection", }), inboundConnAllowed: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "inbound_conn_throttler_allowed", - Help: "Times this node allowed (attempted to upgrade) an inbound connection", + Name: "inbound_conn_throttler_allowed", + Help: "Times this node allowed (attempted to upgrade) an inbound connection", }), tlsConnRejected: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tls_conn_rejected", - Help: "Times this node rejected a connection due to an unsupported TLS certificate", + Name: "tls_conn_rejected", + Help: "Times this node rejected a connection due to an unsupported TLS certificate", }), numUselessPeerListBytes: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_useless_peerlist_bytes", - Help: "Amount of useless bytes (i.e. 
information about nodes we already knew/don't want to connect to) received in PeerList messages", + Name: "num_useless_peerlist_bytes", + Help: "Amount of useless bytes (i.e. information about nodes we already knew/don't want to connect to) received in PeerList messages", }), inboundConnRateLimited: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "inbound_conn_throttler_rate_limited", - Help: "Times this node rejected an inbound connection due to rate-limiting", + Name: "inbound_conn_throttler_rate_limited", + Help: "Times this node rejected an inbound connection due to rate-limiting", }), nodeUptimeWeightedAverage: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "node_uptime_weighted_average", - Help: "This node's uptime average weighted by observing peer stakes", + Name: "node_uptime_weighted_average", + Help: "This node's uptime average weighted by observing peer stakes", }), nodeUptimeRewardingStake: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "node_uptime_rewarding_stake", - Help: "The percentage of total stake which thinks this node is eligible for rewards", + Name: "node_uptime_rewarding_stake", + Help: "The percentage of total stake which thinks this node is eligible for rewards", }), nodeSubnetUptimeWeightedAverage: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "node_subnet_uptime_weighted_average", - Help: "This node's subnet uptime averages weighted by observing subnet peer stakes", + Name: "node_subnet_uptime_weighted_average", + Help: "This node's subnet uptime averages weighted by observing subnet peer stakes", }, []string{"subnetID"}, ), nodeSubnetUptimeRewardingStake: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "node_subnet_uptime_rewarding_stake", - Help: "The percentage of subnet's total stake which thinks this node is eligible for subnet's rewards", + Name: "node_subnet_uptime_rewarding_stake", + 
Help: "The percentage of subnet's total stake which thinks this node is eligible for subnet's rewards", }, []string{"subnetID"}, ), peerConnectedLifetimeAverage: prometheus.NewGauge( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "peer_connected_duration_average", - Help: "The average duration of all peer connections in nanoseconds", + Name: "peer_connected_duration_average", + Help: "The average duration of all peer connections in nanoseconds", }, ), peerConnectedStartTimes: make(map[ids.NodeID]float64), diff --git a/network/network.go b/network/network.go index 9963612c0161..51ded9b8cf91 100644 --- a/network/network.go +++ b/network/network.go @@ -202,7 +202,6 @@ func NewNetwork( inboundMsgThrottler, err := throttling.NewInboundMsgThrottler( log, - config.Namespace, metricsRegisterer, config.Validators, config.ThrottlerConfig.InboundMsgThrottlerConfig, @@ -216,7 +215,6 @@ func NewNetwork( outboundMsgThrottler, err := throttling.NewSybilOutboundMsgThrottler( log, - config.Namespace, metricsRegisterer, config.Validators, config.ThrottlerConfig.OutboundMsgThrottlerConfig, @@ -225,17 +223,17 @@ func NewNetwork( return nil, fmt.Errorf("initializing outbound message throttler failed with: %w", err) } - peerMetrics, err := peer.NewMetrics(config.Namespace, metricsRegisterer) + peerMetrics, err := peer.NewMetrics(metricsRegisterer) if err != nil { return nil, fmt.Errorf("initializing peer metrics failed with: %w", err) } - metrics, err := newMetrics(config.Namespace, metricsRegisterer, config.TrackedSubnets) + metrics, err := newMetrics(metricsRegisterer, config.TrackedSubnets) if err != nil { return nil, fmt.Errorf("initializing network metrics failed with: %w", err) } - ipTracker, err := newIPTracker(log, config.Namespace, metricsRegisterer) + ipTracker, err := newIPTracker(log, metricsRegisterer) if err != nil { return nil, fmt.Errorf("initializing ip tracker failed with: %w", err) } diff --git a/network/network_test.go b/network/network_test.go index 
f8f7b56427ff..5ae2cef5af3e 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -104,7 +104,6 @@ var ( DialerConfig: defaultDialerConfig, - Namespace: "", NetworkID: 49463, MaxClockDifference: time.Minute, PingFrequency: constants.DefaultPingFrequency, @@ -196,7 +195,6 @@ func newMessageCreator(t *testing.T) message.Creator { mc, err := message.NewCreator( logging.NoLog{}, prometheus.NewRegistry(), - "", constants.DefaultNetworkCompressionType, 10*time.Second, ) diff --git a/network/peer/metrics.go b/network/peer/metrics.go index 94d46ac1e5fa..7547d7a827d4 100644 --- a/network/peer/metrics.go +++ b/network/peer/metrics.go @@ -39,55 +39,45 @@ type Metrics struct { BytesSaved *prometheus.GaugeVec // io + op } -func NewMetrics( - namespace string, - registerer prometheus.Registerer, -) (*Metrics, error) { +func NewMetrics(registerer prometheus.Registerer) (*Metrics, error) { m := &Metrics{ ClockSkewCount: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "clock_skew_count", - Help: "number of handshake timestamps inspected (n)", + Name: "clock_skew_count", + Help: "number of handshake timestamps inspected (n)", }), ClockSkewSum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "clock_skew_sum", - Help: "sum of (peer timestamp - local timestamp) from handshake messages (s)", + Name: "clock_skew_sum", + Help: "sum of (peer timestamp - local timestamp) from handshake messages (s)", }), NumFailedToParse: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "msgs_failed_to_parse", - Help: "number of received messages that could not be parsed", + Name: "msgs_failed_to_parse", + Help: "number of received messages that could not be parsed", }), NumSendFailed: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "msgs_failed_to_send", - Help: "number of messages that failed to be sent", + Name: "msgs_failed_to_send", + Help: "number of messages that 
failed to be sent", }, opLabels, ), Messages: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "msgs", - Help: "number of handled messages", + Name: "msgs", + Help: "number of handled messages", }, ioOpCompressedLabels, ), Bytes: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "msgs_bytes", - Help: "number of message bytes", + Name: "msgs_bytes", + Help: "number of message bytes", }, ioOpLabels, ), BytesSaved: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "msgs_bytes_saved", - Help: "number of message bytes saved", + Name: "msgs_bytes_saved", + Help: "number of message bytes saved", }, ioOpLabels, ), diff --git a/network/peer/peer_test.go b/network/peer/peer_test.go index ffd5915aa2ce..4a0399bc3a1e 100644 --- a/network/peer/peer_test.go +++ b/network/peer/peer_test.go @@ -50,7 +50,6 @@ func newMessageCreator(t *testing.T) message.Creator { mc, err := message.NewCreator( logging.NoLog{}, prometheus.NewRegistry(), - "", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -63,10 +62,7 @@ func newConfig(t *testing.T) Config { t.Helper() require := require.New(t) - metrics, err := NewMetrics( - "", - prometheus.NewRegistry(), - ) + metrics, err := NewMetrics(prometheus.NewRegistry()) require.NoError(err) resourceTracker, err := tracker.NewResourceTracker( diff --git a/network/peer/test_peer.go b/network/peer/test_peer.go index a8f633ccf659..a4df06b72ee0 100644 --- a/network/peer/test_peer.go +++ b/network/peer/test_peer.go @@ -76,7 +76,6 @@ func StartTestPeer( mc, err := message.NewCreator( logging.NoLog{}, prometheus.NewRegistry(), - "", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -84,10 +83,7 @@ func StartTestPeer( return nil, err } - metrics, err := NewMetrics( - "", - prometheus.NewRegistry(), - ) + metrics, err := NewMetrics(prometheus.NewRegistry()) if err != nil { return nil, err } diff --git a/network/test_network.go 
b/network/test_network.go index 25039ad046ba..6a6bcdfcc08c 100644 --- a/network/test_network.go +++ b/network/test_network.go @@ -82,7 +82,6 @@ func NewTestNetwork( msgCreator, err := message.NewCreator( logging.NoLog{}, metrics, - "", constants.DefaultNetworkCompressionType, constants.DefaultNetworkMaximumInboundTimeout, ) diff --git a/network/throttling/bandwidth_throttler.go b/network/throttling/bandwidth_throttler.go index cde94b96124b..12ca3ac9a841 100644 --- a/network/throttling/bandwidth_throttler.go +++ b/network/throttling/bandwidth_throttler.go @@ -58,7 +58,6 @@ type BandwidthThrottlerConfig struct { func newBandwidthThrottler( log logging.Logger, - namespace string, registerer prometheus.Registerer, config BandwidthThrottlerConfig, ) (bandwidthThrottler, error) { @@ -69,16 +68,15 @@ func newBandwidthThrottler( limiters: make(map[ids.NodeID]*rate.Limiter), metrics: bandwidthThrottlerMetrics{ acquireLatency: metric.NewAveragerWithErrs( - namespace, + "", "bandwidth_throttler_inbound_acquire_latency", "average time (in ns) to acquire bytes from the inbound bandwidth throttler", registerer, &errs, ), awaitingAcquire: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bandwidth_throttler_inbound_awaiting_acquire", - Help: "Number of inbound messages waiting to acquire bandwidth from the inbound bandwidth throttler", + Name: "bandwidth_throttler_inbound_awaiting_acquire", + Help: "Number of inbound messages waiting to acquire bandwidth from the inbound bandwidth throttler", }), }, } diff --git a/network/throttling/bandwidth_throttler_test.go b/network/throttling/bandwidth_throttler_test.go index 9f9195477b38..da9ac6ded28d 100644 --- a/network/throttling/bandwidth_throttler_test.go +++ b/network/throttling/bandwidth_throttler_test.go @@ -22,7 +22,7 @@ func TestBandwidthThrottler(t *testing.T) { RefillRate: 8, MaxBurstSize: 10, } - throttlerIntf, err := newBandwidthThrottler(logging.NoLog{}, "", prometheus.NewRegistry(), config) + 
throttlerIntf, err := newBandwidthThrottler(logging.NoLog{}, prometheus.NewRegistry(), config) require.NoError(err) require.IsType(&bandwidthThrottlerImpl{}, throttlerIntf) throttler := throttlerIntf.(*bandwidthThrottlerImpl) diff --git a/network/throttling/inbound_msg_buffer_throttler.go b/network/throttling/inbound_msg_buffer_throttler.go index 65306eea7d51..395b6da1688d 100644 --- a/network/throttling/inbound_msg_buffer_throttler.go +++ b/network/throttling/inbound_msg_buffer_throttler.go @@ -18,7 +18,6 @@ import ( // See inbound_msg_throttler.go func newInboundMsgBufferThrottler( - namespace string, registerer prometheus.Registerer, maxProcessingMsgsPerNode uint64, ) (*inboundMsgBufferThrottler, error) { @@ -27,7 +26,7 @@ func newInboundMsgBufferThrottler( awaitingAcquire: make(map[ids.NodeID]chan struct{}), nodeToNumProcessingMsgs: make(map[ids.NodeID]uint64), } - return t, t.metrics.initialize(namespace, registerer) + return t, t.metrics.initialize(registerer) } // Rate-limits inbound messages based on the number of @@ -130,19 +129,18 @@ type inboundMsgBufferThrottlerMetrics struct { awaitingAcquire prometheus.Gauge } -func (m *inboundMsgBufferThrottlerMetrics) initialize(namespace string, reg prometheus.Registerer) error { +func (m *inboundMsgBufferThrottlerMetrics) initialize(reg prometheus.Registerer) error { errs := wrappers.Errs{} m.acquireLatency = metric.NewAveragerWithErrs( - namespace, + "", "buffer_throttler_inbound_acquire_latency", "average time (in ns) to get space on the inbound message buffer", reg, &errs, ) m.awaitingAcquire = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "buffer_throttler_inbound_awaiting_acquire", - Help: "Number of inbound messages waiting to take space on the inbound message buffer", + Name: "buffer_throttler_inbound_awaiting_acquire", + Help: "Number of inbound messages waiting to take space on the inbound message buffer", }) errs.Add( reg.Register(m.awaitingAcquire), diff --git 
a/network/throttling/inbound_msg_buffer_throttler_test.go b/network/throttling/inbound_msg_buffer_throttler_test.go index 38e6d735097a..d9f3e4d29bc4 100644 --- a/network/throttling/inbound_msg_buffer_throttler_test.go +++ b/network/throttling/inbound_msg_buffer_throttler_test.go @@ -17,7 +17,7 @@ import ( // Test inboundMsgBufferThrottler func TestMsgBufferThrottler(t *testing.T) { require := require.New(t) - throttler, err := newInboundMsgBufferThrottler("", prometheus.NewRegistry(), 3) + throttler, err := newInboundMsgBufferThrottler(prometheus.NewRegistry(), 3) require.NoError(err) nodeID1, nodeID2 := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() @@ -69,7 +69,7 @@ func TestMsgBufferThrottler(t *testing.T) { // Test inboundMsgBufferThrottler when an acquire is cancelled func TestMsgBufferThrottlerContextCancelled(t *testing.T) { require := require.New(t) - throttler, err := newInboundMsgBufferThrottler("", prometheus.NewRegistry(), 3) + throttler, err := newInboundMsgBufferThrottler(prometheus.NewRegistry(), 3) require.NoError(err) vdr1Context, vdr1ContextCancelFunc := context.WithCancel(context.Background()) diff --git a/network/throttling/inbound_msg_byte_throttler.go b/network/throttling/inbound_msg_byte_throttler.go index 6bdacb28092f..3e20762f85e4 100644 --- a/network/throttling/inbound_msg_byte_throttler.go +++ b/network/throttling/inbound_msg_byte_throttler.go @@ -23,7 +23,6 @@ import ( func newInboundMsgByteThrottler( log logging.Logger, - namespace string, registerer prometheus.Registerer, vdrs validators.Manager, config MsgByteThrottlerConfig, @@ -42,7 +41,7 @@ func newInboundMsgByteThrottler( waitingToAcquire: linked.NewHashmap[uint64, *msgMetadata](), nodeToWaitingMsgID: make(map[ids.NodeID]uint64), } - return t, t.metrics.initialize(namespace, registerer) + return t, t.metrics.initialize(registerer) } // Information about a message waiting to be read. 
@@ -306,34 +305,30 @@ type inboundMsgByteThrottlerMetrics struct { awaitingRelease prometheus.Gauge } -func (m *inboundMsgByteThrottlerMetrics) initialize(namespace string, reg prometheus.Registerer) error { +func (m *inboundMsgByteThrottlerMetrics) initialize(reg prometheus.Registerer) error { errs := wrappers.Errs{} m.acquireLatency = metric.NewAveragerWithErrs( - namespace, + "", "byte_throttler_inbound_acquire_latency", "average time (in ns) to get space on the inbound message byte buffer", reg, &errs, ) m.remainingAtLargeBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "byte_throttler_inbound_remaining_at_large_bytes", - Help: "Bytes remaining in the at-large byte buffer", + Name: "byte_throttler_inbound_remaining_at_large_bytes", + Help: "Bytes remaining in the at-large byte buffer", }) m.remainingVdrBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "byte_throttler_inbound_remaining_validator_bytes", - Help: "Bytes remaining in the validator byte buffer", + Name: "byte_throttler_inbound_remaining_validator_bytes", + Help: "Bytes remaining in the validator byte buffer", }) m.awaitingAcquire = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "byte_throttler_inbound_awaiting_acquire", - Help: "Number of inbound messages waiting to acquire space on the inbound message byte buffer", + Name: "byte_throttler_inbound_awaiting_acquire", + Help: "Number of inbound messages waiting to acquire space on the inbound message byte buffer", }) m.awaitingRelease = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "byte_throttler_inbound_awaiting_release", - Help: "Number of messages currently being read/handled", + Name: "byte_throttler_inbound_awaiting_release", + Help: "Number of messages currently being read/handled", }) errs.Add( reg.Register(m.remainingAtLargeBytes), diff --git a/network/throttling/inbound_msg_byte_throttler_test.go 
b/network/throttling/inbound_msg_byte_throttler_test.go index 4fc931e3f374..72ca316de442 100644 --- a/network/throttling/inbound_msg_byte_throttler_test.go +++ b/network/throttling/inbound_msg_byte_throttler_test.go @@ -30,7 +30,6 @@ func TestInboundMsgByteThrottlerCancelContextDeadlock(t *testing.T) { throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -60,7 +59,6 @@ func TestInboundMsgByteThrottlerCancelContext(t *testing.T) { throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -118,7 +116,6 @@ func TestInboundMsgByteThrottler(t *testing.T) { throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -333,7 +330,6 @@ func TestSybilMsgThrottlerMaxNonVdr(t *testing.T) { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -384,7 +380,6 @@ func TestMsgThrottlerNextMsg(t *testing.T) { maxBytes := maxVdrBytes throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, diff --git a/network/throttling/inbound_msg_throttler.go b/network/throttling/inbound_msg_throttler.go index ea9167deca15..faf64ed083af 100644 --- a/network/throttling/inbound_msg_throttler.go +++ b/network/throttling/inbound_msg_throttler.go @@ -12,7 +12,6 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" ) var _ InboundMsgThrottler = (*inboundMsgThrottler)(nil) @@ -54,7 +53,6 @@ type InboundMsgThrottlerConfig struct { // Returns a new, sybil-safe inbound message throttler. 
func NewInboundMsgThrottler( log logging.Logger, - namespace string, registerer prometheus.Registerer, vdrs validators.Manager, throttlerConfig InboundMsgThrottlerConfig, @@ -64,7 +62,6 @@ func NewInboundMsgThrottler( ) (InboundMsgThrottler, error) { byteThrottler, err := newInboundMsgByteThrottler( log, - namespace, registerer, vdrs, throttlerConfig.MsgByteThrottlerConfig, @@ -73,7 +70,6 @@ func NewInboundMsgThrottler( return nil, err } bufferThrottler, err := newInboundMsgBufferThrottler( - namespace, registerer, throttlerConfig.MaxProcessingMsgsPerNode, ) @@ -82,7 +78,6 @@ func NewInboundMsgThrottler( } bandwidthThrottler, err := newBandwidthThrottler( log, - namespace, registerer, throttlerConfig.BandwidthThrottlerConfig, ) @@ -90,7 +85,7 @@ func NewInboundMsgThrottler( return nil, err } cpuThrottler, err := NewSystemThrottler( - metric.AppendNamespace(namespace, "cpu"), + "cpu", registerer, throttlerConfig.CPUThrottlerConfig, resourceTracker.CPUTracker(), @@ -100,7 +95,7 @@ func NewInboundMsgThrottler( return nil, err } diskThrottler, err := NewSystemThrottler( - metric.AppendNamespace(namespace, "disk"), + "disk", registerer, throttlerConfig.DiskThrottlerConfig, resourceTracker.DiskTracker(), diff --git a/network/throttling/outbound_msg_throttler.go b/network/throttling/outbound_msg_throttler.go index d75c53f1548a..b27fe01060dc 100644 --- a/network/throttling/outbound_msg_throttler.go +++ b/network/throttling/outbound_msg_throttler.go @@ -42,7 +42,6 @@ type outboundMsgThrottler struct { func NewSybilOutboundMsgThrottler( log logging.Logger, - namespace string, registerer prometheus.Registerer, vdrs validators.Manager, config MsgByteThrottlerConfig, @@ -59,7 +58,7 @@ func NewSybilOutboundMsgThrottler( nodeToAtLargeBytesUsed: make(map[ids.NodeID]uint64), }, } - return t, t.metrics.initialize(namespace, registerer) + return t, t.metrics.initialize(registerer) } func (t *outboundMsgThrottler) Acquire(msg message.OutboundMessage, nodeID ids.NodeID) bool { @@ 
-176,31 +175,26 @@ type outboundMsgThrottlerMetrics struct { awaitingRelease prometheus.Gauge } -func (m *outboundMsgThrottlerMetrics) initialize(namespace string, registerer prometheus.Registerer) error { +func (m *outboundMsgThrottlerMetrics) initialize(registerer prometheus.Registerer) error { m.acquireSuccesses = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "throttler_outbound_acquire_successes", - Help: "Outbound messages not dropped due to rate-limiting", + Name: "throttler_outbound_acquire_successes", + Help: "Outbound messages not dropped due to rate-limiting", }) m.acquireFailures = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "throttler_outbound_acquire_failures", - Help: "Outbound messages dropped due to rate-limiting", + Name: "throttler_outbound_acquire_failures", + Help: "Outbound messages dropped due to rate-limiting", }) m.remainingAtLargeBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "throttler_outbound_remaining_at_large_bytes", - Help: "Bytes remaining in the at large byte allocation", + Name: "throttler_outbound_remaining_at_large_bytes", + Help: "Bytes remaining in the at large byte allocation", }) m.remainingVdrBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "throttler_outbound_remaining_validator_bytes", - Help: "Bytes remaining in the validator byte allocation", + Name: "throttler_outbound_remaining_validator_bytes", + Help: "Bytes remaining in the validator byte allocation", }) m.awaitingRelease = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "throttler_outbound_awaiting_release", - Help: "Number of messages waiting to be sent", + Name: "throttler_outbound_awaiting_release", + Help: "Number of messages waiting to be sent", }) return utils.Err( registerer.Register(m.acquireSuccesses), diff --git a/network/throttling/outbound_msg_throttler_test.go 
b/network/throttling/outbound_msg_throttler_test.go index 664449adadd6..ab883b8fa4e4 100644 --- a/network/throttling/outbound_msg_throttler_test.go +++ b/network/throttling/outbound_msg_throttler_test.go @@ -32,7 +32,6 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr2ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -174,7 +173,6 @@ func TestSybilOutboundMsgThrottlerMaxNonVdr(t *testing.T) { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -221,7 +219,6 @@ func TestBypassThrottling(t *testing.T) { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, diff --git a/node/node.go b/node/node.go index 5445c626d1d6..bdbe89d0db5a 100644 --- a/node/node.go +++ b/node/node.go @@ -91,7 +91,8 @@ const ( ipResolutionTimeout = 30 * time.Second - apiNamespace = constants.PlatformName + metric.NamespaceSeparator + "api" + apiNamespace = constants.PlatformName + metric.NamespaceSeparator + "api" + networkNamespace = constants.PlatformName + metric.NamespaceSeparator + "network" ) var ( @@ -187,11 +188,18 @@ func New( // It must be initiated before networking (initNetworking), chain manager (initChainManager) // and the engine (initChains) but after the metrics (initMetricsAPI) // message.Creator currently record metrics under network namespace - n.networkNamespace = "network" + + networkRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + networkNamespace, + ) + if err != nil { + return nil, err + } + n.msgCreator, err = message.NewCreator( n.Log, - n.MetricsRegisterer, - n.networkNamespace, + networkRegisterer, 
n.Config.NetworkConfig.CompressionType, n.Config.NetworkConfig.MaximumInboundMessageTimeout, ) @@ -209,7 +217,7 @@ func New( } n.initCPUTargeter(&config.CPUTargeterConfig) n.initDiskTargeter(&config.DiskTargeterConfig) - if err := n.initNetworking(); err != nil { // Set up networking layer. + if err := n.initNetworking(networkRegisterer); err != nil { // Set up networking layer. return nil, fmt.Errorf("problem initializing networking: %w", err) } @@ -313,8 +321,7 @@ type Node struct { VertexAcceptorGroup snow.AcceptorGroup // Net runs the networking stack - networkNamespace string - Net network.Network + Net network.Network // The staking address will optionally be written to a process context // file to enable other nodes to be configured to use this node as a @@ -390,7 +397,7 @@ type Node struct { // Initialize the networking layer. // Assumes [n.vdrs], [n.CPUTracker], and [n.CPUTargeter] have been initialized. -func (n *Node) initNetworking() error { +func (n *Node) initNetworking(reg prometheus.Registerer) error { // Providing either loopback address - `::1` for ipv6 and `127.0.0.1` for ipv4 - as the listen // host will avoid the need for a firewall exception on recent MacOS: // @@ -587,7 +594,6 @@ func (n *Node) initNetworking() error { } // add node configs to network config - n.Config.NetworkConfig.Namespace = n.networkNamespace n.Config.NetworkConfig.MyNodeID = n.ID n.Config.NetworkConfig.MyIPPort = dynamicIP n.Config.NetworkConfig.NetworkID = n.Config.NetworkID @@ -606,7 +612,7 @@ func (n *Node) initNetworking() error { n.Net, err = network.NewNetwork( &n.Config.NetworkConfig, n.msgCreator, - n.MetricsRegisterer, + reg, n.Log, listener, dialer.NewDialer(constants.NetworkType, n.Config.NetworkConfig.DialerConfig, n.Log), diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 2005c25da2a1..6bd2bc558c9c 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -70,7 +70,6 @@ 
func TestTimeout(t *testing.T) { mc, err := message.NewCreator( logging.NoLog{}, metrics, - "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -347,7 +346,6 @@ func TestReliableMessages(t *testing.T) { mc, err := message.NewCreator( logging.NoLog{}, metrics, - "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -504,7 +502,6 @@ func TestReliableMessagesToMyself(t *testing.T) { mc, err := message.NewCreator( logging.NoLog{}, metrics, - "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second, ) diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index dbd766b2a6e1..34b7d54b76e7 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -1418,7 +1418,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { chainRouter := &router.ChainRouter{} metrics := prometheus.NewRegistry() - mc, err := message.NewCreator(logging.NoLog{}, metrics, "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second) + mc, err := message.NewCreator(logging.NoLog{}, metrics, constants.DefaultNetworkCompressionType, 10*time.Second) require.NoError(err) require.NoError(chainRouter.Initialize( From 0e9ab7803c2d0f14701cacc25466a3809fb67fc2 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 31 May 2024 12:01:46 -0400 Subject: [PATCH 036/102] Remove db namespace (#3068) --- database/leveldb/db.go | 4 +- database/leveldb/db_test.go | 4 +- database/leveldb/metrics.go | 92 ++++++++----------- database/pebbledb/batch_test.go | 2 +- database/pebbledb/db.go | 2 +- database/pebbledb/db_test.go | 2 +- node/node.go | 16 +++- .../validators/manager_benchmark_test.go | 1 - 8 files changed, 55 insertions(+), 68 deletions(-) diff --git a/database/leveldb/db.go b/database/leveldb/db.go index 6c09606128db..7c54b1d86e32 100644 --- a/database/leveldb/db.go +++ b/database/leveldb/db.go @@ -186,7 +186,7 @@ type config struct { } // New returns a wrapped LevelDB object. 
-func New(file string, configBytes []byte, log logging.Logger, namespace string, reg prometheus.Registerer) (database.Database, error) { +func New(file string, configBytes []byte, log logging.Logger, reg prometheus.Registerer) (database.Database, error) { parsedConfig := config{ BlockCacheCapacity: DefaultBlockCacheSize, DisableSeeksCompaction: true, @@ -236,7 +236,7 @@ func New(file string, configBytes []byte, log logging.Logger, namespace string, closeCh: make(chan struct{}), } if parsedConfig.MetricUpdateFrequency > 0 { - metrics, err := newMetrics(namespace, reg) + metrics, err := newMetrics(reg) if err != nil { // Drop any close error to report the original error _ = db.Close() diff --git a/database/leveldb/db_test.go b/database/leveldb/db_test.go index 8352e53bd532..65214d080846 100644 --- a/database/leveldb/db_test.go +++ b/database/leveldb/db_test.go @@ -18,7 +18,7 @@ func TestInterface(t *testing.T) { for name, test := range database.Tests { t.Run(name, func(t *testing.T) { folder := t.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) + db, err := New(folder, nil, logging.NoLog{}, prometheus.NewRegistry()) require.NoError(t, err) test(t, db) @@ -30,7 +30,7 @@ func TestInterface(t *testing.T) { func newDB(t testing.TB) database.Database { folder := t.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) + db, err := New(folder, nil, logging.NoLog{}, prometheus.NewRegistry()) require.NoError(t, err) return db } diff --git a/database/leveldb/metrics.go b/database/leveldb/metrics.go index 5ad2e2b369d7..d1edab6f98e7 100644 --- a/database/leveldb/metrics.go +++ b/database/leveldb/metrics.go @@ -62,117 +62,99 @@ type metrics struct { priorStats, currentStats *leveldb.DBStats } -func newMetrics(namespace string, reg prometheus.Registerer) (metrics, error) { +func newMetrics(reg prometheus.Registerer) (metrics, error) { m := metrics{ writesDelayedCount: 
prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "writes_delayed", - Help: "number of cumulative writes that have been delayed due to compaction", + Name: "writes_delayed", + Help: "number of cumulative writes that have been delayed due to compaction", }), writesDelayedDuration: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "writes_delayed_duration", - Help: "amount of time (in ns) that writes have been delayed due to compaction", + Name: "writes_delayed_duration", + Help: "amount of time (in ns) that writes have been delayed due to compaction", }), writeIsDelayed: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "write_delayed", - Help: "1 if there is currently a write that is being delayed due to compaction", + Name: "write_delayed", + Help: "1 if there is currently a write that is being delayed due to compaction", }), aliveSnapshots: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "alive_snapshots", - Help: "number of currently alive snapshots", + Name: "alive_snapshots", + Help: "number of currently alive snapshots", }), aliveIterators: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "alive_iterators", - Help: "number of currently alive iterators", + Name: "alive_iterators", + Help: "number of currently alive iterators", }), ioWrite: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "io_write", - Help: "cumulative amount of io write during compaction", + Name: "io_write", + Help: "cumulative amount of io write during compaction", }), ioRead: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "io_read", - Help: "cumulative amount of io read during compaction", + Name: "io_read", + Help: "cumulative amount of io read during compaction", }), blockCacheSize: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "block_cache_size", - Help: "total size of cached 
blocks", + Name: "block_cache_size", + Help: "total size of cached blocks", }), openTables: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "open_tables", - Help: "number of currently opened tables", + Name: "open_tables", + Help: "number of currently opened tables", }), levelTableCount: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "table_count", - Help: "number of tables allocated by level", + Name: "table_count", + Help: "number of tables allocated by level", }, levelLabels, ), levelSize: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "size", - Help: "amount of bytes allocated by level", + Name: "size", + Help: "amount of bytes allocated by level", }, levelLabels, ), levelDuration: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "duration", - Help: "amount of time (in ns) spent in compaction by level", + Name: "duration", + Help: "amount of time (in ns) spent in compaction by level", }, levelLabels, ), levelReads: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "reads", - Help: "amount of bytes read during compaction by level", + Name: "reads", + Help: "amount of bytes read during compaction by level", }, levelLabels, ), levelWrites: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "writes", - Help: "amount of bytes written during compaction by level", + Name: "writes", + Help: "amount of bytes written during compaction by level", }, levelLabels, ), memCompactions: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "mem_comps", - Help: "total number of memory compactions performed", + Name: "mem_comps", + Help: "total number of memory compactions performed", }), level0Compactions: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "level_0_comps", - Help: "total number of level 0 compactions performed", + Name: 
"level_0_comps", + Help: "total number of level 0 compactions performed", }), nonLevel0Compactions: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "non_level_0_comps", - Help: "total number of non-level 0 compactions performed", + Name: "non_level_0_comps", + Help: "total number of non-level 0 compactions performed", }), seekCompactions: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "seek_comps", - Help: "total number of seek compactions performed", + Name: "seek_comps", + Help: "total number of seek compactions performed", }), priorStats: &leveldb.DBStats{}, diff --git a/database/pebbledb/batch_test.go b/database/pebbledb/batch_test.go index 3a0ad63b7e43..98ab0e28eb35 100644 --- a/database/pebbledb/batch_test.go +++ b/database/pebbledb/batch_test.go @@ -17,7 +17,7 @@ func TestBatch(t *testing.T) { require := require.New(t) dirName := t.TempDir() - db, err := New(dirName, nil, logging.NoLog{}, "", prometheus.NewRegistry()) + db, err := New(dirName, nil, logging.NoLog{}, prometheus.NewRegistry()) require.NoError(err) batchIntf := db.NewBatch() diff --git a/database/pebbledb/db.go b/database/pebbledb/db.go index ca048ec5f636..1de9c2ce5de5 100644 --- a/database/pebbledb/db.go +++ b/database/pebbledb/db.go @@ -64,7 +64,7 @@ type Config struct { } // TODO: Add metrics -func New(file string, configBytes []byte, log logging.Logger, _ string, _ prometheus.Registerer) (database.Database, error) { +func New(file string, configBytes []byte, log logging.Logger, _ prometheus.Registerer) (database.Database, error) { cfg := DefaultConfig if len(configBytes) > 0 { if err := json.Unmarshal(configBytes, &cfg); err != nil { diff --git a/database/pebbledb/db_test.go b/database/pebbledb/db_test.go index 506221dce5cd..7d48a00c627a 100644 --- a/database/pebbledb/db_test.go +++ b/database/pebbledb/db_test.go @@ -16,7 +16,7 @@ import ( func newDB(t testing.TB) *Database { folder := t.TempDir() - db, err := New(folder, nil, 
logging.NoLog{}, "pebble", prometheus.NewRegistry()) + db, err := New(folder, nil, logging.NoLog{}, prometheus.NewRegistry()) require.NoError(t, err) return db.(*Database) } diff --git a/node/node.go b/node/node.go index bdbe89d0db5a..847979b9b63a 100644 --- a/node/node.go +++ b/node/node.go @@ -92,6 +92,7 @@ const ( ipResolutionTimeout = 30 * time.Second apiNamespace = constants.PlatformName + metric.NamespaceSeparator + "api" + dbNamespace = constants.PlatformName + metric.NamespaceSeparator + "db_internal" networkNamespace = constants.PlatformName + metric.NamespaceSeparator + "network" ) @@ -729,14 +730,21 @@ func (n *Node) Dispatch() error { */ func (n *Node) initDatabase() error { + dbRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + dbNamespace, + ) + if err != nil { + return err + } + // start the db switch n.Config.DatabaseConfig.Name { case leveldb.Name: // Prior to v1.10.15, the only on-disk database was leveldb, and its // files went to [dbPath]/[networkID]/v1.4.5. 
dbPath := filepath.Join(n.Config.DatabaseConfig.Path, version.CurrentDatabase.String()) - var err error - n.DB, err = leveldb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) + n.DB, err = leveldb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, dbRegisterer) if err != nil { return fmt.Errorf("couldn't create %s at %s: %w", leveldb.Name, dbPath, err) } @@ -744,8 +752,7 @@ func (n *Node) initDatabase() error { n.DB = memdb.New() case pebbledb.Name: dbPath := filepath.Join(n.Config.DatabaseConfig.Path, "pebble") - var err error - n.DB, err = pebbledb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) + n.DB, err = pebbledb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, dbRegisterer) if err != nil { return fmt.Errorf("couldn't create %s at %s: %w", pebbledb.Name, dbPath, err) } @@ -763,7 +770,6 @@ func (n *Node) initDatabase() error { n.DB = versiondb.New(n.DB) } - var err error n.DB, err = meterdb.New("db", n.MetricsRegisterer, n.DB) if err != nil { return err diff --git a/vms/platformvm/validators/manager_benchmark_test.go b/vms/platformvm/validators/manager_benchmark_test.go index 3a756f73c6af..8215a54475a4 100644 --- a/vms/platformvm/validators/manager_benchmark_test.go +++ b/vms/platformvm/validators/manager_benchmark_test.go @@ -49,7 +49,6 @@ func BenchmarkGetValidatorSet(b *testing.B) { b.TempDir(), nil, logging.NoLog{}, - "", prometheus.NewRegistry(), ) require.NoError(err) From b419c2844982c891e8145c521e653c4010b325ff Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 31 May 2024 13:18:46 -0400 Subject: [PATCH 037/102] Remove averager metrics namespace (#3072) --- network/throttling/bandwidth_throttler.go | 1 - .../throttling/inbound_msg_buffer_throttler.go | 1 - network/throttling/inbound_msg_byte_throttler.go | 1 - snow/consensus/snowman/metrics.go | 4 ---- snow/consensus/snowman/poll/set.go | 1 - snow/engine/avalanche/getter/getter.go | 1 - 
snow/engine/snowman/getter/getter.go | 1 - snow/engine/snowman/metrics.go | 3 --- utils/metric/averager.go | 16 +++++++--------- vms/metervm/metrics.go | 1 - 10 files changed, 7 insertions(+), 23 deletions(-) diff --git a/network/throttling/bandwidth_throttler.go b/network/throttling/bandwidth_throttler.go index 12ca3ac9a841..58938f31c11a 100644 --- a/network/throttling/bandwidth_throttler.go +++ b/network/throttling/bandwidth_throttler.go @@ -68,7 +68,6 @@ func newBandwidthThrottler( limiters: make(map[ids.NodeID]*rate.Limiter), metrics: bandwidthThrottlerMetrics{ acquireLatency: metric.NewAveragerWithErrs( - "", "bandwidth_throttler_inbound_acquire_latency", "average time (in ns) to acquire bytes from the inbound bandwidth throttler", registerer, diff --git a/network/throttling/inbound_msg_buffer_throttler.go b/network/throttling/inbound_msg_buffer_throttler.go index 395b6da1688d..73ebc4ed9778 100644 --- a/network/throttling/inbound_msg_buffer_throttler.go +++ b/network/throttling/inbound_msg_buffer_throttler.go @@ -132,7 +132,6 @@ type inboundMsgBufferThrottlerMetrics struct { func (m *inboundMsgBufferThrottlerMetrics) initialize(reg prometheus.Registerer) error { errs := wrappers.Errs{} m.acquireLatency = metric.NewAveragerWithErrs( - "", "buffer_throttler_inbound_acquire_latency", "average time (in ns) to get space on the inbound message buffer", reg, diff --git a/network/throttling/inbound_msg_byte_throttler.go b/network/throttling/inbound_msg_byte_throttler.go index 3e20762f85e4..237041f00a09 100644 --- a/network/throttling/inbound_msg_byte_throttler.go +++ b/network/throttling/inbound_msg_byte_throttler.go @@ -308,7 +308,6 @@ type inboundMsgByteThrottlerMetrics struct { func (m *inboundMsgByteThrottlerMetrics) initialize(reg prometheus.Registerer) error { errs := wrappers.Errs{} m.acquireLatency = metric.NewAveragerWithErrs( - "", "byte_throttler_inbound_acquire_latency", "average time (in ns) to get space on the inbound message byte buffer", reg, diff 
--git a/snow/consensus/snowman/metrics.go b/snow/consensus/snowman/metrics.go index 1db1bdbc1c53..bab57e5c371d 100644 --- a/snow/consensus/snowman/metrics.go +++ b/snow/consensus/snowman/metrics.go @@ -98,14 +98,12 @@ func newMetrics( Help: "cumulative size of all accepted blocks", }), pollsAccepted: metric.NewAveragerWithErrs( - "", "blks_polls_accepted", "number of polls from the issuance of a block to its acceptance", reg, &errs, ), latAccepted: metric.NewAveragerWithErrs( - "", "blks_accepted", "time (in ns) from the issuance of a block to its acceptance", reg, @@ -121,14 +119,12 @@ func newMetrics( Help: "cumulative size of all rejected blocks", }), pollsRejected: metric.NewAveragerWithErrs( - "", "blks_polls_rejected", "number of polls from the issuance of a block to its rejection", reg, &errs, ), latRejected: metric.NewAveragerWithErrs( - "", "blks_rejected", "time (in ns) from the issuance of a block to its rejection", reg, diff --git a/snow/consensus/snowman/poll/set.go b/snow/consensus/snowman/poll/set.go index 7ef519ea7f50..aa7e7342542a 100644 --- a/snow/consensus/snowman/poll/set.go +++ b/snow/consensus/snowman/poll/set.go @@ -66,7 +66,6 @@ func NewSet( } durPolls, err := metric.NewAverager( - "", "poll_duration", "time (in ns) this poll took to complete", reg, diff --git a/snow/engine/avalanche/getter/getter.go b/snow/engine/avalanche/getter/getter.go index 6866e7a54ba0..1e1105c7675b 100644 --- a/snow/engine/avalanche/getter/getter.go +++ b/snow/engine/avalanche/getter/getter.go @@ -44,7 +44,6 @@ func New( var err error gh.getAncestorsVtxs, err = metric.NewAverager( - "", "bs_get_ancestors_vtxs", "vertices fetched in a call to GetAncestors", reg, diff --git a/snow/engine/snowman/getter/getter.go b/snow/engine/snowman/getter/getter.go index aed51298cd22..b501aeef2680 100644 --- a/snow/engine/snowman/getter/getter.go +++ b/snow/engine/snowman/getter/getter.go @@ -43,7 +43,6 @@ func New( var err error gh.getAncestorsBlks, err = metric.NewAverager( - "", 
"bs_get_ancestors_blks", "blocks fetched in a call to GetAncestors", reg, diff --git a/snow/engine/snowman/metrics.go b/snow/engine/snowman/metrics.go index bd46eb002fcf..922b18200d47 100644 --- a/snow/engine/snowman/metrics.go +++ b/snow/engine/snowman/metrics.go @@ -98,21 +98,18 @@ func newMetrics(reg prometheus.Registerer) (*metrics, error) { Help: "Number of votes that were directly applied to blocks", }), getAncestorsBlks: metric.NewAveragerWithErrs( - "", "get_ancestors_blks", "blocks fetched in a call to GetAncestors", reg, &errs, ), selectedVoteIndex: metric.NewAveragerWithErrs( - "", "selected_vote_index", "index of the voteID that was passed into consensus", reg, &errs, ), issuerStake: metric.NewAveragerWithErrs( - "", "issuer_stake", "stake weight of the peer who provided a block that was issued into consensus", reg, diff --git a/utils/metric/averager.go b/utils/metric/averager.go index e63e0007c0b8..d84e7875276a 100644 --- a/utils/metric/averager.go +++ b/utils/metric/averager.go @@ -23,23 +23,21 @@ type averager struct { sum prometheus.Gauge } -func NewAverager(namespace, name, desc string, reg prometheus.Registerer) (Averager, error) { +func NewAverager(name, desc string, reg prometheus.Registerer) (Averager, error) { errs := wrappers.Errs{} - a := NewAveragerWithErrs(namespace, name, desc, reg, &errs) + a := NewAveragerWithErrs(name, desc, reg, &errs) return a, errs.Err } -func NewAveragerWithErrs(namespace, name, desc string, reg prometheus.Registerer, errs *wrappers.Errs) Averager { +func NewAveragerWithErrs(name, desc string, reg prometheus.Registerer, errs *wrappers.Errs) Averager { a := averager{ count: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: name + "_count", - Help: "Total # of observations of " + desc, + Name: AppendNamespace(name, "count"), + Help: "Total # of observations of " + desc, }), sum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: name + "_sum", - Help: "Sum of " + 
desc, + Name: AppendNamespace(name, "sum"), + Help: "Sum of " + desc, }), } diff --git a/vms/metervm/metrics.go b/vms/metervm/metrics.go index 09d85a770585..4cad7d153f83 100644 --- a/vms/metervm/metrics.go +++ b/vms/metervm/metrics.go @@ -12,7 +12,6 @@ import ( func newAverager(name string, reg prometheus.Registerer, errs *wrappers.Errs) metric.Averager { return metric.NewAveragerWithErrs( - "", name, "time (in ns) of a "+name, reg, From b14689b4e902066c60d7d4ffee407dfcdd3b31c5 Mon Sep 17 00:00:00 2001 From: stellrust Date: Sun, 2 Jun 2024 00:59:43 +0800 Subject: [PATCH 038/102] chore: fix function name (#3075) --- vms/rpcchainvm/grpcutils/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/rpcchainvm/grpcutils/util.go b/vms/rpcchainvm/grpcutils/util.go index 69e165c29832..280ab5c1aa3b 100644 --- a/vms/rpcchainvm/grpcutils/util.go +++ b/vms/rpcchainvm/grpcutils/util.go @@ -24,7 +24,7 @@ func Errorf(code int, tmpl string, args ...interface{}) error { }) } -// GetGRPCErrorFromHTTPRespone takes an HandleSimpleHTTPResponse as input and returns a gRPC error. +// GetGRPCErrorFromHTTPResponse takes an HandleSimpleHTTPResponse as input and returns a gRPC error. 
func GetGRPCErrorFromHTTPResponse(resp *httppb.HandleSimpleHTTPResponse) error { a, err := anypb.New(resp) if err != nil { From a982d257137c7181ea9cdf34821a253017312c13 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 3 Jun 2024 10:30:50 -0400 Subject: [PATCH 039/102] Select metric by label in e2e tests (#3073) --- tests/e2e/x/transfer/virtuous.go | 10 +++---- tests/{http.go => metrics.go} | 48 +++++++++++++++++++++++++------- 2 files changed, 43 insertions(+), 15 deletions(-) rename tests/{http.go => metrics.go} (53%) diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index 994c6a845059..35d6afe1b17e 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -55,7 +55,7 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { require.NoError(err) for _, metrics := range allNodeMetrics { - xBlksProcessing, ok := tests.GetFirstMetricValue(metrics, xBlksProcessingMetric) + xBlksProcessing, ok := tests.GetMetricValue(metrics, xBlksProcessingMetric, nil) if !ok || xBlksProcessing > 0 { return false } @@ -248,13 +248,13 @@ RECEIVER NEW BALANCE (AFTER) : %21d AVAX // +0 since X-chain tx must have been processed and accepted // by now - currentXBlksProcessing, _ := tests.GetFirstMetricValue(mm, xBlksProcessingMetric) - previousXBlksProcessing, _ := tests.GetFirstMetricValue(prev, xBlksProcessingMetric) + currentXBlksProcessing, _ := tests.GetMetricValue(mm, xBlksProcessingMetric, nil) + previousXBlksProcessing, _ := tests.GetMetricValue(prev, xBlksProcessingMetric, nil) require.Equal(currentXBlksProcessing, previousXBlksProcessing) // +1 since X-chain tx must have been accepted by now - currentXBlksAccepted, _ := tests.GetFirstMetricValue(mm, xBlksAcceptedMetric) - previousXBlksAccepted, _ := tests.GetFirstMetricValue(prev, xBlksAcceptedMetric) + currentXBlksAccepted, _ := tests.GetMetricValue(mm, xBlksAcceptedMetric, nil) + previousXBlksAccepted, _ := tests.GetMetricValue(prev, 
xBlksAcceptedMetric, nil) require.Equal(currentXBlksAccepted, previousXBlksAccepted+1) metricsBeforeTx[u] = mm diff --git a/tests/http.go b/tests/metrics.go similarity index 53% rename from tests/http.go rename to tests/metrics.go index b5a7b0ffe097..2caa11ece5e8 100644 --- a/tests/http.go +++ b/tests/metrics.go @@ -7,6 +7,8 @@ import ( "context" "fmt" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/api/metrics" dto "github.com/prometheus/client_model/go" @@ -37,19 +39,45 @@ func GetNodesMetrics(ctx context.Context, nodeURIs []string) (NodesMetrics, erro return metrics, nil } -func GetFirstMetricValue(metrics NodeMetrics, name string) (float64, bool) { +// GetMetricValue returns the value of the specified metric which has the +// required labels. +// +// If multiple metrics match the provided labels, the first metric found is +// returned. +// +// Only Counter and Gauge metrics are supported. +func GetMetricValue(metrics NodeMetrics, name string, labels prometheus.Labels) (float64, bool) { metricFamily, ok := metrics[name] - if !ok || len(metricFamily.Metric) < 1 { + if !ok { return 0, false } - metric := metricFamily.Metric[0] - switch { - case metric.Gauge != nil: - return metric.Gauge.GetValue(), true - case metric.Counter != nil: - return metric.Counter.GetValue(), true - default: - return 0, false + for _, metric := range metricFamily.Metric { + if !labelsMatch(metric, labels) { + continue + } + + switch { + case metric.Gauge != nil: + return metric.Gauge.GetValue(), true + case metric.Counter != nil: + return metric.Counter.GetValue(), true + } + } + return 0, false +} + +func labelsMatch(metric *dto.Metric, labels prometheus.Labels) bool { + var found int + for _, label := range metric.Label { + expectedValue, ok := labels[label.GetName()] + if !ok { + continue + } + if label.GetValue() != expectedValue { + return false + } + found++ } + return found == len(labels) } From 
9b30547a41b9ad424db5a5586f655086a6a19391 Mon Sep 17 00:00:00 2001 From: marun Date: Mon, 3 Jun 2024 18:06:17 +0200 Subject: [PATCH 040/102] [tmpnet] Bootstrap subnets with a single node (#3005) Signed-off-by: marun Co-authored-by: Stephen Buttolph --- .github/workflows/ci.yml | 4 +- scripts/tests.e2e.existing.sh | 5 +- tests/antithesis/init_db.go | 4 +- tests/fixture/e2e/flags.go | 19 +-- tests/fixture/e2e/helpers.go | 2 +- tests/fixture/tmpnet/README.md | 2 +- tests/fixture/tmpnet/cmd/main.go | 2 +- tests/fixture/tmpnet/flags.go | 14 +++ tests/fixture/tmpnet/network.go | 210 ++++++++++++++++++++++--------- tests/fixture/tmpnet/node.go | 4 + 10 files changed, 194 insertions(+), 72 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 223fe2866d85..b5b38796821a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -82,7 +82,7 @@ jobs: FILTER_BY_OWNER: avalanchego-e2e - name: Run e2e tests shell: bash - run: E2E_SERIAL=1 ./scripts/tests.e2e.sh + run: E2E_SERIAL=1 ./scripts/tests.e2e.sh --delay-network-shutdown env: GH_REPO: ${{ github.repository }} GH_WORKFLOW: ${{ github.workflow }} @@ -132,7 +132,7 @@ jobs: GH_JOB_ID: ${{ github.job }} - name: Run e2e tests with existing network shell: bash - run: E2E_SERIAL=1 ./scripts/tests.e2e.existing.sh + run: E2E_SERIAL=1 ./scripts/tests.e2e.existing.sh --delay-network-shutdown env: GH_REPO: ${{ github.repository }} GH_WORKFLOW: ${{ github.workflow }} diff --git a/scripts/tests.e2e.existing.sh b/scripts/tests.e2e.existing.sh index 30ab1b473469..4b28fc1ad271 100755 --- a/scripts/tests.e2e.existing.sh +++ b/scripts/tests.e2e.existing.sh @@ -6,6 +6,7 @@ set -euo pipefail # e.g., # ./scripts/build.sh +# ./scripts/tests.e2e.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo # AVALANCHEGO_PATH=./build/avalanchego ./scripts/tests.e2e.existing.sh # Customization of avalanchego path if ! 
[[ "$0" =~ scripts/tests.e2e.existing.sh ]]; then echo "must be run from repository root" @@ -27,7 +28,7 @@ trap cleanup EXIT print_separator echo "starting initial test run that should create the reusable network" -./scripts/tests.e2e.sh --reuse-network --ginkgo.focus-file=xsvm.go +./scripts/tests.e2e.sh --reuse-network --ginkgo.focus-file=xsvm.go "${@}" print_separator echo "determining the network path of the reusable network created by the first test run" @@ -36,7 +37,7 @@ INITIAL_NETWORK_DIR="$(realpath "${SYMLINK_PATH}")" print_separator echo "starting second test run that should reuse the network created by the first run" -./scripts/tests.e2e.sh --reuse-network --ginkgo.focus-file=xsvm.go +./scripts/tests.e2e.sh --reuse-network --ginkgo.focus-file=xsvm.go "${@}" SUBSEQUENT_NETWORK_DIR="$(realpath "${SYMLINK_PATH}")" echo "checking that the symlink path remains the same, indicating that the network was reused" diff --git a/tests/antithesis/init_db.go b/tests/antithesis/init_db.go index cad82f623b1d..e1d6c5d537a9 100644 --- a/tests/antithesis/init_db.go +++ b/tests/antithesis/init_db.go @@ -30,7 +30,7 @@ func GetBootstrapVolumePath(targetPath string) (string, error) { func InitBootstrapDB(network *tmpnet.Network, avalancheGoPath string, pluginDir string, destPath string) error { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*2) defer cancel() - if err := tmpnet.StartNewNetwork( + if err := tmpnet.BootstrapNewNetwork( ctx, os.Stdout, network, @@ -38,7 +38,7 @@ func InitBootstrapDB(network *tmpnet.Network, avalancheGoPath string, pluginDir avalancheGoPath, pluginDir, ); err != nil { - return fmt.Errorf("failed to start network: %w", err) + return fmt.Errorf("failed to bootstrap network: %w", err) } // Since the goal is to initialize the DB, we can stop the network after it has been started successfully if err := network.Stop(ctx); err != nil { diff --git a/tests/fixture/e2e/flags.go b/tests/fixture/e2e/flags.go index 
bc752adaf7df..9e55d0cda16c 100644 --- a/tests/fixture/e2e/flags.go +++ b/tests/fixture/e2e/flags.go @@ -17,7 +17,7 @@ type FlagVars struct { pluginDir string networkDir string reuseNetwork bool - networkShutdownDelay time.Duration + delayNetworkShutdown bool stopNetwork bool nodeCount int } @@ -45,7 +45,12 @@ func (v *FlagVars) ReuseNetwork() bool { } func (v *FlagVars) NetworkShutdownDelay() time.Duration { - return v.networkShutdownDelay + if v.delayNetworkShutdown { + // Only return a non-zero value if the delay is enabled. Make sure this value takes + // into account the scrape_interval defined in scripts/run_prometheus.sh. + return 12 * time.Second + } + return 0 } func (v *FlagVars) StopNetwork() bool { @@ -82,11 +87,11 @@ func RegisterFlags() *FlagVars { false, "[optional] reuse an existing network. If an existing network is not already running, create a new one and leave it running for subsequent usage.", ) - flag.DurationVar( - &vars.networkShutdownDelay, - "network-shutdown-delay", - 12*time.Second, // Make sure this value takes into account the scrape_interval defined in scripts/run_prometheus.sh - "[optional] the duration to wait before shutting down the test network at the end of the test run. A value greater than the scrape interval is suggested. 
0 avoids waiting for shutdown.", + flag.BoolVar( + &vars.delayNetworkShutdown, + "delay-network-shutdown", + false, + "[optional] whether to delay network shutdown to allow a final metrics scrape.", ) flag.BoolVar( &vars.stopNetwork, diff --git a/tests/fixture/e2e/helpers.go b/tests/fixture/e2e/helpers.go index 358f07946055..6f6e5382dc7a 100644 --- a/tests/fixture/e2e/helpers.go +++ b/tests/fixture/e2e/helpers.go @@ -226,7 +226,7 @@ func StartNetwork( require := require.New(ginkgo.GinkgoT()) require.NoError( - tmpnet.StartNewNetwork( + tmpnet.BootstrapNewNetwork( DefaultContext(), ginkgo.GinkgoWriter, network, diff --git a/tests/fixture/tmpnet/README.md b/tests/fixture/tmpnet/README.md index 3c0679b4410d..b059da9a554d 100644 --- a/tests/fixture/tmpnet/README.md +++ b/tests/fixture/tmpnet/README.md @@ -103,7 +103,7 @@ network := &tmpnet.Network{ // Configure non-default values fo }, } -_ := tmpnet.StartNewNetwork( // Start the network +_ := tmpnet.BootstrapNewNetwork( // Bootstrap the network ctx, // Context used to limit duration of waiting for network health ginkgo.GinkgoWriter, // Writer to report progress of initialization network, diff --git a/tests/fixture/tmpnet/cmd/main.go b/tests/fixture/tmpnet/cmd/main.go index 0e6bcb1fd0a0..d2b60682323b 100644 --- a/tests/fixture/tmpnet/cmd/main.go +++ b/tests/fixture/tmpnet/cmd/main.go @@ -74,7 +74,7 @@ func main() { ctx, cancel := context.WithTimeout(context.Background(), networkStartTimeout) defer cancel() - err := tmpnet.StartNewNetwork( + err := tmpnet.BootstrapNewNetwork( ctx, os.Stdout, network, diff --git a/tests/fixture/tmpnet/flags.go b/tests/fixture/tmpnet/flags.go index 6afb7c9d4ac8..c3ec5aabc192 100644 --- a/tests/fixture/tmpnet/flags.go +++ b/tests/fixture/tmpnet/flags.go @@ -55,6 +55,20 @@ func (f FlagsMap) GetStringVal(key string) (string, error) { return val, nil } +// GetBoolVal simplifies retrieving a map value as a bool. 
+func (f FlagsMap) GetBoolVal(key string, defaultVal bool) (bool, error) { + rawVal, ok := f[key] + if !ok { + return defaultVal, nil + } + + val, err := cast.ToBoolE(rawVal) + if err != nil { + return false, fmt.Errorf("failed to cast value for %q: %w", key, err) + } + return val, nil +} + // Write simplifies writing a FlagsMap to the provided path. The // description is used in error messages. func (f FlagsMap) Write(path string, description string) error { diff --git a/tests/fixture/tmpnet/network.go b/tests/fixture/tmpnet/network.go index e3efdd88cf5c..97e6a9a48c21 100644 --- a/tests/fixture/tmpnet/network.go +++ b/tests/fixture/tmpnet/network.go @@ -11,6 +11,7 @@ import ( "io" "os" "path/filepath" + "slices" "strconv" "strings" "time" @@ -52,7 +53,7 @@ var ( // TODO(marun) Remove when subnet-evm configures the genesis with this key. HardhatKey *secp256k1.PrivateKey - errInsufficientNodes = errors.New("network needs at least one node to start") + errInsufficientNodes = errors.New("at least one node is required") ) func init() { @@ -127,7 +128,7 @@ func toCanonicalDir(dir string) (string, error) { return filepath.EvalSymlinks(absDir) } -func StartNewNetwork( +func BootstrapNewNetwork( ctx context.Context, w io.Writer, network *Network, @@ -144,10 +145,7 @@ func StartNewNetwork( if err := network.Create(rootNetworkDir); err != nil { return err } - if err := network.Start(ctx, w); err != nil { - return err - } - return network.CreateSubnets(ctx, w) + return network.Bootstrap(ctx, w) } // Stops the nodes of the network configured in the provided directory. 
@@ -325,26 +323,39 @@ func (n *Network) Create(rootDir string) error { return n.Write() } -// Starts all nodes in the network -func (n *Network) Start(ctx context.Context, w io.Writer) error { - if _, err := fmt.Fprintf(w, "Starting network %s (UUID: %s)\n", n.Dir, n.UUID); err != nil { - return err +// Starts the specified nodes +func (n *Network) StartNodes(ctx context.Context, w io.Writer, nodesToStart ...*Node) error { + if len(nodesToStart) == 0 { + return errInsufficientNodes + } + nodesToWaitFor := nodesToStart + if !slices.Contains(nodesToStart, n.Nodes[0]) { + // If starting all nodes except the bootstrap node (because the bootstrap node is already + // running), ensure that the health of the bootstrap node will be logged by including it in + // the set of nodes to wait for. + nodesToWaitFor = n.Nodes + } else { + // Simplify output by only logging network start when starting all nodes or when starting + // the first node by itself to bootstrap subnet creation. + if _, err := fmt.Fprintf(w, "Starting network %s (UUID: %s)\n", n.Dir, n.UUID); err != nil { + return err + } } // Record the time before nodes are started to ensure visibility of subsequently collected metrics via the emitted link startTime := time.Now() // Configure the networking for each node and start - for _, node := range n.Nodes { + for _, node := range nodesToStart { if err := n.StartNode(ctx, w, node); err != nil { return err } } - if _, err := fmt.Fprint(w, "Waiting for all nodes to report healthy...\n\n"); err != nil { + if _, err := fmt.Fprint(w, "Waiting for nodes to report healthy...\n\n"); err != nil { return err } - if err := n.WaitForHealthy(ctx, w); err != nil { + if err := waitForHealthy(ctx, w, nodesToWaitFor); err != nil { return err } if _, err := fmt.Fprintf(w, "\nStarted network %s (UUID: %s)\n", n.Dir, n.UUID); err != nil { @@ -358,6 +369,85 @@ func (n *Network) Start(ctx context.Context, w io.Writer) error { return nil } +// Start the network for the first time +func (n 
*Network) Bootstrap(ctx context.Context, w io.Writer) error { + if len(n.Subnets) == 0 { + // Without the need to coordinate subnet configuration, + // starting all nodes at once is the simplest option. + return n.StartNodes(ctx, w, n.Nodes...) + } + + // The node that will be used to create subnets and bootstrap the network + bootstrapNode := n.Nodes[0] + + // Whether sybil protection will need to be re-enabled after subnet creation + reEnableSybilProtection := false + + if len(n.Nodes) > 1 { + // Reduce the cost of subnet creation for a network of multiple nodes by + // creating subnets with a single node with sybil protection + // disabled. This allows the creation of initial subnet state without + // requiring coordination between multiple nodes. + + if _, err := fmt.Fprintln(w, "Starting a single-node network with sybil protection disabled for quicker subnet creation"); err != nil { + return err + } + + // If sybil protection is enabled, it should be re-enabled before the node is used to bootstrap the other nodes + var err error + reEnableSybilProtection, err = bootstrapNode.Flags.GetBoolVal(config.SybilProtectionEnabledKey, true) + if err != nil { + return fmt.Errorf("failed to read sybil protection flag: %w", err) + } + + // Ensure sybil protection is disabled for the bootstrap node. + bootstrapNode.Flags[config.SybilProtectionEnabledKey] = false + } + + if err := n.StartNodes(ctx, w, bootstrapNode); err != nil { + return err + } + + // Don't restart the node during subnet creation since it will always be restarted afterwards. 
+ if err := n.CreateSubnets(ctx, w, false /* restartRequired */); err != nil { + return err + } + + if reEnableSybilProtection { + if _, err := fmt.Fprintf(w, "Re-enabling sybil protection for %s\n", bootstrapNode.NodeID); err != nil { + return err + } + delete(bootstrapNode.Flags, config.SybilProtectionEnabledKey) + } + + if _, err := fmt.Fprintf(w, "Restarting bootstrap node %s\n", bootstrapNode.NodeID); err != nil { + return err + } + + if len(n.Nodes) == 1 { + // Ensure the node is restarted to pick up subnet and chain configuration + return n.RestartNode(ctx, w, bootstrapNode) + } + + // TODO(marun) This last restart of the bootstrap node might be unnecessary if: + // - sybil protection didn't change + // - the node is not a subnet validator + + // Ensure the bootstrap node is restarted to pick up configuration changes. Avoid using + // RestartNode since the node won't be able to report healthy until other nodes are started. + if err := bootstrapNode.Stop(ctx); err != nil { + return fmt.Errorf("failed to stop node %s: %w", bootstrapNode.NodeID, err) + } + if err := n.StartNode(ctx, w, bootstrapNode); err != nil { + return fmt.Errorf("failed to start node %s: %w", bootstrapNode.NodeID, err) + } + + if _, err := fmt.Fprintln(w, "Starting remaining nodes..."); err != nil { + return err + } + return n.StartNodes(ctx, w, n.Nodes[1:]...) +} + // Starts the provided node after configuring it for the network. func (n *Network) StartNode(ctx context.Context, w io.Writer, node *Node) error { if err := n.EnsureNodeConfig(node); err != nil { @@ -415,41 +505,6 @@ func (n *Network) RestartNode(ctx context.Context, w io.Writer, node *Node) erro return WaitForHealthy(ctx, node) } -// Waits until all nodes in the network are healthy. 
-func (n *Network) WaitForHealthy(ctx context.Context, w io.Writer) error { - ticker := time.NewTicker(networkHealthCheckInterval) - defer ticker.Stop() - - healthyNodes := set.NewSet[ids.NodeID](len(n.Nodes)) - for healthyNodes.Len() < len(n.Nodes) { - for _, node := range n.Nodes { - if healthyNodes.Contains(node.NodeID) { - continue - } - - healthy, err := node.IsHealthy(ctx) - if err != nil && !errors.Is(err, ErrNotRunning) { - return err - } - if !healthy { - continue - } - - healthyNodes.Add(node.NodeID) - if _, err := fmt.Fprintf(w, "%s is healthy @ %s\n", node.NodeID, node.URI); err != nil { - return err - } - } - - select { - case <-ctx.Done(): - return fmt.Errorf("failed to see all nodes healthy before timeout: %w", ctx.Err()) - case <-ticker.C: - } - } - return nil -} - // Stops all nodes in the network. func (n *Network) Stop(ctx context.Context) error { // Target all nodes, including the ephemeral ones @@ -589,8 +644,9 @@ func (n *Network) GetSubnet(name string) *Subnet { return nil } -// Ensure that each subnet on the network is created. -func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { +// Ensure that each subnet on the network is created. If restartRequired is false, node restart +// to pick up configuration changes becomes the responsibility of the caller. 
+func (n *Network) CreateSubnets(ctx context.Context, w io.Writer, restartRequired bool) error { createdSubnets := make([]*Subnet, 0, len(n.Subnets)) for _, subnet := range n.Subnets { if len(subnet.ValidatorIDs) == 0 { @@ -645,9 +701,6 @@ func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { return err } - if _, err := fmt.Fprintln(w, "Restarting node(s) to enable them to track the new subnet(s)"); err != nil { - return err - } reconfiguredNodes := []*Node{} for _, node := range n.Nodes { existingTrackedSubnets, err := node.Flags.GetStringVal(config.TrackSubnetsKey) @@ -661,10 +714,21 @@ func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { node.Flags[config.TrackSubnetsKey] = trackedSubnets reconfiguredNodes = append(reconfiguredNodes, node) } - for _, node := range reconfiguredNodes { - if err := n.RestartNode(ctx, w, node); err != nil { + + if restartRequired { + if _, err := fmt.Fprintln(w, "Restarting node(s) to enable them to track the new subnet(s)"); err != nil { return err } + + for _, node := range reconfiguredNodes { + if len(node.URI) == 0 { + // Only running nodes should be restarted + continue + } + if err := n.RestartNode(ctx, w, node); err != nil { + return err + } + } } // Add validators for the subnet @@ -718,7 +782,7 @@ func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { } } - if len(validatorsToRestart) == 0 { + if !restartRequired || len(validatorsToRestart) == 0 { return nil } @@ -781,6 +845,40 @@ func (n *Network) getBootstrapIPsAndIDs(skippedNode *Node) ([]string, []string, return bootstrapIPs, bootstrapIDs, nil } +// Waits until the provided nodes are healthy. +func waitForHealthy(ctx context.Context, w io.Writer, nodes []*Node) error { + ticker := time.NewTicker(networkHealthCheckInterval) + defer ticker.Stop() + + unhealthyNodes := set.Of(nodes...) 
+ for { + for node := range unhealthyNodes { + healthy, err := node.IsHealthy(ctx) + if err != nil && !errors.Is(err, ErrNotRunning) { + return err + } + if !healthy { + continue + } + + unhealthyNodes.Remove(node) + if _, err := fmt.Fprintf(w, "%s is healthy @ %s\n", node.NodeID, node.URI); err != nil { + return err + } + } + + if unhealthyNodes.Len() == 0 { + return nil + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to see all nodes healthy before timeout: %w", ctx.Err()) + case <-ticker.C: + } + } +} + // Retrieves the root dir for tmpnet data. func getTmpnetPath() (string, error) { homeDir, err := os.UserHomeDir() diff --git a/tests/fixture/tmpnet/node.go b/tests/fixture/tmpnet/node.go index e29cb92a2960..3a6076af1283 100644 --- a/tests/fixture/tmpnet/node.go +++ b/tests/fixture/tmpnet/node.go @@ -369,6 +369,10 @@ func (n *Node) EnsureNodeID() error { // labeling of metrics. func (n *Node) SaveAPIPort() error { hostPort := strings.TrimPrefix(n.URI, "http://") + if len(hostPort) == 0 { + // Without an API URI there is nothing to save + return nil + } _, port, err := net.SplitHostPort(hostPort) if err != nil { return err From 0893516dd74e70b24de5b2030ad196e047ca139a Mon Sep 17 00:00:00 2001 From: marun Date: Mon, 3 Jun 2024 18:06:53 +0200 Subject: [PATCH 041/102] [antithesis] Skip push for builder image (#3070) --- scripts/build_antithesis_images.sh | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/scripts/build_antithesis_images.sh b/scripts/build_antithesis_images.sh index 964b966cf177..958f80a250b9 100755 --- a/scripts/build_antithesis_images.sh +++ b/scripts/build_antithesis_images.sh @@ -74,6 +74,18 @@ function build_images { docker_cmd="${docker_cmd} --build-arg AVALANCHEGO_NODE_IMAGE=antithesis-avalanchego-node:${TAG}" fi + if [[ "${test_setup}" == "avalanchego" ]]; then + # Build the image that enables compiling golang binaries for the node and workload + # image builds. 
The builder image is intended to enable building instrumented binaries + # if built on amd64 and non-instrumented binaries if built on arm64. + # + # The builder image is not intended to be pushed so it needs to be built in advance of + # adding `--push` to docker_cmd. Since it is never prefixed with `[registry]/[repo]`, + # attempting to push will result in an error like `unauthorized: access token has + # insufficient scopes`. + ${docker_cmd} -t "${builder_image_name}" -f "${builder_dockerfile}" "${AVALANCHE_PATH}" + fi + if [[ -n "${image_prefix}" && -z "${node_only}" ]]; then # Push images with an image prefix since the prefix defines a # registry location, and only if building all images. When @@ -82,13 +94,6 @@ function build_images { docker_cmd="${docker_cmd} --push" fi - if [[ "${test_setup}" == "avalanchego" ]]; then - # Build the image that enables compiling golang binaries for the node and workload - # image builds. The builder image is intended to enable building instrumented binaries - # if built on amd64 and non-instrumented binaries if built on arm64. - ${docker_cmd} -t "${builder_image_name}" -f "${builder_dockerfile}" "${AVALANCHE_PATH}" - fi - # Build node image first to allow the workload image to use it. 
${docker_cmd} -t "${node_image_name}" -f "${node_dockerfile}" "${AVALANCHE_PATH}" From 2cf7bd6a053029a808c6f0a6fd244dd50330f166 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 4 Jun 2024 12:39:04 -0400 Subject: [PATCH 042/102] Implement label gatherer (#3074) --- api/metrics/gatherer_test.go | 11 +- api/metrics/label_gatherer.go | 76 ++++++++++ api/metrics/label_gatherer_test.go | 217 ++++++++++++++++++++++++++++ api/metrics/multi_gatherer.go | 76 ++-------- api/metrics/multi_gatherer_test.go | 137 ------------------ api/metrics/prefix_gatherer.go | 66 +++++++++ api/metrics/prefix_gatherer_test.go | 150 +++++++++++++++++++ 7 files changed, 527 insertions(+), 206 deletions(-) create mode 100644 api/metrics/label_gatherer.go create mode 100644 api/metrics/label_gatherer_test.go delete mode 100644 api/metrics/multi_gatherer_test.go create mode 100644 api/metrics/prefix_gatherer.go create mode 100644 api/metrics/prefix_gatherer_test.go diff --git a/api/metrics/gatherer_test.go b/api/metrics/gatherer_test.go index 334c361ebcc0..83a438867fb9 100644 --- a/api/metrics/gatherer_test.go +++ b/api/metrics/gatherer_test.go @@ -4,14 +4,15 @@ package metrics import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" ) -var ( - hello = "hello" - world = "world" - helloWorld = "hello_world" -) +var counterOpts = prometheus.CounterOpts{ + Name: "counter", + Help: "help", +} type testGatherer struct { mfs []*dto.MetricFamily diff --git a/api/metrics/label_gatherer.go b/api/metrics/label_gatherer.go new file mode 100644 index 000000000000..3b8951a75b77 --- /dev/null +++ b/api/metrics/label_gatherer.go @@ -0,0 +1,76 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package metrics + +import ( + "errors" + "fmt" + "slices" + + "github.com/prometheus/client_golang/prometheus" + + dto "github.com/prometheus/client_model/go" +) + +var ( + _ MultiGatherer = (*prefixGatherer)(nil) + + errDuplicateGatherer = errors.New("attempt to register duplicate gatherer") +) + +// NewLabelGatherer returns a new MultiGatherer that merges metrics by adding a +// new label. +func NewLabelGatherer(labelName string) MultiGatherer { + return &labelGatherer{ + labelName: labelName, + } +} + +type labelGatherer struct { + multiGatherer + + labelName string +} + +func (g *labelGatherer) Register(labelValue string, gatherer prometheus.Gatherer) error { + g.lock.Lock() + defer g.lock.Unlock() + + if slices.Contains(g.names, labelValue) { + return fmt.Errorf("%w: for %q with label %q", + errDuplicateGatherer, + g.labelName, + labelValue, + ) + } + + g.names = append(g.names, labelValue) + g.gatherers = append(g.gatherers, &labeledGatherer{ + labelName: g.labelName, + labelValue: labelValue, + gatherer: gatherer, + }) + return nil +} + +type labeledGatherer struct { + labelName string + labelValue string + gatherer prometheus.Gatherer +} + +func (g *labeledGatherer) Gather() ([]*dto.MetricFamily, error) { + // Gather returns partially filled metrics in the case of an error. So, it + // is expected to still return the metrics in the case an error is returned. + metricFamilies, err := g.gatherer.Gather() + for _, metricFamily := range metricFamilies { + for _, metric := range metricFamily.Metric { + metric.Label = append(metric.Label, &dto.LabelPair{ + Name: &g.labelName, + Value: &g.labelValue, + }) + } + } + return metricFamilies, err +} diff --git a/api/metrics/label_gatherer_test.go b/api/metrics/label_gatherer_test.go new file mode 100644 index 000000000000..d5f30fd6529b --- /dev/null +++ b/api/metrics/label_gatherer_test.go @@ -0,0 +1,217 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package metrics + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +func TestLabelGatherer_Gather(t *testing.T) { + const ( + labelName = "smith" + labelValueA = "rick" + labelValueB = "morty" + customLabelName = "tag" + customLabelValueA = "a" + customLabelValueB = "b" + ) + tests := []struct { + name string + labelName string + expectedMetrics []*dto.Metric + expectErr bool + }{ + { + name: "no overlap", + labelName: customLabelName, + expectedMetrics: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String(labelName), + Value: proto.String(labelValueB), + }, + { + Name: proto.String(customLabelName), + Value: proto.String(customLabelValueB), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String(labelName), + Value: proto.String(labelValueA), + }, + { + Name: proto.String(customLabelName), + Value: proto.String(customLabelValueA), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(0), + }, + }, + }, + expectErr: false, + }, + { + name: "has overlap", + labelName: labelName, + expectedMetrics: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String(labelName), + Value: proto.String(labelValueB), + }, + { + Name: proto.String(customLabelName), + Value: proto.String(customLabelValueB), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + expectErr: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + gatherer := NewLabelGatherer(labelName) + require.NotNil(gatherer) + + registerA := prometheus.NewRegistry() + require.NoError(gatherer.Register(labelValueA, registerA)) + { + counterA := prometheus.NewCounterVec( + counterOpts, + 
[]string{test.labelName}, + ) + counterA.With(prometheus.Labels{test.labelName: customLabelValueA}) + require.NoError(registerA.Register(counterA)) + } + + registerB := prometheus.NewRegistry() + require.NoError(gatherer.Register(labelValueB, registerB)) + { + counterB := prometheus.NewCounterVec( + counterOpts, + []string{customLabelName}, + ) + counterB.With(prometheus.Labels{customLabelName: customLabelValueB}).Inc() + require.NoError(registerB.Register(counterB)) + } + + metrics, err := gatherer.Gather() + if test.expectErr { + require.Error(err) //nolint:forbidigo // the error is not exported + } else { + require.NoError(err) + } + require.Equal( + []*dto.MetricFamily{ + { + Name: proto.String(counterOpts.Name), + Help: proto.String(counterOpts.Help), + Type: dto.MetricType_COUNTER.Enum(), + Metric: test.expectedMetrics, + }, + }, + metrics, + ) + }) + } +} + +func TestLabelGatherer_Register(t *testing.T) { + firstLabeledGatherer := &labeledGatherer{ + labelValue: "first", + gatherer: &testGatherer{}, + } + firstLabelGatherer := func() *labelGatherer { + return &labelGatherer{ + multiGatherer: multiGatherer{ + names: []string{firstLabeledGatherer.labelValue}, + gatherers: prometheus.Gatherers{ + firstLabeledGatherer, + }, + }, + } + } + secondLabeledGatherer := &labeledGatherer{ + labelValue: "second", + gatherer: &testGatherer{ + mfs: []*dto.MetricFamily{{}}, + }, + } + secondLabelGatherer := &labelGatherer{ + multiGatherer: multiGatherer{ + names: []string{ + firstLabeledGatherer.labelValue, + secondLabeledGatherer.labelValue, + }, + gatherers: prometheus.Gatherers{ + firstLabeledGatherer, + secondLabeledGatherer, + }, + }, + } + + tests := []struct { + name string + labelGatherer *labelGatherer + labelValue string + gatherer prometheus.Gatherer + expectedErr error + expectedLabelGatherer *labelGatherer + }{ + { + name: "first registration", + labelGatherer: &labelGatherer{}, + labelValue: "first", + gatherer: firstLabeledGatherer.gatherer, + expectedErr: 
nil, + expectedLabelGatherer: firstLabelGatherer(), + }, + { + name: "second registration", + labelGatherer: firstLabelGatherer(), + labelValue: "second", + gatherer: secondLabeledGatherer.gatherer, + expectedErr: nil, + expectedLabelGatherer: secondLabelGatherer, + }, + { + name: "conflicts with previous registration", + labelGatherer: firstLabelGatherer(), + labelValue: "first", + gatherer: secondLabeledGatherer.gatherer, + expectedErr: errDuplicateGatherer, + expectedLabelGatherer: firstLabelGatherer(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.labelGatherer.Register(test.labelValue, test.gatherer) + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expectedLabelGatherer, test.labelGatherer) + }) + } +} diff --git a/api/metrics/multi_gatherer.go b/api/metrics/multi_gatherer.go index d8d4d93d2d76..b2fede55643c 100644 --- a/api/metrics/multi_gatherer.go +++ b/api/metrics/multi_gatherer.go @@ -4,94 +4,42 @@ package metrics import ( - "cmp" - "errors" "fmt" - "slices" "sync" "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/metric" - dto "github.com/prometheus/client_model/go" ) -var ( - _ MultiGatherer = (*multiGatherer)(nil) - - errReregisterGatherer = errors.New("attempt to register existing gatherer") -) - // MultiGatherer extends the Gatherer interface by allowing additional gatherers // to be registered. type MultiGatherer interface { prometheus.Gatherer // Register adds the outputs of [gatherer] to the results of future calls to - // Gather with the provided [namespace] added to the metrics. - Register(namespace string, gatherer prometheus.Gatherer) error + // Gather with the provided [name] added to the metrics. 
+ Register(name string, gatherer prometheus.Gatherer) error } -type multiGatherer struct { - lock sync.RWMutex - gatherers map[string]prometheus.Gatherer +// Deprecated: Use NewPrefixGatherer instead. +// +// TODO: Remove once coreth is updated. +func NewMultiGatherer() MultiGatherer { + return NewPrefixGatherer() } -func NewMultiGatherer() MultiGatherer { - return &multiGatherer{ - gatherers: make(map[string]prometheus.Gatherer), - } +type multiGatherer struct { + lock sync.RWMutex + names []string + gatherers prometheus.Gatherers } func (g *multiGatherer) Gather() ([]*dto.MetricFamily, error) { g.lock.RLock() defer g.lock.RUnlock() - var results []*dto.MetricFamily - for namespace, gatherer := range g.gatherers { - gatheredMetrics, err := gatherer.Gather() - if err != nil { - return nil, err - } - for _, gatheredMetric := range gatheredMetrics { - var name string - if gatheredMetric.Name != nil { - name = metric.AppendNamespace(namespace, *gatheredMetric.Name) - } else { - name = namespace - } - gatheredMetric.Name = &name - results = append(results, gatheredMetric) - } - } - // Because we overwrite every metric's name, we are guaranteed that there - // are no metrics with nil names. 
- sortMetrics(results) - return results, nil -} - -func (g *multiGatherer) Register(namespace string, gatherer prometheus.Gatherer) error { - g.lock.Lock() - defer g.lock.Unlock() - - if existingGatherer, exists := g.gatherers[namespace]; exists { - return fmt.Errorf("%w for namespace %q; existing: %#v; new: %#v", - errReregisterGatherer, - namespace, - existingGatherer, - gatherer, - ) - } - - g.gatherers[namespace] = gatherer - return nil -} - -func sortMetrics(m []*dto.MetricFamily) { - slices.SortFunc(m, func(i, j *dto.MetricFamily) int { - return cmp.Compare(*i.Name, *j.Name) - }) + return g.gatherers.Gather() } func MakeAndRegister(gatherer MultiGatherer, name string) (*prometheus.Registry, error) { diff --git a/api/metrics/multi_gatherer_test.go b/api/metrics/multi_gatherer_test.go deleted file mode 100644 index 51b548d18a68..000000000000 --- a/api/metrics/multi_gatherer_test.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package metrics - -import ( - "errors" - "testing" - - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - - dto "github.com/prometheus/client_model/go" -) - -func TestMultiGathererEmptyGather(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - mfs, err := g.Gather() - require.NoError(err) - require.Empty(mfs) -} - -func TestMultiGathererDuplicatedPrefix(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - og := prometheus.NewRegistry() - - require.NoError(g.Register("", og)) - - err := g.Register("", og) - require.ErrorIs(err, errReregisterGatherer) - - require.NoError(g.Register("lol", og)) -} - -func TestMultiGathererAddedError(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - errTest := errors.New("non-nil error") - tg := &testGatherer{ - err: errTest, - } - - require.NoError(g.Register("", tg)) - - mfs, err := g.Gather() - require.ErrorIs(err, errTest) - require.Empty(mfs) -} - -func TestMultiGathererNoAddedPrefix(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - tg := &testGatherer{ - mfs: []*dto.MetricFamily{{ - Name: &hello, - }}, - } - - require.NoError(g.Register("", tg)) - - mfs, err := g.Gather() - require.NoError(err) - require.Len(mfs, 1) - require.Equal(&hello, mfs[0].Name) -} - -func TestMultiGathererAddedPrefix(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - tg := &testGatherer{ - mfs: []*dto.MetricFamily{{ - Name: &world, - }}, - } - - require.NoError(g.Register(hello, tg)) - - mfs, err := g.Gather() - require.NoError(err) - require.Len(mfs, 1) - require.Equal(&helloWorld, mfs[0].Name) -} - -func TestMultiGathererJustPrefix(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - tg := &testGatherer{ - mfs: []*dto.MetricFamily{{}}, - } - - require.NoError(g.Register(hello, tg)) - - mfs, err := g.Gather() - require.NoError(err) - 
require.Len(mfs, 1) - require.Equal(&hello, mfs[0].Name) -} - -func TestMultiGathererSorted(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - name0 := "a" - name1 := "z" - tg := &testGatherer{ - mfs: []*dto.MetricFamily{ - { - Name: &name1, - }, - { - Name: &name0, - }, - }, - } - - require.NoError(g.Register("", tg)) - - mfs, err := g.Gather() - require.NoError(err) - require.Len(mfs, 2) - require.Equal(&name0, mfs[0].Name) - require.Equal(&name1, mfs[1].Name) -} diff --git a/api/metrics/prefix_gatherer.go b/api/metrics/prefix_gatherer.go new file mode 100644 index 000000000000..1f0b78a24380 --- /dev/null +++ b/api/metrics/prefix_gatherer.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package metrics + +import ( + "fmt" + "slices" + + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/utils/metric" + + dto "github.com/prometheus/client_model/go" +) + +var _ MultiGatherer = (*prefixGatherer)(nil) + +// NewPrefixGatherer returns a new MultiGatherer that merges metrics by adding a +// prefix to their names. 
+func NewPrefixGatherer() MultiGatherer { + return &prefixGatherer{} +} + +type prefixGatherer struct { + multiGatherer +} + +func (g *prefixGatherer) Register(prefix string, gatherer prometheus.Gatherer) error { + g.lock.Lock() + defer g.lock.Unlock() + + // TODO: Restrict prefixes to avoid potential conflicts + if slices.Contains(g.names, prefix) { + return fmt.Errorf("%w: %q", + errDuplicateGatherer, + prefix, + ) + } + + g.names = append(g.names, prefix) + g.gatherers = append(g.gatherers, &prefixedGatherer{ + prefix: prefix, + gatherer: gatherer, + }) + return nil +} + +type prefixedGatherer struct { + prefix string + gatherer prometheus.Gatherer +} + +func (g *prefixedGatherer) Gather() ([]*dto.MetricFamily, error) { + // Gather returns partially filled metrics in the case of an error. So, it + // is expected to still return the metrics in the case an error is returned. + metricFamilies, err := g.gatherer.Gather() + for _, metricFamily := range metricFamilies { + metricFamily.Name = proto.String(metric.AppendNamespace( + g.prefix, + metricFamily.GetName(), + )) + } + return metricFamilies, err +} diff --git a/api/metrics/prefix_gatherer_test.go b/api/metrics/prefix_gatherer_test.go new file mode 100644 index 000000000000..ba37540b01e3 --- /dev/null +++ b/api/metrics/prefix_gatherer_test.go @@ -0,0 +1,150 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package metrics + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +func TestPrefixGatherer_Gather(t *testing.T) { + require := require.New(t) + + gatherer := NewPrefixGatherer() + require.NotNil(gatherer) + + registerA := prometheus.NewRegistry() + require.NoError(gatherer.Register("a", registerA)) + { + counterA := prometheus.NewCounter(counterOpts) + require.NoError(registerA.Register(counterA)) + } + + registerB := prometheus.NewRegistry() + require.NoError(gatherer.Register("b", registerB)) + { + counterB := prometheus.NewCounter(counterOpts) + counterB.Inc() + require.NoError(registerB.Register(counterB)) + } + + metrics, err := gatherer.Gather() + require.NoError(err) + require.Equal( + []*dto.MetricFamily{ + { + Name: proto.String("a_counter"), + Help: proto.String(counterOpts.Help), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Counter: &dto.Counter{ + Value: proto.Float64(0), + }, + }, + }, + }, + { + Name: proto.String("b_counter"), + Help: proto.String(counterOpts.Help), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + }, + }, + metrics, + ) +} + +func TestPrefixGatherer_Register(t *testing.T) { + firstPrefixedGatherer := &prefixedGatherer{ + prefix: "first", + gatherer: &testGatherer{}, + } + firstPrefixGatherer := func() *prefixGatherer { + return &prefixGatherer{ + multiGatherer: multiGatherer{ + names: []string{ + firstPrefixedGatherer.prefix, + }, + gatherers: prometheus.Gatherers{ + firstPrefixedGatherer, + }, + }, + } + } + secondPrefixedGatherer := &prefixedGatherer{ + prefix: "second", + gatherer: &testGatherer{ + mfs: []*dto.MetricFamily{{}}, + }, + } + secondPrefixGatherer := &prefixGatherer{ + 
multiGatherer: multiGatherer{ + names: []string{ + firstPrefixedGatherer.prefix, + secondPrefixedGatherer.prefix, + }, + gatherers: prometheus.Gatherers{ + firstPrefixedGatherer, + secondPrefixedGatherer, + }, + }, + } + + tests := []struct { + name string + prefixGatherer *prefixGatherer + prefix string + gatherer prometheus.Gatherer + expectedErr error + expectedPrefixGatherer *prefixGatherer + }{ + { + name: "first registration", + prefixGatherer: &prefixGatherer{}, + prefix: firstPrefixedGatherer.prefix, + gatherer: firstPrefixedGatherer.gatherer, + expectedErr: nil, + expectedPrefixGatherer: firstPrefixGatherer(), + }, + { + name: "second registration", + prefixGatherer: firstPrefixGatherer(), + prefix: secondPrefixedGatherer.prefix, + gatherer: secondPrefixedGatherer.gatherer, + expectedErr: nil, + expectedPrefixGatherer: secondPrefixGatherer, + }, + { + name: "conflicts with previous registration", + prefixGatherer: firstPrefixGatherer(), + prefix: firstPrefixedGatherer.prefix, + gatherer: secondPrefixedGatherer.gatherer, + expectedErr: errDuplicateGatherer, + expectedPrefixGatherer: firstPrefixGatherer(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.prefixGatherer.Register(test.prefix, test.gatherer) + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expectedPrefixGatherer, test.prefixGatherer) + }) + } +} From e8ecbadec8eb1c336dd8927b0225499eceea6f34 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 5 Jun 2024 17:57:41 -0400 Subject: [PATCH 043/102] Update versions for v1.11.7 (#3083) Co-authored-by: Darioush Jalali --- CONTRIBUTING.md | 2 +- README.md | 2 +- RELEASES.md | 52 ++++++++++++++++++++++++++++++++++++++ go.mod | 4 +-- go.sum | 4 +-- version/compatibility.json | 3 ++- version/constants.go | 2 +- 7 files changed, 61 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7967e92288c0..7b57a5e6886f 100644 --- 
a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ To start developing on AvalancheGo, you'll need a few things installed. -- Golang version >= 1.21.10 +- Golang version >= 1.21.11 - gcc - g++ diff --git a/README.md b/README.md index 222cb6e0cd14..e6763982bce8 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ The minimum recommended hardware specification for nodes connected to Mainnet is If you plan to build AvalancheGo from source, you will also need the following software: -- [Go](https://golang.org/doc/install) version >= 1.21.10 +- [Go](https://golang.org/doc/install) version >= 1.21.11 - [gcc](https://gcc.gnu.org/) - g++ diff --git a/RELEASES.md b/RELEASES.md index ea14d67c6df6..01521a2b4498 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,57 @@ # Release Notes +## [v1.11.7](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.7) + +This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is unchanged at `35` and is compatible with versions `v1.11.3-v1.11.6`. 
+ +### APIs + +- Added peer's `trackedSubnets` that are not locally tracked to the response from `info.peers` + +### Configs + +- Changed the undocumented `pebble` option for `--db-type` to be `pebbledb` and documented the option + +### Fixes + +- Removed repeated DB compaction during bootstrapping that caused a significant regression in bootstrapping times +- Fixed C-Chain state-sync crash +- Fixed C-Chain state-sync ETA calculation +- Fixed Subnet owner reported by `platform.getSubnets` after a subnet's owner was rotated + +### What's Changed + +- Expose canonical warp formatting function by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3049 +- Remove subnet filter from Peer.TrackedSubnets() by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2975 +- Remove optional gatherer by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3052 +- [vms/platformvm] Return the correct owner in `platform.GetSubnets` after transfer by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3054 +- Add metrics client by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3057 +- [vms/platformvm] Replace `GetSubnets` with `GetSubnetIDs` in `State` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3055 +- Implement `constants.VMName` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3058 +- [testing] Remove superfluous gomega dep by @marun in https://github.com/ava-labs/avalanchego/pull/3063 +- [antithesis] Enable workload instrumentation by @marun in https://github.com/ava-labs/avalanchego/pull/3059 +- Add pebbledb to docs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3061 +- [ci] Remove perpetually failing govulncheck job by @marun in https://github.com/ava-labs/avalanchego/pull/3069 +- Remove api namespace by @StephenButtolph in 
https://github.com/ava-labs/avalanchego/pull/3066 +- Remove unused metrics namespaces by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3062 +- Only compact after executing a large number of blocks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3065 +- Remove network namespace by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3067 +- Remove db namespace by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3068 +- Remove averager metrics namespace by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3072 +- chore: fix function name by @stellrust in https://github.com/ava-labs/avalanchego/pull/3075 +- Select metric by label in e2e tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3073 +- [tmpnet] Bootstrap subnets with a single node by @marun in https://github.com/ava-labs/avalanchego/pull/3005 +- [antithesis] Skip push for builder image by @marun in https://github.com/ava-labs/avalanchego/pull/3070 +- Implement label gatherer by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3074 + +### New Contributors + +- @stellrust made their first contribution in https://github.com/ava-labs/avalanchego/pull/3075 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.6...v1.11.7 + ## [v1.11.6](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.6) This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. 
diff --git a/go.mod b/go.mod index d7965b58658f..dd81b9438eae 100644 --- a/go.mod +++ b/go.mod @@ -4,13 +4,13 @@ module github.com/ava-labs/avalanchego // CONTRIBUTING.md // README.md // go.mod (here) -go 1.21.10 +go 1.21.11 require ( github.com/DataDog/zstd v1.5.2 github.com/NYTimes/gziphandler v1.1.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/coreth v0.13.5-remove-optional-gatherer.2 + github.com/ava-labs/coreth v0.13.5-rc.0 github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 diff --git a/go.sum b/go.sum index da0883a3048d..a3a0887b60b7 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/coreth v0.13.5-remove-optional-gatherer.2 h1:RX9DcvgWxq42B2aiGzk77Y8w2bcB7ApO/Cdj9hA6QoE= -github.com/ava-labs/coreth v0.13.5-remove-optional-gatherer.2/go.mod h1:cm5c12xo5NiTgtbmeduv8i2nYdzgkczz9Wm3yiwwTRU= +github.com/ava-labs/coreth v0.13.5-rc.0 h1:PJQbR9o2RrW3j9ba4r1glXnmM2PNAP3xR569+gMcBd0= +github.com/ava-labs/coreth v0.13.5-rc.0/go.mod h1:cm5c12xo5NiTgtbmeduv8i2nYdzgkczz9Wm3yiwwTRU= github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 h1:mg9Uw6oZFJKytJxgxnl3uxZOs/SB8CVHg6Io4Tf99Zc= github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= 
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/version/compatibility.json b/version/compatibility.json index af88db003657..c2d3525a0393 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -3,7 +3,8 @@ "v1.11.3", "v1.11.4", "v1.11.5", - "v1.11.6" + "v1.11.6", + "v1.11.7" ], "34": [ "v1.11.2" diff --git a/version/constants.go b/version/constants.go index 159f83a34ac2..1e2b809d5a7a 100644 --- a/version/constants.go +++ b/version/constants.go @@ -26,7 +26,7 @@ var ( Current = &Semantic{ Major: 1, Minor: 11, - Patch: 6, + Patch: 7, } CurrentApp = &Application{ Name: Client, From 08ed4ac62657b76555d839109d05140b16a966b7 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 5 Jun 2024 20:51:30 -0400 Subject: [PATCH 044/102] Remove avalanche metrics registerer from consensus context (#3087) --- chains/manager.go | 39 +++++++++++-------- snow/context.go | 9 +---- .../avalanche/bootstrap/bootstrapper.go | 4 +- .../avalanche/bootstrap/bootstrapper_test.go | 10 +++-- snow/networking/sender/sender.go | 12 +----- snow/networking/sender/sender_test.go | 11 +++--- snow/snowtest/snowtest.go | 11 +++--- vms/platformvm/vm_test.go | 1 + 8 files changed, 46 insertions(+), 51 deletions(-) diff --git a/chains/manager.go b/chains/manager.go index 7fee70b8f816..8548954e1c5e 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -425,15 +425,6 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c return nil, fmt.Errorf("error while registering chain's metrics %w", err) } - // This converts the prefix for all the Avalanche consensus metrics from - // `avalanche_{chainID}_` into `avalanche_{chainID}_avalanche_` so that - // there are no conflicts when registering the Snowman consensus metrics. 
- avalancheConsensusMetrics := prometheus.NewRegistry() - avalancheDAGNamespace := metric.AppendNamespace(chainNamespace, "avalanche") - if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { - return nil, fmt.Errorf("error while registering DAG metrics %w", err) - } - vmMetrics := metrics.NewMultiGatherer() vmNamespace := metric.AppendNamespace(chainNamespace, "vm") if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil { @@ -463,11 +454,10 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c ValidatorState: m.validatorState, ChainDataDir: chainDataDir, }, - BlockAcceptor: m.BlockAcceptorGroup, - TxAcceptor: m.TxAcceptorGroup, - VertexAcceptor: m.VertexAcceptorGroup, - Registerer: consensusMetrics, - AvalancheRegisterer: avalancheConsensusMetrics, + BlockAcceptor: m.BlockAcceptorGroup, + TxAcceptor: m.TxAcceptorGroup, + VertexAcceptor: m.VertexAcceptorGroup, + Registerer: consensusMetrics, } // Get a factory for the vm we want to use on our chain @@ -572,11 +562,22 @@ func (m *manager) createAvalancheChain( txBootstrappingDB := prefixdb.New(TxBootstrappingDBPrefix, prefixDB) blockBootstrappingDB := prefixdb.New(BlockBootstrappingDBPrefix, prefixDB) - vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer) + // This converts the prefix for all the Avalanche consensus metrics from + // `avalanche_{chainID}_` into `avalanche_{chainID}_avalanche_` so that + // there are no conflicts when registering the Snowman consensus metrics. 
+ avalancheConsensusMetrics := prometheus.NewRegistry() + primaryAlias := m.PrimaryAliasOrDefault(ctx.ChainID) + chainNamespace := metric.AppendNamespace(constants.PlatformName, primaryAlias) + avalancheDAGNamespace := metric.AppendNamespace(chainNamespace, "avalanche") + if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { + return nil, fmt.Errorf("error while registering DAG metrics %w", err) + } + + vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", avalancheConsensusMetrics) if err != nil { return nil, err } - txBlocker, err := queue.New(txBootstrappingDB, "tx", ctx.AvalancheRegisterer) + txBlocker, err := queue.New(txBootstrappingDB, "tx", avalancheConsensusMetrics) if err != nil { return nil, err } @@ -590,6 +591,7 @@ func (m *manager) createAvalancheChain( m.TimeoutManager, p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, sb, + avalancheConsensusMetrics, ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) @@ -608,6 +610,7 @@ func (m *manager) createAvalancheChain( m.TimeoutManager, p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, sb, + ctx.Registerer, ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) @@ -864,7 +867,7 @@ func (m *manager) createAvalancheChain( ctx.Log, m.BootstrapMaxTimeGetAncestors, m.BootstrapAncestorsMaxContainersSent, - ctx.AvalancheRegisterer, + avalancheConsensusMetrics, ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche base message handler: %w", err) @@ -896,6 +899,7 @@ func (m *manager) createAvalancheChain( avalancheBootstrapper, err := avbootstrap.New( avalancheBootstrapperConfig, snowmanBootstrapper.Start, + avalancheConsensusMetrics, ) if err != nil { return nil, fmt.Errorf("error initializing avalanche bootstrapper: %w", err) @@ -966,6 +970,7 @@ func (m *manager) createSnowmanChain( m.TimeoutManager, p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, sb, + ctx.Registerer, ) if err != nil { return nil, 
fmt.Errorf("couldn't initialize sender: %w", err) diff --git a/snow/context.go b/snow/context.go index f610adca9998..2fa501571890 100644 --- a/snow/context.go +++ b/snow/context.go @@ -65,15 +65,8 @@ type Registerer interface { type ConsensusContext struct { *Context - // Registers all common and snowman consensus metrics. Unlike the avalanche - // consensus engine metrics, we do not prefix the name with the engine name, - // as snowman is used for all chains by default. + // Registers all consensus metrics. Registerer Registerer - // Only used to register Avalanche consensus metrics. Previously, all - // metrics were prefixed with "avalanche_{chainID}_". Now we add avalanche - // to the prefix, "avalanche_{chainID}_avalanche_", to differentiate - // consensus operations after the DAG linearization. - AvalancheRegisterer Registerer // BlockAcceptor is the callback that will be fired whenever a VM is // notified that their block was accepted. diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index 55e3307e9337..00f9ab64a458 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" @@ -47,6 +48,7 @@ var _ common.BootstrapableEngine = (*bootstrapper)(nil) func New( config Config, onFinished func(ctx context.Context, lastReqID uint32) error, + reg prometheus.Registerer, ) (common.BootstrapableEngine, error) { b := &bootstrapper{ Config: config, @@ -66,7 +68,7 @@ func New( processedCache: &cache.LRU[ids.ID, struct{}]{Size: cacheSize}, onFinished: onFinished, } - return b, b.metrics.Initialize(config.Ctx.AvalancheRegisterer) + return b, b.metrics.Initialize(reg) } // Note: To align with the Snowman invariant, it should be guaranteed the VM is diff --git 
a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index 47f92057552a..2792e8682f61 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -76,10 +76,10 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te peer := ids.GenerateTestNodeID() require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, peer, nil, ids.Empty, 1)) - vtxBlocker, err := queue.NewWithMissing(prefixdb.New([]byte("vtx"), db), "vtx", ctx.AvalancheRegisterer) + vtxBlocker, err := queue.NewWithMissing(prefixdb.New([]byte("vtx"), db), "vtx", prometheus.NewRegistry()) require.NoError(err) - txBlocker, err := queue.New(prefixdb.New([]byte("tx"), db), "tx", ctx.AvalancheRegisterer) + txBlocker, err := queue.New(prefixdb.New([]byte("tx"), db), "tx", prometheus.NewRegistry()) require.NoError(err) peerTracker := tracker.NewPeers() @@ -88,7 +88,7 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te startupTracker := tracker.NewStartup(peerTracker, totalWeight/2+1) vdrs.RegisterSetCallbackListener(constants.PrimaryNetworkID, startupTracker) - avaGetHandler, err := getter.New(manager, sender, ctx.Log, time.Second, 2000, ctx.AvalancheRegisterer) + avaGetHandler, err := getter.New(manager, sender, ctx.Log, time.Second, 2000, prometheus.NewRegistry()) require.NoError(err) p2pTracker, err := p2p.NewPeerTracker( @@ -172,6 +172,7 @@ func TestBootstrapperSingleFrontier(t *testing.T) { }) return nil }, + prometheus.NewRegistry(), ) require.NoError(err) @@ -278,6 +279,7 @@ func TestBootstrapperByzantineResponses(t *testing.T) { }) return nil }, + prometheus.NewRegistry(), ) require.NoError(err) @@ -444,6 +446,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { }) return nil }, + prometheus.NewRegistry(), ) require.NoError(err) @@ -567,6 +570,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { }) return nil }, + 
prometheus.NewRegistry(), ) require.NoError(err) diff --git a/snow/networking/sender/sender.go b/snow/networking/sender/sender.go index 37076972fe12..e4e36bd3ebbb 100644 --- a/snow/networking/sender/sender.go +++ b/snow/networking/sender/sender.go @@ -5,7 +5,6 @@ package sender import ( "context" - "fmt" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" @@ -58,6 +57,7 @@ func New( timeouts timeout.Manager, engineType p2p.EngineType, subnet subnets.Subnet, + reg prometheus.Registerer, ) (common.Sender, error) { s := &sender{ ctx: ctx, @@ -75,16 +75,6 @@ func New( engineType: engineType, subnet: subnet, } - - var reg prometheus.Registerer - switch engineType { - case p2p.EngineType_ENGINE_TYPE_SNOWMAN: - reg = ctx.Registerer - case p2p.EngineType_ENGINE_TYPE_AVALANCHE: - reg = ctx.AvalancheRegisterer - default: - return nil, fmt.Errorf("unknown engine type %s", engineType) - } return s, reg.Register(s.failedDueToBench) } diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 6bd2bc558c9c..9453f43e4faa 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -100,6 +100,7 @@ func TestTimeout(t *testing.T) { tm, p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -376,6 +377,7 @@ func TestReliableMessages(t *testing.T) { tm, p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -532,6 +534,7 @@ func TestReliableMessagesToMyself(t *testing.T) { tm, p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -843,6 +846,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { timeoutManager, p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ 
-1049,11 +1053,6 @@ func TestSender_Bootstrap_Responses(t *testing.T) { router = router.NewMockRouter(ctrl) ) - // Instantiate new registerers to avoid duplicate metrics - // registration - ctx.Registerer = prometheus.NewRegistry() - ctx.AvalancheRegisterer = prometheus.NewRegistry() - sender, err := New( ctx, msgCreator, @@ -1062,6 +1061,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { timeoutManager, p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -1228,6 +1228,7 @@ func TestSender_Single_Request(t *testing.T) { timeoutManager, engineType, subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) diff --git a/snow/snowtest/snowtest.go b/snow/snowtest/snowtest.go index 0ddee75707ab..86374f766514 100644 --- a/snow/snowtest/snowtest.go +++ b/snow/snowtest/snowtest.go @@ -39,12 +39,11 @@ func (noOpAcceptor) Accept(*snow.ConsensusContext, ids.ID, []byte) error { func ConsensusContext(ctx *snow.Context) *snow.ConsensusContext { return &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - BlockAcceptor: noOpAcceptor{}, - TxAcceptor: noOpAcceptor{}, - VertexAcceptor: noOpAcceptor{}, + Context: ctx, + Registerer: prometheus.NewRegistry(), + BlockAcceptor: noOpAcceptor{}, + TxAcceptor: noOpAcceptor{}, + VertexAcceptor: noOpAcceptor{}, } } diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 34b7d54b76e7..50c677688713 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -1447,6 +1447,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { timeoutManager, p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(consensusCtx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) From 657fe0bd2f8079042dae38f476141176bf9dcd21 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 5 Jun 2024 22:14:59 -0400 
Subject: [PATCH 045/102] Remove rejection from `consensus.Add` (#3084) --- snow/consensus/snowman/consensus.go | 8 +- snow/consensus/snowman/consensus_test.go | 154 ++++++++------------- snow/consensus/snowman/network_test.go | 2 +- snow/consensus/snowman/topological.go | 26 +--- snow/consensus/snowman/traced_consensus.go | 10 -- snow/engine/snowman/transitive.go | 2 +- snow/engine/snowman/transitive_test.go | 2 +- 7 files changed, 68 insertions(+), 136 deletions(-) diff --git a/snow/consensus/snowman/consensus.go b/snow/consensus/snowman/consensus.go index 3f1006416366..1eaff4b0e2f4 100644 --- a/snow/consensus/snowman/consensus.go +++ b/snow/consensus/snowman/consensus.go @@ -31,9 +31,13 @@ type Consensus interface { // Returns the number of blocks processing NumProcessing() int - // Adds a new decision. Assumes the dependency has already been added. + // Add a new block. + // + // Add should not be called multiple times with the same block. + // The parent block should either be the last accepted block or processing. + // // Returns if a critical error has occurred. - Add(context.Context, Block) error + Add(Block) error // Decided returns true if the block has been decided. 
Decided(Block) bool diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index d52e76759512..b4cb5b03a494 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -32,7 +32,7 @@ var ( NumProcessingTest, AddToTailTest, AddToNonTailTest, - AddToUnknownTest, + AddOnUnknownParentTest, StatusOrProcessingPreviouslyAcceptedTest, StatusOrProcessingPreviouslyRejectedTest, StatusOrProcessingUnissuedTest, @@ -51,7 +51,6 @@ var ( MetricsProcessingErrorTest, MetricsAcceptedErrorTest, MetricsRejectedErrorTest, - ErrorOnInitialRejectionTest, ErrorOnAcceptTest, ErrorOnRejectSiblingTest, ErrorOnTransitiveRejectionTest, @@ -139,7 +138,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { require.Zero(sm.NumProcessing()) // Adding to the previous preference will update the preference - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) require.Equal(1, sm.NumProcessing()) votes := bag.Of(block.ID()) @@ -176,7 +175,7 @@ func AddToTailTest(t *testing.T, factory Factory) { block := snowmantest.BuildChild(snowmantest.Genesis) // Adding to the previous preference will update the preference - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) require.Equal(block.ID(), sm.Preference()) require.True(sm.IsPreferred(block)) @@ -215,18 +214,18 @@ func AddToNonTailTest(t *testing.T, factory Factory) { secondBlock := snowmantest.BuildChild(snowmantest.Genesis) // Adding to the previous preference will update the preference - require.NoError(sm.Add(context.Background(), firstBlock)) + require.NoError(sm.Add(firstBlock)) require.Equal(firstBlock.IDV, sm.Preference()) // Adding to something other than the previous preference won't update the // preference - require.NoError(sm.Add(context.Background(), secondBlock)) + require.NoError(sm.Add(secondBlock)) require.Equal(firstBlock.IDV, sm.Preference()) } // Make sure that adding a block 
that is detached from the rest of the tree -// rejects the block -func AddToUnknownTest(t *testing.T, factory Factory) { +// returns an error +func AddOnUnknownParentTest(t *testing.T, factory Factory) { require := require.New(t) sm := factory.New() @@ -260,11 +259,9 @@ func AddToUnknownTest(t *testing.T, factory Factory) { HeightV: snowmantest.GenesisHeight + 2, } - // Adding a block with an unknown parent means the parent must have already - // been rejected. Therefore the block should be immediately rejected - require.NoError(sm.Add(context.Background(), block)) - require.Equal(snowmantest.GenesisID, sm.Preference()) - require.Equal(choices.Rejected, block.Status()) + // Adding a block with an unknown parent should error. + err := sm.Add(block) + require.ErrorIs(err, errUnknownParentBlock) } func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { @@ -402,7 +399,7 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { block := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) require.Equal(choices.Processing, block.Status()) require.True(sm.Processing(block.ID())) require.False(sm.Decided(block)) @@ -440,7 +437,7 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { block := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) votes := bag.Of(block.ID()) require.NoError(sm.RecordPoll(context.Background(), votes)) @@ -482,8 +479,8 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { firstBlock := snowmantest.BuildChild(snowmantest.Genesis) secondBlock := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(sm.Add(context.Background(), firstBlock)) - require.NoError(sm.Add(context.Background(), secondBlock)) + require.NoError(sm.Add(firstBlock)) + require.NoError(sm.Add(secondBlock)) votes := bag.Of(firstBlock.ID()) @@ 
-530,8 +527,8 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { firstBlock := snowmantest.BuildChild(snowmantest.Genesis) secondBlock := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(sm.Add(context.Background(), firstBlock)) - require.NoError(sm.Add(context.Background(), secondBlock)) + require.NoError(sm.Add(firstBlock)) + require.NoError(sm.Add(secondBlock)) votes := bag.Of(firstBlock.ID(), secondBlock.ID()) @@ -614,9 +611,9 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { block1 := snowmantest.BuildChild(snowmantest.Genesis) block2 := snowmantest.BuildChild(block1) - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block2)) // Current graph structure: // G @@ -670,10 +667,10 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { block2 := snowmantest.BuildChild(block1) block3 := snowmantest.BuildChild(block1) - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - require.NoError(sm.Add(context.Background(), block2)) - require.NoError(sm.Add(context.Background(), block3)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block2)) + require.NoError(sm.Add(block3)) // Current graph structure: // G @@ -738,7 +735,7 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { block := snowmantest.BuildChild(snowmantest.Genesis) unknownBlockID := ids.GenerateTestID() - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) validVotes := bag.Of(block.ID()) require.NoError(sm.RecordPoll(context.Background(), validVotes)) @@ -781,11 +778,11 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { block3 := 
snowmantest.BuildChild(block0) block4 := snowmantest.BuildChild(block3) - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - require.NoError(sm.Add(context.Background(), block2)) - require.NoError(sm.Add(context.Background(), block3)) - require.NoError(sm.Add(context.Background(), block4)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block2)) + require.NoError(sm.Add(block3)) + require.NoError(sm.Add(block4)) // Current graph structure: // G @@ -882,8 +879,8 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact } block3 := snowmantest.BuildChild(block2) - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) // When voting for [block0], we end up finalizing the first bit as 0. The // second bit is contested as either 0 or 1. For when the second bit is 1, @@ -896,11 +893,11 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact // instance has already decided it is rejected. Snowman doesn't actually // know that though, because that is an implementation detail of the // Snowball trie that is used. - require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(block2)) // Because [block2] is effectively rejected, [block3] is also effectively // rejected. 
- require.NoError(sm.Add(context.Background(), block3)) + require.NoError(sm.Add(block3)) require.Equal(block0.ID(), sm.Preference()) require.Equal(choices.Processing, block0.Status(), "should not be decided yet") @@ -964,10 +961,10 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { a2Block := snowmantest.BuildChild(a1Block) b2Block := snowmantest.BuildChild(b1Block) - require.NoError(sm.Add(context.Background(), a1Block)) - require.NoError(sm.Add(context.Background(), a2Block)) - require.NoError(sm.Add(context.Background(), b1Block)) - require.NoError(sm.Add(context.Background(), b2Block)) + require.NoError(sm.Add(a1Block)) + require.NoError(sm.Add(a2Block)) + require.NoError(sm.Add(b1Block)) + require.NoError(sm.Add(b2Block)) require.Equal(a2Block.ID(), sm.Preference()) @@ -1053,10 +1050,10 @@ func LastAcceptedTest(t *testing.T, factory Factory) { require.Equal(snowmantest.GenesisID, lastAcceptedID) require.Equal(snowmantest.GenesisHeight, lastAcceptedHeight) - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - require.NoError(sm.Add(context.Background(), block1Conflict)) - require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block1Conflict)) + require.NoError(sm.Add(block2)) lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() require.Equal(snowmantest.GenesisID, lastAcceptedID) @@ -1195,46 +1192,6 @@ func MetricsRejectedErrorTest(t *testing.T, factory Factory) { require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } -func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { - require := require.New(t) - - sm := factory.New() - - snowCtx := snowtest.Context(t, snowtest.CChainID) - ctx := snowtest.ConsensusContext(snowCtx) - params := snowball.Parameters{ - K: 1, - 
AlphaPreference: 1, - AlphaConfidence: 1, - Beta: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - require.NoError(sm.Initialize( - ctx, - params, - snowmantest.GenesisID, - snowmantest.GenesisHeight, - snowmantest.GenesisTimestamp, - )) - - block := &snowmantest.Block{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - RejectV: errTest, - StatusV: choices.Processing, - }, - ParentV: ids.GenerateTestID(), - HeightV: snowmantest.GenesisHeight + 1, - } - - err := sm.Add(context.Background(), block) - require.ErrorIs(err, errTest) -} - func ErrorOnAcceptTest(t *testing.T, factory Factory) { require := require.New(t) @@ -1264,7 +1221,7 @@ func ErrorOnAcceptTest(t *testing.T, factory Factory) { block := snowmantest.BuildChild(snowmantest.Genesis) block.AcceptV = errTest - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) votes := bag.Of(block.ID()) err := sm.RecordPoll(context.Background(), votes) @@ -1301,8 +1258,8 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { block1 := snowmantest.BuildChild(snowmantest.Genesis) block1.RejectV = errTest - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) votes := bag.Of(block0.ID()) err := sm.RecordPoll(context.Background(), votes) @@ -1340,9 +1297,9 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { block2 := snowmantest.BuildChild(block1) block2.RejectV = errTest - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block2)) votes := bag.Of(block0.ID()) err := sm.RecordPoll(context.Background(), votes) @@ -1408,11 +1365,8 @@ func 
ErrorOnAddDecidedBlockTest(t *testing.T, factory Factory) { snowmantest.GenesisTimestamp, )) - block := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(block.Accept(context.Background())) - - err := sm.Add(context.Background(), block) - require.ErrorIs(err, errDuplicateAdd) + err := sm.Add(snowmantest.Genesis) + require.ErrorIs(err, errUnknownParentBlock) } func gatherCounterGauge(t *testing.T, reg *prometheus.Registry) map[string]float64 { @@ -1458,8 +1412,8 @@ func RecordPollWithDefaultParameters(t *testing.T, factory Factory) { blk1 := snowmantest.BuildChild(snowmantest.Genesis) blk2 := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(sm.Add(context.Background(), blk1)) - require.NoError(sm.Add(context.Background(), blk2)) + require.NoError(sm.Add(blk1)) + require.NoError(sm.Add(blk2)) votes := bag.Bag[ids.ID]{} votes.AddCount(blk1.ID(), params.AlphaConfidence) @@ -1504,9 +1458,9 @@ func RecordPollRegressionCalculateInDegreeIndegreeCalculation(t *testing.T, fact blk2 := snowmantest.BuildChild(blk1) blk3 := snowmantest.BuildChild(blk2) - require.NoError(sm.Add(context.Background(), blk1)) - require.NoError(sm.Add(context.Background(), blk2)) - require.NoError(sm.Add(context.Background(), blk3)) + require.NoError(sm.Add(blk1)) + require.NoError(sm.Add(blk2)) + require.NoError(sm.Add(blk3)) votes := bag.Bag[ids.ID]{} votes.AddCount(blk2.ID(), 1) diff --git a/snow/consensus/snowman/network_test.go b/snow/consensus/snowman/network_test.go index eec42f017953..8c302ed5c7c6 100644 --- a/snow/consensus/snowman/network_test.go +++ b/snow/consensus/snowman/network_test.go @@ -91,7 +91,7 @@ func (n *Network) AddNode(t testing.TB, sm Consensus) error { VerifyV: blk.Verify(context.Background()), BytesV: blk.Bytes(), } - if err := sm.Add(context.Background(), myBlock); err != nil { + if err := sm.Add(myBlock); err != nil { return err } deps[myBlock.ID()] = myDep diff --git a/snow/consensus/snowman/topological.go 
b/snow/consensus/snowman/topological.go index f2ef015654c7..6956d707a4e9 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -21,6 +21,7 @@ import ( var ( errDuplicateAdd = errors.New("duplicate block add") + errUnknownParentBlock = errors.New("unknown parent block") errTooManyProcessingBlocks = errors.New("too many processing blocks") errBlockProcessingTooLong = errors.New("block processing too long") @@ -137,7 +138,7 @@ func (ts *Topological) NumProcessing() int { return len(ts.blocks) - 1 } -func (ts *Topological) Add(ctx context.Context, blk Block) error { +func (ts *Topological) Add(blk Block) error { blkID := blk.ID() height := blk.Height() ts.ctx.Log.Verbo("adding block", @@ -145,12 +146,8 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { zap.Uint64("height", height), ) - // Make sure a block is not inserted twice. This enforces the invariant that - // blocks are always added in topological order. Essentially, a block that - // is being added should never have a child that was already added. - // Additionally, this prevents any edge cases that may occur due to adding - // different blocks with the same ID. - if ts.Decided(blk) || ts.Processing(blkID) { + // Make sure a block is not inserted twice. + if ts.Processing(blkID) { return errDuplicateAdd } @@ -160,20 +157,7 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { parentID := blk.Parent() parentNode, ok := ts.blocks[parentID] if !ok { - ts.ctx.Log.Verbo("block ancestor is missing, being rejected", - zap.Stringer("blkID", blkID), - zap.Uint64("height", height), - zap.Stringer("parentID", parentID), - ) - - // If the ancestor is missing, this means the ancestor must have already - // been pruned. Therefore, the dependent should be transitively - // rejected. 
- if err := blk.Reject(ctx); err != nil { - return err - } - ts.metrics.Rejected(blkID, ts.pollNumber, len(blk.Bytes())) - return nil + return errUnknownParentBlock } // add the block as a child of its parent, and add the block to the tree diff --git a/snow/consensus/snowman/traced_consensus.go b/snow/consensus/snowman/traced_consensus.go index 10f49229fe16..d2d5f197a65c 100644 --- a/snow/consensus/snowman/traced_consensus.go +++ b/snow/consensus/snowman/traced_consensus.go @@ -29,16 +29,6 @@ func Trace(consensus Consensus, tracer trace.Tracer) Consensus { } } -func (c *tracedConsensus) Add(ctx context.Context, blk Block) error { - ctx, span := c.tracer.Start(ctx, "tracedConsensus.Add", oteltrace.WithAttributes( - attribute.Stringer("blkID", blk.ID()), - attribute.Int64("height", int64(blk.Height())), - )) - defer span.End() - - return c.Consensus.Add(ctx, blk) -} - func (c *tracedConsensus) RecordPoll(ctx context.Context, votes bag.Bag[ids.ID]) error { ctx, span := c.tracer.Start(ctx, "tracedConsensus.RecordPoll", oteltrace.WithAttributes( attribute.Int("numVotes", votes.Len()), diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 9e89fedd22b2..6bc93d1e8611 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -1116,7 +1116,7 @@ func (t *Transitive) addUnverifiedBlockToConsensus( zap.Stringer("blkID", blkID), zap.Uint64("height", blkHeight), ) - return true, t.Consensus.Add(ctx, &memoryBlock{ + return true, t.Consensus.Add(&memoryBlock{ Block: blk, metrics: t.metrics, tree: t.nonVerifieds, diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 2961b018c8ce..8c912df64a4a 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -2878,7 +2878,7 @@ func TestGetProcessingAncestor(t *testing.T) { time.Now(), )) - require.NoError(t, c.Add(context.Background(), issuedBlock)) + require.NoError(t, c.Add(issuedBlock)) 
nonVerifiedAncestors := ancestor.NewTree() nonVerifiedAncestors.Add(unissuedBlock.ID(), unissuedBlock.Parent()) From 1b82dce857823b3795205ffc2bac337cef0c4f25 Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 5 Jun 2024 23:59:21 -0400 Subject: [PATCH 046/102] [vms/platformvm] Rename `txstest.Builder` to `txstest.WalletFactory` (#2890) --- vms/platformvm/block/builder/builder_test.go | 38 +- vms/platformvm/block/builder/helpers_test.go | 19 +- .../block/builder/standard_block_test.go | 9 +- vms/platformvm/block/executor/helpers_test.go | 28 +- .../block/executor/proposal_block_test.go | 111 +++--- .../block/executor/standard_block_test.go | 35 +- vms/platformvm/service_test.go | 75 ++-- .../txs/executor/advance_time_test.go | 51 ++- .../txs/executor/create_chain_test.go | 36 +- .../txs/executor/create_subnet_test.go | 11 +- vms/platformvm/txs/executor/export_test.go | 9 +- vms/platformvm/txs/executor/helpers_test.go | 22 +- vms/platformvm/txs/executor/import_test.go | 9 +- .../txs/executor/proposal_tx_executor_test.go | 129 ++++--- .../txs/executor/reward_validator_test.go | 49 ++- .../txs/executor/standard_tx_executor_test.go | 243 ++++++++----- vms/platformvm/txs/txstest/builder.go | 326 +----------------- vms/platformvm/validator_set_property_test.go | 58 ++-- vms/platformvm/vm_regression_test.go | 241 ++++++++----- vms/platformvm/vm_test.go | 160 +++++---- 20 files changed, 838 insertions(+), 821 deletions(-) diff --git a/vms/platformvm/block/builder/builder_test.go b/vms/platformvm/block/builder/builder_test.go index 4088ccaee7a1..eeddc5bc7f9b 100644 --- a/vms/platformvm/block/builder/builder_test.go +++ b/vms/platformvm/block/builder/builder_test.go @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - 
"github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/block" @@ -29,6 +28,7 @@ import ( blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestBuildBlockBasic(t *testing.T) { @@ -39,15 +39,17 @@ func TestBuildBlockBasic(t *testing.T) { defer env.ctx.Lock.Unlock() // Create a valid transaction - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) txID := tx.ID() // Issue the transaction @@ -109,7 +111,8 @@ func TestBuildBlockShouldReward(t *testing.T) { require.NoError(err) // Create a valid [AddPermissionlessValidatorTx] - tx, err := env.txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -130,13 +133,14 @@ func TestBuildBlockShouldReward(t *testing.T) { Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + 
require.NoError(err) txID := tx.ID() // Issue the transaction @@ -247,15 +251,17 @@ func TestBuildBlockForceAdvanceTime(t *testing.T) { defer env.ctx.Lock.Unlock() // Create a valid transaction - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) txID := tx.ID() // Issue the transaction @@ -314,7 +320,8 @@ func TestBuildBlockInvalidStakingDurations(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) - tx1, err := env.txBuilder.NewAddPermissionlessValidatorTx( + builder1, signer1 := env.factory.NewWallet(preFundedKeys[0]) + utx1, err := builder1.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: ids.GenerateTestNodeID(), @@ -335,13 +342,14 @@ func TestBuildBlockInvalidStakingDurations(t *testing.T) { Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }), ) require.NoError(err) + tx1, err := walletsigner.SignUnsigned(context.Background(), signer1, utx1) + require.NoError(err) require.NoError(env.mempool.Add(tx1)) tx1ID := tx1.ID() _, ok := env.mempool.Get(tx1ID) @@ -353,7 +361,8 @@ func TestBuildBlockInvalidStakingDurations(t *testing.T) { sk, err = bls.NewSecretKey() require.NoError(err) - tx2, err := env.txBuilder.NewAddPermissionlessValidatorTx( + builder2, signer2 := env.factory.NewWallet(preFundedKeys[2]) + utx2, err := builder2.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: 
ids.GenerateTestNodeID(), @@ -374,13 +383,14 @@ func TestBuildBlockInvalidStakingDurations(t *testing.T) { Addrs: []ids.ShortID{preFundedKeys[2].PublicKey().Address()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[2]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[2].PublicKey().Address()}, }), ) require.NoError(err) + tx2, err := walletsigner.SignUnsigned(context.Background(), signer2, utx2) + require.NoError(err) require.NoError(env.mempool.Add(tx2)) tx2ID := tx2.ID() _, ok = env.mempool.Get(tx2ID) @@ -416,15 +426,17 @@ func TestPreviouslyDroppedTxsCannotBeReAddedToMempool(t *testing.T) { defer env.ctx.Lock.Unlock() // Create a valid transaction - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) txID := tx.ID() // Transaction should not be marked as dropped before being added to the diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index 909d7adefe5e..eb80a9ffc4d3 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -53,6 +53,7 @@ import ( blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -118,7 +119,7 @@ type environment struct { 
state state.State uptimes uptime.Manager utxosVerifier utxo.Verifier - txBuilder *txstest.Builder + factory *txstest.WalletFactory backend txexecutor.Backend } @@ -152,12 +153,7 @@ func newEnvironment(t *testing.T, f fork) *environment { //nolint:unparam res.uptimes = uptime.NewManager(res.state, res.clk) res.utxosVerifier = utxo.NewVerifier(res.ctx, res.clk, res.fx) - - res.txBuilder = txstest.NewBuilder( - res.ctx, - res.config, - res.state, - ) + res.factory = txstest.NewWalletFactory(res.ctx, res.config, res.state) genesisID := res.state.GetLastAccepted() res.backend = txexecutor.Backend{ @@ -240,9 +236,8 @@ func newEnvironment(t *testing.T, f fork) *environment { //nolint:unparam func addSubnet(t *testing.T, env *environment) { require := require.New(t) - // Create a subnet - var err error - testSubnet1, err = env.txBuilder.NewCreateSubnetTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 2, Addrs: []ids.ShortID{ @@ -251,15 +246,15 @@ func addSubnet(t *testing.T, env *environment) { preFundedKeys[2].PublicKey().Address(), }, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }), ) require.NoError(err) + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) - // store it genesisID := env.state.GetLastAccepted() stateDiff, err := state.NewDiff(genesisID, env.blkManager) require.NoError(err) diff --git a/vms/platformvm/block/builder/standard_block_test.go b/vms/platformvm/block/builder/standard_block_test.go index 8606163990eb..18ca5bdb3582 100644 --- a/vms/platformvm/block/builder/standard_block_test.go +++ b/vms/platformvm/block/builder/standard_block_test.go @@ -12,11 +12,12 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" 
"github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestAtomicTxImports(t *testing.T) { @@ -62,15 +63,17 @@ func TestAtomicTxImports(t *testing.T) { }}}, })) - tx, err := env.txBuilder.NewImportTx( + builder, signer := env.factory.NewWallet(recipientKey) + utx, err := builder.NewImportTx( env.ctx.XChainID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{recipientKey.PublicKey().Address()}, }, - []*secp256k1.PrivateKey{recipientKey}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) require.NoError(env.Builder.Add(tx)) b, err := env.Builder.BuildBlock(context.Background()) diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index d87b67c76d25..a8c276579c08 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ b/vms/platformvm/block/executor/helpers_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "fmt" "testing" "time" @@ -52,6 +53,7 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -130,7 +132,7 @@ type environment struct { mockedState *state.MockState uptimes uptime.Manager utxosVerifier utxo.Verifier - txBuilder *txstest.Builder + factory *txstest.WalletFactory backend *executor.Backend } @@ 
-158,7 +160,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller, f fork) *environment res.state = defaultState(res.config, res.ctx, res.baseDB, rewardsCalc) res.uptimes = uptime.NewManager(res.state, res.clk) res.utxosVerifier = utxo.NewVerifier(res.ctx, res.clk, res.fx) - res.txBuilder = txstest.NewBuilder( + res.factory = txstest.NewWalletFactory( res.ctx, res.config, res.state, @@ -168,8 +170,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller, f fork) *environment res.mockedState = state.NewMockState(ctrl) res.uptimes = uptime.NewManager(res.mockedState, res.clk) res.utxosVerifier = utxo.NewVerifier(res.ctx, res.clk, res.fx) - - res.txBuilder = txstest.NewBuilder( + res.factory = txstest.NewWalletFactory( res.ctx, res.config, res.mockedState, @@ -251,9 +252,8 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller, f fork) *environment } func addSubnet(env *environment) { - // Create a subnet - var err error - testSubnet1, err = env.txBuilder.NewCreateSubnetTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 2, Addrs: []ids.ShortID{ @@ -262,7 +262,6 @@ func addSubnet(env *environment) { preFundedKeys[2].PublicKey().Address(), }, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, @@ -271,8 +270,11 @@ func addSubnet(env *environment) { if err != nil { panic(err) } + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + panic(err) + } - // store it genesisID := env.state.GetLastAccepted() stateDiff, err := state.NewDiff(genesisID, env.blkManager) if err != nil { @@ -495,7 +497,8 @@ func addPendingValidator( rewardAddress ids.ShortID, keys []*secp256k1.PrivateKey, ) (*txs.Tx, error) { - addPendingValidatorTx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := 
env.factory.NewWallet(keys...) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -507,11 +510,14 @@ func addPendingValidator( Addrs: []ids.ShortID{rewardAddress}, }, reward.PercentDenominator, - keys, ) if err != nil { return nil, err } + addPendingValidatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + return nil, err + } staker, err := state.NewPendingStaker( addPendingValidatorTx.ID(), diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 0436251e9993..3a3f45fd7c0d 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -29,6 +29,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -537,7 +538,8 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { env.config.TrackedSubnets.Add(subnetID) for _, staker := range test.stakers { - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: staker.nodeID, Start: uint64(staker.startTime.Unix()), @@ -549,13 +551,14 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { Addrs: []ids.ShortID{staker.rewardAddress}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -569,7 +572,8 @@ func 
TestBanffProposalBlockUpdateStakers(t *testing.T) { } for _, subStaker := range test.subnetStakers { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subStaker.nodeID, @@ -579,13 +583,14 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) subnetStaker, err := state.NewPendingStaker( tx.ID(), @@ -604,7 +609,8 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { // add Staker0 (with the right end time) to state // so to allow proposalBlk issuance staker0.endTime = newTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: staker0.nodeID, Start: uint64(staker0.startTime.Unix()), @@ -616,13 +622,14 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { Addrs: []ids.ShortID{staker0.rewardAddress}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }), ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // store Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -712,7 +719,8 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := 
defaultValidateStartTime.Add(defaultMinStakingDuration) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subnetValidatorNodeID, @@ -722,9 +730,10 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( @@ -743,7 +752,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { // Queue a staker that joins the staker set after the above validator leaves subnetVdr2NodeID := genesisNodeIDs[1] - tx, err = env.txBuilder.NewAddSubnetValidatorTx( + utx, err = builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subnetVdr2NodeID, @@ -753,9 +762,10 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err = state.NewPendingStaker( tx.ID(), @@ -776,7 +786,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { // so to allow proposalBlk issuance staker0StartTime := defaultValidateStartTime staker0EndTime := subnetVdr1EndTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( + uVdrTx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: uint64(staker0StartTime.Unix()), @@ -788,13 +798,14 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - 
[]*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }), ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) // store Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -867,7 +878,9 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subnetValidatorNodeID, @@ -877,9 +890,10 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -898,7 +912,8 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { // so to allow proposalBlk issuance staker0StartTime := defaultGenesisTime staker0EndTime := subnetVdr1StartTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( + + uVdrTx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: uint64(staker0StartTime.Unix()), @@ -910,9 +925,10 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) // store 
Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -987,7 +1003,8 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { // just to allow proposalBlk issuance (with a reward Tx) staker0StartTime := defaultGenesisTime staker0EndTime := pendingValidatorStartTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: uint64(staker0StartTime.Unix()), @@ -999,9 +1016,10 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // store Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -1056,7 +1074,8 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := pendingDelegatorStartTime.Add(1 * time.Second) - addDelegatorTx, err := env.txBuilder.NewAddDelegatorTx( + builder, signer = env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + uDelTx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(pendingDelegatorStartTime.Unix()), @@ -1067,13 +1086,10 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], - }, ) require.NoError(err) + addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) staker, err = state.NewPendingStaker( addDelegatorTx.ID(), @@ -1089,7 +1105,8 @@ func 
TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { // add Staker0 (with the right end time) to state // so to allow proposalBlk issuance staker0EndTime = pendingDelegatorStartTime - addStaker0, err = env.txBuilder.NewAddValidatorTx( + builder, signer = env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err = builder.NewAddValidatorTx( &txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: uint64(staker0StartTime.Unix()), @@ -1101,9 +1118,10 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + addStaker0, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // store Staker0 to state addValTx = addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -1182,7 +1200,8 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // so to allow proposalBlk issuance staker0StartTime := defaultGenesisTime staker0EndTime := pendingValidatorStartTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( + builder, txSigner := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: uint64(staker0StartTime.Unix()), @@ -1194,9 +1213,10 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // store Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -1250,7 +1270,8 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // Add delegator pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := 
pendingDelegatorStartTime.Add(defaultMinStakingDuration) - addDelegatorTx, err := env.txBuilder.NewAddDelegatorTx( + builder, txSigner = env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + uDelTx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(pendingDelegatorStartTime.Unix()), @@ -1261,13 +1282,10 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], - }, ) require.NoError(err) + addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx) + require.NoError(err) staker, err = state.NewPendingStaker( addDelegatorTx.ID(), @@ -1283,7 +1301,8 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // add Staker0 (with the right end time) to state // so to allow proposalBlk issuance staker0EndTime = pendingDelegatorStartTime - addStaker0, err = env.txBuilder.NewAddValidatorTx( + builder, txSigner = env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err = builder.NewAddValidatorTx( &txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: uint64(staker0StartTime.Unix()), @@ -1295,9 +1314,10 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + addStaker0, err = walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // store Staker0 to state addValTx = addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -1365,7 +1385,8 @@ func TestAddValidatorProposalBlock(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) - addValidatorTx, err := env.txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + utx, 
err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -1386,13 +1407,10 @@ func TestAddValidatorProposalBlock(t *testing.T) { Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, 10000, - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], - }, ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // Add validator through a [StandardBlock] preferredID := env.blkManager.Preferred() @@ -1452,7 +1470,7 @@ func TestAddValidatorProposalBlock(t *testing.T) { sk, err = bls.NewSecretKey() require.NoError(err) - addValidatorTx2, err := env.txBuilder.NewAddPermissionlessValidatorTx( + utx2, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -1473,13 +1491,10 @@ func TestAddValidatorProposalBlock(t *testing.T) { Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, 10000, - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], - }, ) require.NoError(err) + addValidatorTx2, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx2) + require.NoError(err) // Add validator through a [ProposalBlock] and reward the last one preferredID = env.blkManager.Preferred() diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index b8c9257a2915..af8d469c48c3 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -23,6 +23,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestApricotStandardBlockTimeVerification(t 
*testing.T) { @@ -508,7 +510,8 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { } for _, staker := range test.subnetStakers { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: staker.nodeID, @@ -518,9 +521,10 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -600,7 +604,8 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subnetValidatorNodeID, @@ -610,9 +615,10 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( @@ -631,7 +637,7 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { // Queue a staker that joins the staker set after the above validator leaves subnetVdr2NodeID := genesisNodeIDs[1] - tx, err = env.txBuilder.NewAddSubnetValidatorTx( + utx, err = builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subnetVdr2NodeID, @@ -641,9 
+647,10 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err = state.NewPendingStaker( tx.ID(), @@ -703,7 +710,8 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subnetValidatorNodeID, @@ -713,9 +721,10 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -796,7 +805,8 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := pendingDelegatorStartTime.Add(1 * time.Second) - addDelegatorTx, err := env.txBuilder.NewAddDelegatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + utx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(pendingDelegatorStartTime.Unix()), @@ -807,13 +817,10 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], - }, ) require.NoError(err) + 
addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( addDelegatorTx.ID(), diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index d72e11f6379c..3d4aac61770e 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -34,7 +34,6 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" - "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -47,8 +46,10 @@ import ( avajson "github.com/ava-labs/avalanchego/utils/json" vmkeystore "github.com/ava-labs/avalanchego/vms/components/keystore" pchainapi "github.com/ava-labs/avalanchego/vms/platformvm/api" + blockbuilder "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) var ( @@ -75,8 +76,8 @@ var ( } ) -func defaultService(t *testing.T) (*Service, *mutableSharedMemory, *txstest.Builder) { - vm, txBuilder, _, mutableSharedMemory := defaultVM(t, latestFork) +func defaultService(t *testing.T) (*Service, *mutableSharedMemory, *txstest.WalletFactory) { + vm, factory, _, mutableSharedMemory := defaultVM(t, latestFork) return &Service{ vm: vm, @@ -84,7 +85,7 @@ func defaultService(t *testing.T) (*Service, *mutableSharedMemory, *txstest.Buil stakerAttributesCache: &cache.LRU[ids.ID, *stakerAttributes]{ Size: stakerAttributesCacheSize, }, - }, mutableSharedMemory, 
txBuilder + }, mutableSharedMemory, factory } func TestExportKey(t *testing.T) { @@ -120,7 +121,7 @@ func TestExportKey(t *testing.T) { // Test issuing a tx and accepted func TestGetTxStatus(t *testing.T) { require := require.New(t) - service, mutableSharedMemory, txBuilder := defaultService(t) + service, mutableSharedMemory, factory := defaultService(t) service.vm.ctx.Lock.Lock() recipientKey, err := secp256k1.NewPrivateKey() @@ -168,15 +169,17 @@ func TestGetTxStatus(t *testing.T) { mutableSharedMemory.SharedMemory = sm - tx, err := txBuilder.NewImportTx( + builder, signer := factory.NewWallet(recipientKey) + utx, err := builder.NewImportTx( service.vm.ctx.XChainID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }, - []*secp256k1.PrivateKey{recipientKey}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) service.vm.ctx.Lock.Unlock() @@ -212,30 +215,32 @@ func TestGetTxStatus(t *testing.T) { func TestGetTx(t *testing.T) { type test struct { description string - createTx func(service *Service, builder *txstest.Builder) (*txs.Tx, error) + createTx func(service *Service, factory *txstest.WalletFactory) (*txs.Tx, error) } tests := []test{ { "standard block", - func(_ *Service, builder *txstest.Builder) (*txs.Tx, error) { - return builder.NewCreateChainTx( // Test GetTx works for standard blocks + func(_ *Service, factory *txstest.WalletFactory) (*txs.Tx, error) { + builder, signer := factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), []byte{}, constants.AVMID, []ids.ID{}, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) + require.NoError(t, err) + return walletsigner.SignUnsigned(context.Background(), signer, utx) }, }, { 
"proposal block", - func(service *Service, builder *txstest.Builder) (*txs.Tx, error) { + func(service *Service, factory *txstest.WalletFactory) (*txs.Tx, error) { sk, err := bls.NewSecretKey() require.NoError(t, err) @@ -244,7 +249,8 @@ func TestGetTx(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, } - return builder.NewAddPermissionlessValidatorTx( // Test GetTx works for proposal blocks + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: ids.GenerateTestNodeID(), @@ -259,18 +265,20 @@ func TestGetTx(t *testing.T) { rewardsOwner, rewardsOwner, 0, - []*secp256k1.PrivateKey{keys[0]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) + require.NoError(t, err) + return walletsigner.SignUnsigned(context.Background(), txSigner, utx) }, }, { "atomic block", - func(service *Service, builder *txstest.Builder) (*txs.Tx, error) { - return builder.NewExportTx( // Test GetTx works for proposal blocks + func(service *Service, factory *txstest.WalletFactory) (*txs.Tx, error) { + builder, signer := factory.NewWallet(keys[0]) + utx, err := builder.NewExportTx( service.vm.ctx.XChainID, []*avax.TransferableOutput{{ Asset: avax.Asset{ID: service.vm.ctx.AVAXAssetID}, @@ -283,12 +291,13 @@ func TestGetTx(t *testing.T) { }, }, }}, - []*secp256k1.PrivateKey{keys[0]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) + require.NoError(t, err) + return walletsigner.SignUnsigned(context.Background(), signer, utx) }, }, } @@ -398,7 +407,7 @@ func TestGetBalance(t *testing.T) { func TestGetStake(t *testing.T) { require := require.New(t) - service, _, txBuilder := defaultService(t) + service, _, factory := defaultService(t) // Ensure GetStake is correct for each of the genesis validators genesis, _ := defaultGenesis(t, 
service.vm.ctx.AVAXAssetID) @@ -470,7 +479,8 @@ func TestGetStake(t *testing.T) { delegatorNodeID := genesisNodeIDs[0] delegatorStartTime := defaultValidateStartTime delegatorEndTime := defaultGenesisTime.Add(defaultMinStakingDuration) - tx, err := txBuilder.NewAddDelegatorTx( + builder, signer := factory.NewWallet(keys[0]) + utx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: delegatorNodeID, Start: uint64(delegatorStartTime.Unix()), @@ -481,13 +491,14 @@ func TestGetStake(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, - []*secp256k1.PrivateKey{keys[0]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addDelTx := tx.Unsigned.(*txs.AddDelegatorTx) staker, err := state.NewCurrentStaker( @@ -532,7 +543,7 @@ func TestGetStake(t *testing.T) { stakeAmount = service.vm.MinValidatorStake + 54321 pendingStakerNodeID := ids.GenerateTestNodeID() pendingStakerEndTime := uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()) - tx, err = txBuilder.NewAddValidatorTx( + utx2, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: pendingStakerNodeID, Start: uint64(defaultGenesisTime.Unix()), @@ -544,13 +555,14 @@ func TestGetStake(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, 0, - []*secp256k1.PrivateKey{keys[0]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) require.NoError(err) + tx, err = walletsigner.SignUnsigned(context.Background(), signer, utx2) + require.NoError(err) staker, err = state.NewPendingStaker( tx.ID(), @@ -584,7 +596,7 @@ func TestGetStake(t *testing.T) { func TestGetCurrentValidators(t *testing.T) { require := require.New(t) - service, _, txBuilder := defaultService(t) + service, _, factory := defaultService(t) 
genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) @@ -618,7 +630,8 @@ func TestGetCurrentValidators(t *testing.T) { service.vm.ctx.Lock.Lock() - delTx, err := txBuilder.NewAddDelegatorTx( + builder, signer := factory.NewWallet(keys[0]) + utx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: validatorNodeID, Start: uint64(delegatorStartTime.Unix()), @@ -629,13 +642,14 @@ func TestGetCurrentValidators(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, - []*secp256k1.PrivateKey{keys[0]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) staker, err := state.NewCurrentStaker( @@ -692,7 +706,7 @@ func TestGetCurrentValidators(t *testing.T) { service.vm.ctx.Lock.Lock() // Reward the delegator - tx, err := builder.NewRewardValidatorTx(service.vm.ctx, delTx.ID()) + tx, err := blockbuilder.NewRewardValidatorTx(service.vm.ctx, delTx.ID()) require.NoError(err) service.vm.state.AddTx(tx, status.Committed) service.vm.state.DeleteCurrentDelegator(staker) @@ -753,25 +767,26 @@ func TestGetBlock(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { require := require.New(t) - service, _, txBuilder := defaultService(t) + service, _, factory := defaultService(t) service.vm.ctx.Lock.Lock() service.vm.StaticFeeConfig.CreateAssetTxFee = 100 * defaultTxFee - // Make a block an accept it, then check we can get it. 
- tx, err := txBuilder.NewCreateChainTx( // Test GetTx works for standard blocks + builder, signer := factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), []byte{}, constants.AVMID, []ids.ID{}, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) preferredID := service.vm.manager.Preferred() preferred, err := service.vm.manager.GetBlock(preferredID) diff --git a/vms/platformvm/txs/executor/advance_time_test.go b/vms/platformvm/txs/executor/advance_time_test.go index 755cbb3b2beb..48f4426276b2 100644 --- a/vms/platformvm/txs/executor/advance_time_test.go +++ b/vms/platformvm/txs/executor/advance_time_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "fmt" "testing" "time" @@ -20,6 +21,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func newAdvanceTimeTx(t testing.TB, timestamp time.Time) (*txs.Tx, error) { @@ -376,7 +379,8 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { } for _, staker := range test.subnetStakers { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: staker.nodeID, @@ -386,9 +390,10 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := 
walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -473,7 +478,9 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subnetValidatorNodeID, @@ -483,9 +490,10 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( @@ -505,7 +513,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { // Queue a staker that joins the staker set after the above validator leaves subnetVdr2NodeID := genesisNodeIDs[1] - tx, err = env.txBuilder.NewAddSubnetValidatorTx( + utx, err = builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subnetVdr2NodeID, @@ -515,9 +523,10 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err = state.NewPendingStaker( tx.ID(), @@ -584,7 +593,8 @@ func TestTrackedSubnet(t *testing.T) { subnetVdr1StartTime := defaultValidateStartTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultValidateStartTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) - tx, err := 
env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: subnetValidatorNodeID, @@ -594,9 +604,10 @@ func TestTrackedSubnet(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -689,7 +700,8 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := pendingDelegatorStartTime.Add(1 * time.Second) - addDelegatorTx, err := env.txBuilder.NewAddDelegatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + utx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(pendingDelegatorStartTime.Unix()), @@ -700,13 +712,10 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], - }, ) require.NoError(err) + addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( addDelegatorTx.ID(), @@ -791,7 +800,8 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { // Add delegator pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := pendingDelegatorStartTime.Add(defaultMinStakingDuration) - addDelegatorTx, err := env.txBuilder.NewAddDelegatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + utx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: 
uint64(pendingDelegatorStartTime.Unix()), @@ -802,9 +812,10 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]}, ) require.NoError(err) + addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( addDelegatorTx.ID(), @@ -907,7 +918,8 @@ func addPendingValidator( nodeID ids.NodeID, keys []*secp256k1.PrivateKey, ) (*txs.Tx, error) { - addPendingValidatorTx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(keys...) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -919,11 +931,14 @@ func addPendingValidator( Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - keys, ) if err != nil { return nil, err } + addPendingValidatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + return nil, err + } staker, err := state.NewPendingStaker( addPendingValidatorTx.ID(), diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index 346c8ab1468a..4d52432ab8fa 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "testing" "time" @@ -21,6 +22,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) // Ensure Execute fails when there are not enough control sigs @@ -30,15 +33,17 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { env.ctx.Lock.Lock() defer 
env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove a signature tx.Creds[0].(*secp256k1fx.Credential).Sigs = tx.Creds[0].(*secp256k1fx.Credential).Sigs[1:] @@ -62,15 +67,17 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { env.ctx.Lock.Lock() defer env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Generate new, random key to sign tx with key, err := secp256k1.NewPrivateKey() @@ -101,15 +108,17 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) { env.ctx.Lock.Lock() defer env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) tx.Unsigned.(*txs.CreateChainTx).SubnetID = ids.GenerateTestID() @@ -132,15 +141,17 @@ func TestCreateChainTxValid(t *testing.T) { env.ctx.Lock.Lock() defer env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := 
env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -195,17 +206,20 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { env.state.SetTimestamp(test.time) // to duly set fee cfg := *env.config + cfg.StaticFeeConfig.CreateBlockchainTxFee = test.fee - builder := txstest.NewBuilder(env.ctx, &cfg, env.state) - tx, err := builder.NewCreateChainTx( + factory := txstest.NewWalletFactory(env.ctx, &cfg, env.state) + builder, signer := factory.NewWallet(preFundedKeys...) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, ids.GenerateTestID(), nil, "", - preFundedKeys, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/create_subnet_test.go b/vms/platformvm/txs/executor/create_subnet_test.go index c1902dd56625..8ba9b529d565 100644 --- a/vms/platformvm/txs/executor/create_subnet_test.go +++ b/vms/platformvm/txs/executor/create_subnet_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "testing" "time" @@ -16,6 +17,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestCreateSubnetTxAP3FeeChange(t *testing.T) { @@ -63,12 +66,14 @@ func TestCreateSubnetTxAP3FeeChange(t *testing.T) { cfg := *env.config cfg.StaticFeeConfig.CreateSubnetTxFee = 
test.fee - builder := txstest.NewBuilder(env.ctx, &cfg, env.state) - tx, err := builder.NewCreateSubnetTx( + factory := txstest.NewWalletFactory(env.ctx, &cfg, env.state) + builder, signer := factory.NewWallet(preFundedKeys...) + utx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{}, - preFundedKeys, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/export_test.go b/vms/platformvm/txs/executor/export_test.go index 7959791187db..f962c1af8814 100644 --- a/vms/platformvm/txs/executor/export_test.go +++ b/vms/platformvm/txs/executor/export_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "testing" "time" @@ -14,6 +15,8 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestNewExportTx(t *testing.T) { @@ -50,7 +53,8 @@ func TestNewExportTx(t *testing.T) { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - tx, err := env.txBuilder.NewExportTx( + builder, signer := env.factory.NewWallet(tt.sourceKeys...) 
+ utx, err := builder.NewExportTx( tt.destinationChainID, []*avax.TransferableOutput{{ Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, @@ -63,9 +67,10 @@ func TestNewExportTx(t *testing.T) { }, }, }}, - tt.sourceKeys, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index 74c706227fea..557fb691417c 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "fmt" "math" "testing" @@ -47,6 +48,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) const ( @@ -105,7 +108,7 @@ type environment struct { states map[ids.ID]state.Chain uptimes uptime.Manager utxosHandler utxo.Verifier - txBuilder *txstest.Builder + factory *txstest.WalletFactory backend Backend } @@ -144,11 +147,7 @@ func newEnvironment(t *testing.T, f fork) *environment { uptimes := uptime.NewManager(baseState, clk) utxosVerifier := utxo.NewVerifier(ctx, clk, fx) - txBuilder := txstest.NewBuilder( - ctx, - config, - baseState, - ) + factory := txstest.NewWalletFactory(ctx, config, baseState) backend := Backend{ Config: config, @@ -173,7 +172,7 @@ func newEnvironment(t *testing.T, f fork) *environment { states: make(map[ids.ID]state.Chain), uptimes: uptimes, utxosHandler: utxosVerifier, - txBuilder: txBuilder, + factory: factory, backend: backend, } @@ -209,9 +208,8 @@ func newEnvironment(t *testing.T, f fork) *environment { func addSubnet(t *testing.T, env *environment) { require := require.New(t) - // Create a subnet - var err error - 
testSubnet1, err = env.txBuilder.NewCreateSubnetTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 2, Addrs: []ids.ShortID{ @@ -220,15 +218,15 @@ func addSubnet(t *testing.T, env *environment) { preFundedKeys[2].PublicKey().Address(), }, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, common.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }), ) require.NoError(err) + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) - // store it stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index 7bb0be9afcc1..7e8db3da48f0 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "math/rand" "testing" "time" @@ -19,6 +20,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/chain/p/builder" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) var fundedSharedMemoryCalls byte @@ -120,15 +123,17 @@ func TestNewImportTx(t *testing.T) { require := require.New(t) env.msm.SharedMemory = tt.sharedMemory - tx, err := env.txBuilder.NewImportTx( + + builder, signer := env.factory.NewWallet(tt.sourceKeys...) 
+ utx, err := builder.NewImportTx( tt.sourceChainID, to, - tt.sourceKeys, ) require.ErrorIs(err, tt.expectedErr) if tt.expectedErr != nil { return } + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) require.NoError(err) unsignedTx := tx.Unsigned.(*txs.ImportTx) diff --git a/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/vms/platformvm/txs/executor/proposal_tx_executor_test.go index 87caaca7d7c1..cb547d4d589d 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "math" "testing" "time" @@ -19,6 +20,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestProposalTxExecuteAddDelegator(t *testing.T) { @@ -35,7 +38,8 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { addMinStakeValidator := func(env *environment) { require := require.New(t) - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: newValidatorID, Start: newValidatorStartTime, @@ -46,10 +50,11 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{rewardAddress}, }, - reward.PercentDenominator, // Shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, + reward.PercentDenominator, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -71,7 +76,8 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { addMaxStakeValidator := func(env *environment) { require := require.New(t) - tx, err := 
env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: newValidatorID, Start: newValidatorStartTime, @@ -83,9 +89,10 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { Addrs: []ids.ShortID{rewardAddress}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -237,7 +244,8 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { env := newEnvironment(t, apricotPhase5) env.config.UpgradeConfig.ApricotPhase3Time = tt.AP3Time - tx, err := env.txBuilder.NewAddDelegatorTx( + builder, signer := env.factory.NewWallet(tt.feeKeys...) + utx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: tt.nodeID, Start: tt.startTime, @@ -248,9 +256,10 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{rewardAddress}, }, - tt.feeKeys, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) if tt.setup != nil { tt.setup(env) @@ -285,7 +294,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // Case: Proposed validator currently validating primary network // but stops validating subnet after stops validating primary network // (note that keys[0] is a genesis validator) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -295,9 +305,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) 
require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -320,7 +331,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // and proposed subnet validation period is subset of // primary network validation period // (note that keys[0] is a genesis validator) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -330,9 +342,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -355,7 +368,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { dsStartTime := defaultValidateStartTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) - addDSTx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: pendingDSValidatorID, Start: uint64(dsStartTime.Unix()), @@ -366,14 +380,16 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, - reward.PercentDenominator, // shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, + reward.PercentDenominator, ) require.NoError(err) + addDSTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) { // Case: Proposed validator isn't in pending or current validator sets - tx, err := 
env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: pendingDSValidatorID, @@ -383,9 +399,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -423,7 +440,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network // but starts validating subnet before primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: pendingDSValidatorID, @@ -433,9 +451,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -456,7 +475,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network // but stops validating subnet after primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: 
pendingDSValidatorID, @@ -466,9 +486,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -489,7 +510,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network and // period validating subnet is subset of time validating primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: pendingDSValidatorID, @@ -499,9 +521,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -524,7 +547,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { env.state.SetTimestamp(newTimestamp) { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -534,9 +558,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + 
require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -559,7 +584,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // Case: Proposed validator already validating the subnet // First, add validator as validator of subnet - subnetTx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer = env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + uSubnetTx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -569,9 +595,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + subnetTx, err := walletsigner.SignUnsigned(context.Background(), signer, uSubnetTx) + require.NoError(err) addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( @@ -589,7 +616,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID - duplicateSubnetTx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -599,9 +627,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + duplicateSubnetTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -625,7 +654,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Too few signatures - tx, err := 
env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -635,9 +665,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove a signature addSubnetValidatorTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) @@ -664,7 +695,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Control Signature from invalid key (keys[3] is not a control key) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -674,9 +706,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Replace a valid signature with one from keys[3] sig, err := preFundedKeys[3].SignHash(hashing.ComputeHash256(tx.Unsigned.Bytes())) @@ -702,7 +735,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -712,9 +746,10 @@ func 
TestProposalTxExecuteAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( @@ -758,7 +793,8 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { { // Case: Validator's start time too early - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(chainTime.Unix()), @@ -770,9 +806,10 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -794,7 +831,8 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { nodeID := genesisNodeIDs[0] // Case: Validator already validating primary network - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(defaultValidateStartTime.Unix()) + 1, @@ -806,9 +844,10 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -829,7 +868,8 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { { // Case: 
Validator in pending validator set of primary network startTime := defaultValidateStartTime.Add(1 * time.Second) - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -840,10 +880,11 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, - reward.PercentDenominator, // shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, + reward.PercentDenominator, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -878,7 +919,8 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { { // Case: Validator doesn't have enough tokens to cover stake amount - tx, err := env.txBuilder.NewAddValidatorTx( // create the tx + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: uint64(defaultValidateStartTime.Unix()) + 1, @@ -890,9 +932,10 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove all UTXOs owned by preFundedKeys[0] utxoIDs, err := env.state.UTXOIDs(preFundedKeys[0].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) diff --git a/vms/platformvm/txs/executor/reward_validator_test.go b/vms/platformvm/txs/executor/reward_validator_test.go index 215e9a6f729a..88b7b0b8a368 100644 --- a/vms/platformvm/txs/executor/reward_validator_test.go +++ b/vms/platformvm/txs/executor/reward_validator_test.go @@ -4,6 +4,7 @@ package executor import ( + 
"context" "testing" "time" @@ -13,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -22,6 +22,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func newRewardValidatorTx(t testing.TB, txID ids.ID) (*txs.Tx, error) { @@ -239,7 +241,8 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) vdrNodeID := ids.GenerateTestNodeID() - vdrTx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + uVdrTx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: vdrNodeID, Start: vdrStartTime, @@ -251,14 +254,15 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { Addrs: []ids.ShortID{vdrRewardAddress}, }, reward.PercentDenominator/4, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + vdrTx, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) delStartTime := vdrStartTime delEndTime := vdrEndTime - delTx, err := env.txBuilder.NewAddDelegatorTx( + uDelTx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: vdrNodeID, Start: delStartTime, @@ -269,9 +273,10 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{delRewardAddress}, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) 
require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrStaker, err := state.NewCurrentStaker( @@ -368,7 +373,8 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) vdrNodeID := ids.GenerateTestNodeID() - vdrTx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + uVdrTx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: vdrNodeID, Start: vdrStartTime, @@ -380,14 +386,15 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { Addrs: []ids.ShortID{vdrRewardAddress}, }, reward.PercentDenominator/4, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + vdrTx, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) delStartTime := vdrStartTime delEndTime := vdrEndTime - delTx, err := env.txBuilder.NewAddDelegatorTx( + uDelTx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: vdrNodeID, Start: delStartTime, @@ -398,9 +405,10 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{delRewardAddress}, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrRewardAmt := uint64(2000000) @@ -592,7 +600,8 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) vdrNodeID := ids.GenerateTestNodeID() - vdrTx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + uVdrTx, err := builder.NewAddValidatorTx( 
&txs.Validator{ NodeID: vdrNodeID, Start: vdrStartTime, @@ -604,14 +613,15 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * Addrs: []ids.ShortID{vdrRewardAddress}, }, reward.PercentDenominator/4, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + vdrTx, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) delStartTime := vdrStartTime delEndTime := vdrEndTime - delTx, err := env.txBuilder.NewAddDelegatorTx( + uDelTx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: vdrNodeID, Start: delStartTime, @@ -622,9 +632,10 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * Threshold: 1, Addrs: []ids.ShortID{delRewardAddress}, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrRewardAmt := uint64(2000000) @@ -762,7 +773,8 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) vdrNodeID := ids.GenerateTestNodeID() - vdrTx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + uVdrTx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: vdrNodeID, Start: vdrStartTime, @@ -774,13 +786,15 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { Addrs: []ids.ShortID{vdrRewardAddress}, }, reward.PercentDenominator/4, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + vdrTx, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) delStartTime := vdrStartTime delEndTime := vdrEndTime - delTx, err := env.txBuilder.NewAddDelegatorTx( + + uDelTx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: vdrNodeID, Start: delStartTime, @@ -791,9 +805,10 @@ func 
TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{delRewardAddress}, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrStaker, err := state.NewCurrentStaker( diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index de69b0ff5a8c..1de00365723f 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "errors" "math" "math/rand" @@ -36,6 +37,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) // This tests that the math performed during TransformSubnetTx execution can @@ -74,7 +77,8 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { // Case: Empty validator node ID after banff env.config.UpgradeConfig.BanffTime = test.banffTime - tx, err := env.txBuilder.NewAddValidatorTx( // create the tx + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: ids.EmptyNodeID, Start: uint64(startTime.Unix()), @@ -86,9 +90,10 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -117,7 +122,8 @@ func 
TestStandardTxExecutorAddDelegator(t *testing.T) { addMinStakeValidator := func(env *environment) { require := require.New(t) - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: newValidatorID, Start: uint64(newValidatorStartTime.Unix()), @@ -128,10 +134,11 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{rewardAddress}, }, - reward.PercentDenominator, // Shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, + reward.PercentDenominator, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -153,7 +160,8 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { addMaxStakeValidator := func(env *environment) { require := require.New(t) - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: newValidatorID, Start: uint64(newValidatorStartTime.Unix()), @@ -164,10 +172,11 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{rewardAddress}, }, - reward.PercentDenominator, // Shared - []*secp256k1.PrivateKey{preFundedKeys[0]}, + reward.PercentDenominator, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -319,7 +328,8 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { env := newEnvironment(t, apricotPhase5) env.config.UpgradeConfig.ApricotPhase3Time = tt.AP3Time - tx, err := env.txBuilder.NewAddDelegatorTx( + builder, signer := env.factory.NewWallet(tt.feeKeys...) 
+ utx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: tt.nodeID, Start: uint64(tt.startTime.Unix()), @@ -330,9 +340,10 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{rewardAddress}, }, - tt.feeKeys, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) if tt.setup != nil { tt.setup(env) @@ -367,7 +378,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { // but stops validating subnet after stops validating primary network // (note that keys[0] is a genesis validator) startTime := defaultValidateStartTime.Add(time.Second) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -377,9 +389,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -398,7 +411,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { // and proposed subnet validation period is subset of // primary network validation period // (note that keys[0] is a genesis validator) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -408,9 +422,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], 
testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -429,7 +444,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { dsStartTime := defaultGenesisTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) - addDSTx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: pendingDSValidatorID, Start: uint64(dsStartTime.Unix()), @@ -440,14 +456,16 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, - reward.PercentDenominator, // shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, + reward.PercentDenominator, ) require.NoError(err) + addDSTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) { // Case: Proposed validator isn't in pending or current validator sets - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: pendingDSValidatorID, @@ -457,9 +475,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -493,7 +512,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network // but starts validating subnet 
before primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: pendingDSValidatorID, @@ -503,9 +523,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -522,7 +543,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network // but stops validating subnet after primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: pendingDSValidatorID, @@ -532,9 +554,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -551,7 +574,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network and // period validating subnet is subset of time validating primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := 
builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: pendingDSValidatorID, @@ -561,9 +585,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -581,7 +606,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { env.state.SetTimestamp(newTimestamp) { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -591,9 +617,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -612,7 +639,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { // Case: Proposed validator already validating the subnet // First, add validator as validator of subnet - subnetTx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer = env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + uSubnetTx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -622,9 +650,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + 
subnetTx, err := walletsigner.SignUnsigned(context.Background(), signer, uSubnetTx) + require.NoError(err) addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( @@ -643,7 +672,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID startTime := defaultValidateStartTime.Add(time.Second) - duplicateSubnetTx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -653,9 +683,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -663,9 +694,9 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { executor := StandardTxExecutor{ Backend: &env.backend, State: onAcceptState, - Tx: duplicateSubnetTx, + Tx: tx, } - err = duplicateSubnetTx.Unsigned.Visit(&executor) + err = tx.Unsigned.Visit(&executor) require.ErrorIs(err, ErrDuplicateValidator) } @@ -676,7 +707,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Duplicate signatures startTime := defaultValidateStartTime.Add(time.Second) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -686,9 +718,10 @@ func 
TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Duplicate a signature addSubnetValidatorTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) @@ -712,7 +745,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Too few signatures startTime := defaultValidateStartTime.Add(time.Second) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -722,9 +756,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove a signature addSubnetValidatorTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) @@ -748,7 +783,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Control Signature from invalid key (keys[3] is not a control key) startTime := defaultValidateStartTime.Add(time.Second) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -758,9 +794,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, ) require.NoError(err) + tx, err := 
walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Replace a valid signature with one from keys[3] sig, err := preFundedKeys[3].SignHash(hashing.ComputeHash256(tx.Unsigned.Bytes())) @@ -783,7 +820,8 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet startTime := defaultValidateStartTime.Add(time.Second) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -793,9 +831,10 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( @@ -834,7 +873,8 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { { // Case: Validator's start time too early - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(defaultValidateStartTime.Unix()) - 1, @@ -846,9 +886,10 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { Addrs: []ids.ShortID{ids.ShortEmpty}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -865,7 +906,8 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) 
{ { // Case: Validator in current validator set of primary network startTime := defaultValidateStartTime.Add(1 * time.Second) - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -876,10 +918,11 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }, - reward.PercentDenominator, // shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, + reward.PercentDenominator, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -908,7 +951,8 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { { // Case: Validator in pending validator set of primary network startTime := defaultValidateStartTime.Add(1 * time.Second) - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -919,10 +963,11 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }, - reward.PercentDenominator, // shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, + reward.PercentDenominator, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -948,7 +993,8 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { { // Case: Validator doesn't have enough tokens to cover stake amount startTime := defaultValidateStartTime.Add(1 * time.Second) - tx, err := env.txBuilder.NewAddValidatorTx( // create the tx + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := 
builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -960,9 +1006,10 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { Addrs: []ids.ShortID{ids.ShortEmpty}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove all UTXOs owned by preFundedKeys[0] utxoIDs, err := env.state.UTXOIDs(preFundedKeys[0].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) @@ -1003,7 +1050,8 @@ func TestDurangoDisabledTransactions(t *testing.T) { endTime = chainTime.Add(defaultMaxStakingDuration) ) - tx, err := env.txBuilder.NewAddValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: 0, @@ -1014,10 +1062,11 @@ func TestDurangoDisabledTransactions(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }, - reward.PercentDenominator, // shares - preFundedKeys, + reward.PercentDenominator, ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) return tx }, @@ -1039,7 +1088,8 @@ func TestDurangoDisabledTransactions(t *testing.T) { } it.Release() - tx, err := env.txBuilder.NewAddDelegatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: primaryValidator.NodeID, Start: 0, @@ -1050,9 +1100,10 @@ func TestDurangoDisabledTransactions(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }, - preFundedKeys, ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) return tx }, @@ -1107,7 +1158,8 @@ func TestDurangoMemoField(t *testing.T) { } it.Release() - tx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: primaryValidator.NodeID, @@ -1117,10 +1169,11 @@ func TestDurangoMemoField(t *testing.T) { }, Subnet: testSubnet1.TxID, }, - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1130,16 +1183,18 @@ func TestDurangoMemoField(t *testing.T) { { name: "CreateChainTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewCreateChainTx( testSubnet1.TxID, - []byte{}, // genesisData - ids.GenerateTestID(), // vmID - []ids.ID{}, // fxIDs - "aaa", // chain name - preFundedKeys, + []byte{}, + ids.GenerateTestID(), + []ids.ID{}, + "aaa", common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1150,15 +1205,17 @@ func TestDurangoMemoField(t *testing.T) { { name: "CreateSubnetTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewCreateSubnetTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1190,17 +1247,19 @@ func TestDurangoMemoField(t *testing.T) { ) env.msm.SharedMemory = sharedMemory - tx, err := env.txBuilder.NewImportTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewImportTx( sourceChain, &secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, Addrs: []ids.ShortID{sourceKey.PublicKey().Address()}, }, - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1211,7 +1270,8 @@ func TestDurangoMemoField(t *testing.T) { { name: "ExportTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewExportTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewExportTx( env.ctx.XChainID, []*avax.TransferableOutput{{ Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, @@ -1224,10 +1284,11 @@ func TestDurangoMemoField(t *testing.T) { }, }, }}, - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1252,7 +1313,8 @@ func TestDurangoMemoField(t *testing.T) { it.Release() endTime := primaryValidator.EndTime - subnetValTx, err := env.txBuilder.NewAddSubnetValidatorTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: primaryValidator.NodeID, @@ -1262,9 +1324,10 @@ func TestDurangoMemoField(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(t, err) + subnetValTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1275,13 +1338,15 @@ func 
TestDurangoMemoField(t *testing.T) { Tx: subnetValTx, })) - tx, err := env.txBuilder.NewRemoveSubnetValidatorTx( + builder, signer = env.factory.NewWallet(preFundedKeys...) + utx2, err := builder.NewRemoveSubnetValidatorTx( primaryValidator.NodeID, testSubnet1.ID(), - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx2) + require.NoError(t, err) return tx, onAcceptState }, @@ -1289,7 +1354,8 @@ func TestDurangoMemoField(t *testing.T) { { name: "TransformSubnetTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewTransformSubnetTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewTransformSubnetTx( testSubnet1.TxID, // subnetID ids.GenerateTestID(), // assetID 10, // initial supply @@ -1304,10 +1370,11 @@ func TestDurangoMemoField(t *testing.T) { 10, // min delegator stake 1, // max validator weight factor 80, // uptime requirement - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1326,7 +1393,8 @@ func TestDurangoMemoField(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(t, err) - tx, err := env.txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -1347,10 +1415,11 @@ func TestDurangoMemoField(t *testing.T) { Addrs: []ids.ShortID{ids.ShortEmpty}, }, reward.PercentDenominator, - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1374,7 +1443,8 @@ func TestDurangoMemoField(t *testing.T) { } it.Release() - tx, err := env.txBuilder.NewAddPermissionlessDelegatorTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewAddPermissionlessDelegatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: primaryValidator.NodeID, @@ -1389,10 +1459,11 @@ func TestDurangoMemoField(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }, - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1403,16 +1474,18 @@ func TestDurangoMemoField(t *testing.T) { { name: "TransferSubnetOwnershipTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewTransferSubnetOwnershipTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewTransferSubnetOwnershipTx( testSubnet1.TxID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ids.ShortEmpty}, }, - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1423,7 +1496,8 @@ func TestDurangoMemoField(t *testing.T) { { name: "BaseTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewBaseTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewBaseTx( []*avax.TransferableOutput{ { Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, @@ -1436,10 +1510,11 @@ func TestDurangoMemoField(t *testing.T) { }, }, }, - preFundedKeys, common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) diff --git a/vms/platformvm/txs/txstest/builder.go b/vms/platformvm/txs/txstest/builder.go index e9d9c8d40061..532720be981f 100644 --- a/vms/platformvm/txs/txstest/builder.go +++ b/vms/platformvm/txs/txstest/builder.go @@ -4,348 +4,40 @@ package txstest import ( - "context" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/chain/p/builder" - 
"github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - - vmsigner "github.com/ava-labs/avalanchego/vms/platformvm/signer" - walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" + "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) -func NewBuilder( +func NewWalletFactory( ctx *snow.Context, cfg *config.Config, state state.State, -) *Builder { - return &Builder{ +) *WalletFactory { + return &WalletFactory{ ctx: ctx, cfg: cfg, state: state, } } -type Builder struct { +type WalletFactory struct { ctx *snow.Context cfg *config.Config state state.State } -func (b *Builder) NewImportTx( - chainID ids.ID, - to *secp256k1fx.OutputOwners, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewImportTx( - chainID, - to, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building import tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewExportTx( - chainID ids.ID, - outputs []*avax.TransferableOutput, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewExportTx( - chainID, - outputs, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building export tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewCreateChainTx( - subnetID ids.ID, - genesis []byte, - vmID ids.ID, - fxIDs []ids.ID, - chainName string, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewCreateChainTx( - subnetID, - genesis, - vmID, - fxIDs, - chainName, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building create chain tx: %w", err) - } - - return 
walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewCreateSubnetTx( - owner *secp256k1fx.OutputOwners, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewCreateSubnetTx( - owner, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building create subnet tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewTransformSubnetTx( - subnetID ids.ID, - assetID ids.ID, - initialSupply uint64, - maxSupply uint64, - minConsumptionRate uint64, - maxConsumptionRate uint64, - minValidatorStake uint64, - maxValidatorStake uint64, - minStakeDuration time.Duration, - maxStakeDuration time.Duration, - minDelegationFee uint32, - minDelegatorStake uint64, - maxValidatorWeightFactor byte, - uptimeRequirement uint32, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewTransformSubnetTx( - subnetID, - assetID, - initialSupply, - maxSupply, - minConsumptionRate, - maxConsumptionRate, - minValidatorStake, - maxValidatorStake, - minStakeDuration, - maxStakeDuration, - minDelegationFee, - minDelegatorStake, - maxValidatorWeightFactor, - uptimeRequirement, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building transform subnet tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewAddValidatorTx( - vdr *txs.Validator, - rewardsOwner *secp256k1fx.OutputOwners, - shares uint32, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewAddValidatorTx( - vdr, - rewardsOwner, - shares, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building add validator tx: %w", err) - } - - return 
walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewAddPermissionlessValidatorTx( - vdr *txs.SubnetValidator, - signer vmsigner.Signer, - assetID ids.ID, - validationRewardsOwner *secp256k1fx.OutputOwners, - delegationRewardsOwner *secp256k1fx.OutputOwners, - shares uint32, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewAddPermissionlessValidatorTx( - vdr, - signer, - assetID, - validationRewardsOwner, - delegationRewardsOwner, - shares, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building add permissionless validator tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewAddDelegatorTx( - vdr *txs.Validator, - rewardsOwner *secp256k1fx.OutputOwners, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewAddDelegatorTx( - vdr, - rewardsOwner, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building add delegator tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewAddPermissionlessDelegatorTx( - vdr *txs.SubnetValidator, - assetID ids.ID, - rewardsOwner *secp256k1fx.OutputOwners, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewAddPermissionlessDelegatorTx( - vdr, - assetID, - rewardsOwner, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building add permissionless delegator tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewAddSubnetValidatorTx( - vdr *txs.SubnetValidator, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := 
b.builders(keys) - - utx, err := pBuilder.NewAddSubnetValidatorTx( - vdr, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building add subnet validator tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewRemoveSubnetValidatorTx( - nodeID ids.NodeID, - subnetID ids.ID, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewRemoveSubnetValidatorTx( - nodeID, - subnetID, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building remove subnet validator tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewTransferSubnetOwnershipTx( - subnetID ids.ID, - owner *secp256k1fx.OutputOwners, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewTransferSubnetOwnershipTx( - subnetID, - owner, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building transfer subnet ownership tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) NewBaseTx( - outputs []*avax.TransferableOutput, - keys []*secp256k1.PrivateKey, - options ...common.Option, -) (*txs.Tx, error) { - pBuilder, pSigner := b.builders(keys) - - utx, err := pBuilder.NewBaseTx( - outputs, - options..., - ) - if err != nil { - return nil, fmt.Errorf("failed building base tx: %w", err) - } - - return walletsigner.SignUnsigned(context.Background(), pSigner, utx) -} - -func (b *Builder) builders(keys []*secp256k1.PrivateKey) (builder.Builder, walletsigner.Signer) { +func (w *WalletFactory) NewWallet(keys ...*secp256k1.PrivateKey) (builder.Builder, signer.Signer) { var ( kc = secp256k1fx.NewKeychain(keys...) 
addrs = kc.Addresses() - backend = newBackend(addrs, b.state, b.ctx.SharedMemory) - context = newContext(b.ctx, b.cfg, b.state.GetTimestamp()) - builder = builder.New(addrs, context, backend) - signer = walletsigner.New(kc, backend) + backend = newBackend(addrs, w.state, w.ctx.SharedMemory) + context = newContext(w.ctx, w.cfg, w.state.GetTimestamp()) ) - return builder, signer + return builder.New(addrs, context, backend), signer.New(kc, backend) } diff --git a/vms/platformvm/validator_set_property_test.go b/vms/platformvm/validator_set_property_test.go index faf5eb810d51..9ed3dc7cbd4d 100644 --- a/vms/platformvm/validator_set_property_test.go +++ b/vms/platformvm/validator_set_property_test.go @@ -30,7 +30,6 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" @@ -50,6 +49,7 @@ import ( blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -258,14 +258,9 @@ func takeValidatorsSnapshotAtCurrentHeight(vm *VM, validatorsSetByHeightAndSubne } func addSubnetValidator(vm *VM, data *validatorInputData, subnetID ids.ID) (*state.Staker, error) { - txBuilder := txstest.NewBuilder( - vm.ctx, - &vm.Config, - vm.state, - ) - - addr := keys[0].PublicKey().Address() - signedTx, err := txBuilder.NewAddSubnetValidatorTx( + factory := txstest.NewWalletFactory(vm.ctx, &vm.Config, vm.state) + builder, signer := 
factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: data.nodeID, @@ -275,16 +270,19 @@ func addSubnetValidator(vm *VM, data *validatorInputData, subnetID ids.ID) (*sta }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{addr}, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) if err != nil { - return nil, fmt.Errorf("could not create AddSubnetValidatorTx: %w", err) + return nil, fmt.Errorf("could not build AddSubnetValidatorTx: %w", err) + } + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + return nil, fmt.Errorf("could not sign AddSubnetValidatorTx: %w", err) } - return internalAddValidator(vm, signedTx) + return internalAddValidator(vm, tx) } func addPrimaryValidatorWithBLSKey(vm *VM, data *validatorInputData) (*state.Staker, error) { @@ -295,13 +293,9 @@ func addPrimaryValidatorWithBLSKey(vm *VM, data *validatorInputData) (*state.Sta return nil, fmt.Errorf("failed to generate BLS key: %w", err) } - txBuilder := txstest.NewBuilder( - vm.ctx, - &vm.Config, - vm.state, - ) - - signedTx, err := txBuilder.NewAddPermissionlessValidatorTx( + factory := txstest.NewWalletFactory(vm.ctx, &vm.Config, vm.state) + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: data.nodeID, @@ -322,16 +316,19 @@ func addPrimaryValidatorWithBLSKey(vm *VM, data *validatorInputData) (*state.Sta Addrs: []ids.ShortID{addr}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) if err != nil { - return nil, fmt.Errorf("could not create AddPermissionlessValidatorTx: %w", err) + return nil, fmt.Errorf("could not 
build AddPermissionlessValidatorTx: %w", err) + } + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + if err != nil { + return nil, fmt.Errorf("could not sign AddPermissionlessValidatorTx: %w", err) } - return internalAddValidator(vm, signedTx) + return internalAddValidator(vm, tx) } func internalAddValidator(vm *VM, signedTx *txs.Tx) (*state.Staker, error) { @@ -718,21 +715,16 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { return nil, ids.Empty, err } - txBuilder := txstest.NewBuilder( - vm.ctx, - &vm.Config, - vm.state, - ) - // Create a subnet and store it in testSubnet1 // Note: following Banff activation, block acceptance will move // chain time ahead - testSubnet1, err = txBuilder.NewCreateSubnetTx( + factory := txstest.NewWalletFactory(vm.ctx, &vm.Config, vm.state) + builder, signer := factory.NewWallet(keys[len(keys)-1]) + utx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[len(keys)-1]}, // pays tx fee walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, @@ -741,6 +733,10 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { if err != nil { return nil, ids.Empty, err } + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + return nil, ids.Empty, err + } vm.ctx.Lock.Unlock() err = vm.issueTxFromRPC(testSubnet1) vm.ctx.Lock.Lock() diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index d29b2b0af1fa..629be17fe1ba 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -49,12 +49,13 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" 
walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -65,7 +66,8 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { changeAddr := keys[0].PublicKey().Address() // create valid tx - addValidatorTx, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(validatorStartTime.Unix()), @@ -77,13 +79,14 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { Addrs: []ids.ShortID{changeAddr}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() @@ -108,7 +111,8 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { firstDelegatorEndTime := firstDelegatorStartTime.Add(vm.MinStakeDuration) // create valid tx - addFirstDelegatorTx, err := txBuilder.NewAddDelegatorTx( + builder, txSigner = factory.NewWallet(keys[0], keys[1]) + uDelTx1, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(firstDelegatorStartTime.Unix()), @@ -119,13 +123,14 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addFirstDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx1) + require.NoError(err) // 
trigger block creation vm.ctx.Lock.Unlock() @@ -152,7 +157,8 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { vm.clock.Set(secondDelegatorStartTime.Add(-10 * executor.SyncBound)) // create valid tx - addSecondDelegatorTx, err := txBuilder.NewAddDelegatorTx( + builder, txSigner = factory.NewWallet(keys[0], keys[1], keys[3]) + uDelTx2, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(secondDelegatorStartTime.Unix()), @@ -163,13 +169,14 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }, - []*secp256k1.PrivateKey{keys[0], keys[1], keys[3]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addSecondDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx2) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() @@ -186,7 +193,8 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { thirdDelegatorEndTime := thirdDelegatorStartTime.Add(vm.MinStakeDuration) // create valid tx - addThirdDelegatorTx, err := txBuilder.NewAddDelegatorTx( + builder, txSigner = factory.NewWallet(keys[0], keys[1], keys[4]) + uDelTx3, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(thirdDelegatorStartTime.Unix()), @@ -197,13 +205,14 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }, - []*secp256k1.PrivateKey{keys[0], keys[1], keys[4]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addThirdDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx3) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() @@ -251,7 +260,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := 
require.New(t) - vm, txBuilder, _, _ := defaultVM(t, apricotPhase3) + vm, factory, _, _ := defaultVM(t, apricotPhase3) vm.UpgradeConfig.ApricotPhase3Time = test.ap3Time vm.ctx.Lock.Lock() @@ -265,7 +274,8 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { changeAddr := keys[0].PublicKey().Address() // create valid tx - addValidatorTx, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(validatorStartTime.Unix()), @@ -277,13 +287,14 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { Addrs: []ids.ShortID{id}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // issue the add validator tx vm.ctx.Lock.Unlock() @@ -298,7 +309,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addFirstDelegatorTx, err := txBuilder.NewAddDelegatorTx( + uDelTx1, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(delegator1StartTime.Unix()), @@ -309,13 +320,14 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addFirstDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx1) + require.NoError(err) // issue the first add delegator tx vm.ctx.Lock.Unlock() @@ -330,7 +342,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { 
require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addSecondDelegatorTx, err := txBuilder.NewAddDelegatorTx( + uDelTx2, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(delegator2StartTime.Unix()), @@ -341,13 +353,14 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addSecondDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx2) + require.NoError(err) // issue the second add delegator tx vm.ctx.Lock.Unlock() @@ -362,7 +375,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addThirdDelegatorTx, err := txBuilder.NewAddDelegatorTx( + uDelTx3, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(delegator3StartTime.Unix()), @@ -373,13 +386,14 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addThirdDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx3) + require.NoError(err) // issue the third add delegator tx vm.ctx.Lock.Unlock() @@ -394,7 +408,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addFourthDelegatorTx, err := txBuilder.NewAddDelegatorTx( + uDelTx4, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(delegator4StartTime.Unix()), @@ 
-405,13 +419,14 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addFourthDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx4) + require.NoError(err) // issue the fourth add delegator tx vm.ctx.Lock.Unlock() @@ -485,50 +500,55 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { addr0 := key0.PublicKey().Address() addr1 := key1.PublicKey().Address() - txBuilder := txstest.NewBuilder( + factory := txstest.NewWalletFactory( vm.ctx, &vm.Config, vm.state, ) - addSubnetTx0, err := txBuilder.NewCreateSubnetTx( + builder, txSigner := factory.NewWallet(key0) + utx0, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr0}, }, - []*secp256k1.PrivateKey{key0}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr0}, }), ) require.NoError(err) + addSubnetTx0, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx0) + require.NoError(err) - addSubnetTx1, err := txBuilder.NewCreateSubnetTx( + builder, txSigner = factory.NewWallet(key1) + utx1, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr1}, }, - []*secp256k1.PrivateKey{key1}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr1}, }), ) require.NoError(err) + addSubnetTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx1) + require.NoError(err) - addSubnetTx2, err := txBuilder.NewCreateSubnetTx( + utx2, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr1}, }, - []*secp256k1.PrivateKey{key1}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: 
[]ids.ShortID{addr0}, }), ) require.NoError(err) + addSubnetTx2, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx2) + require.NoError(err) preferredID := vm.manager.Preferred() preferred, err := vm.manager.GetBlock(preferredID) @@ -582,7 +602,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require := require.New(t) - vm, txBuilder, baseDB, mutableSharedMemory := defaultVM(t, cortina) + vm, factory, baseDB, mutableSharedMemory := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -591,7 +611,8 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { newValidatorEndTime := newValidatorStartTime.Add(defaultMinStakingDuration) // Create the tx to add a new validator - addValidatorTx, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(newValidatorStartTime.Unix()), @@ -603,9 +624,10 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // Create the standard block to add the new validator preferredID := vm.manager.Preferred() @@ -791,7 +813,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require := require.New(t) - vm, txBuilder, baseDB, mutableSharedMemory := defaultVM(t, cortina) + vm, factory, baseDB, mutableSharedMemory := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -803,7 +825,8 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { nodeID0 := ids.GenerateTestNodeID() // Create the tx to 
add the first new validator - addValidatorTx0, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID0, Start: uint64(newValidatorStartTime0.Unix()), @@ -815,9 +838,10 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, ) require.NoError(err) + addValidatorTx0, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // Create the standard block to add the first new validator preferredID := vm.manager.Preferred() @@ -979,7 +1003,8 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { nodeID1 := ids.GenerateTestNodeID() // Create the tx to add the second new validator - addValidatorTx1, err := txBuilder.NewAddValidatorTx( + builder, txSigner = factory.NewWallet(keys[1]) + utx1, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID1, Start: uint64(newValidatorStartTime1.Unix()), @@ -991,9 +1016,10 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[1]}, ) require.NoError(err) + addValidatorTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx1) + require.NoError(err) // Create the standard block to add the second new validator preferredChainTime = importBlk.Timestamp() @@ -1111,7 +1137,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1138,7 +1164,8 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { extraNodeID := ids.GenerateTestNodeID() // Create 
the tx to add the first new validator - addValidatorTx0, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: extraNodeID, Start: uint64(newValidatorStartTime0.Unix()), @@ -1150,9 +1177,10 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, ) require.NoError(err) + addValidatorTx0, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // Create the standard block to add the first new validator preferredID := vm.manager.Preferred() @@ -1251,7 +1279,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator2EndTime := delegator2StartTime.Add(3 * defaultMinStakingDuration) delegator2Stake := defaultMaxValidatorStake - validatorStake - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1263,7 +1291,8 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { changeAddr := keys[0].PublicKey().Address() // create valid tx - addValidatorTx, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(validatorStartTime.Unix()), @@ -1275,13 +1304,14 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { Addrs: []ids.ShortID{id}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // issue the add validator tx vm.ctx.Lock.Unlock() @@ -1296,7 +1326,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { 
require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addFirstDelegatorTx, err := txBuilder.NewAddDelegatorTx( + uDelTx, err := builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(delegator1StartTime.Unix()), @@ -1307,13 +1337,14 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addFirstDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx) + require.NoError(err) // issue the first add delegator tx vm.ctx.Lock.Unlock() @@ -1328,7 +1359,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addSecondDelegatorTx, err := txBuilder.NewAddDelegatorTx( + uDelTx, err = builder.NewAddDelegatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(delegator2StartTime.Unix()), @@ -1339,13 +1370,14 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addSecondDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx) + require.NoError(err) // attempting to issue the second add delegator tx should fail because the // total stake weight would go over the limit. 
@@ -1361,7 +1393,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1372,7 +1404,8 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() - addValidatorTx, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(validatorStartTime.Unix()), @@ -1384,13 +1417,14 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t Addrs: []ids.ShortID{id}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(addValidatorTx)) @@ -1403,18 +1437,19 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.NoError(addValidatorBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - createSubnetTx, err := txBuilder.NewCreateSubnetTx( + uSubnetTx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + createSubnetTx, err := walletsigner.SignUnsigned(context.Background(), 
txSigner, uSubnetTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(createSubnetTx)) @@ -1427,7 +1462,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.NoError(createSubnetBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - addSubnetValidatorTx, err := txBuilder.NewAddSubnetValidatorTx( + uSubnetValTx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -1437,13 +1472,14 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t }, Subnet: createSubnetTx.ID(), }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(addSubnetValidatorTx)) @@ -1464,16 +1500,17 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.NoError(err) require.Empty(emptyValidatorSet) - removeSubnetValidatorTx, err := txBuilder.NewRemoveSubnetValidatorTx( + uRemoveSubnetValTx, err := builder.NewRemoveSubnetValidatorTx( nodeID, createSubnetTx.ID(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + removeSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uRemoveSubnetValTx) + require.NoError(err) // Set the clock so that the validator will be moved from the pending // validator set into the current validator set. 
@@ -1505,7 +1542,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1516,7 +1553,8 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() - addValidatorTx, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(validatorStartTime.Unix()), @@ -1528,13 +1566,14 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t Addrs: []ids.ShortID{id}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(addValidatorTx)) @@ -1547,18 +1586,19 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(addValidatorBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - createSubnetTx, err := txBuilder.NewCreateSubnetTx( + uCreateSubnetTx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + createSubnetTx, err := 
walletsigner.SignUnsigned(context.Background(), txSigner, uCreateSubnetTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(createSubnetTx)) @@ -1571,7 +1611,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(createSubnetBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - addSubnetValidatorTx, err := txBuilder.NewAddSubnetValidatorTx( + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -1581,13 +1621,14 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t }, Subnet: createSubnetTx.ID(), }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + addSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(addSubnetValidatorTx)) @@ -1600,16 +1641,17 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(addSubnetValidatorBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - removeSubnetValidatorTx, err := txBuilder.NewRemoveSubnetValidatorTx( + uRemoveSubnetValTx, err := builder.NewRemoveSubnetValidatorTx( nodeID, createSubnetTx.ID(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + removeSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uRemoveSubnetValTx) + require.NoError(err) // Set the clock so that the validator will be moved from the pending // validator set into the current validator set. 
@@ -1632,7 +1674,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { // setup require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1662,7 +1704,8 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) // build primary network validator with BLS key - primaryTx, err := txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner := factory.NewWallet(keys...) + uPrimaryTx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -1683,14 +1726,14 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { Addrs: []ids.ShortID{addr}, }, reward.PercentDenominator, - keys, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) - uPrimaryTx := primaryTx.Unsigned.(*txs.AddPermissionlessValidatorTx) + primaryTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(primaryTx)) @@ -1710,7 +1753,8 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) // insert the subnet validator - subnetTx, err := txBuilder.NewAddSubnetValidatorTx( + builder, txSigner = factory.NewWallet(keys[0], keys[1]) + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -1720,13 +1764,14 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) + subnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, 
uAddSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(subnetTx)) @@ -1789,7 +1834,8 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) require.NotEqual(sk1, sk2) - primaryRestartTx, err := txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner = factory.NewWallet(keys...) + uPrimaryRestartTx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -1810,14 +1856,14 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { Addrs: []ids.ShortID{addr}, }, reward.PercentDenominator, - keys, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) - uPrimaryRestartTx := primaryRestartTx.Unsigned.(*txs.AddPermissionlessValidatorTx) + primaryRestartTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryRestartTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(primaryRestartTx)) @@ -1893,7 +1939,7 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // setup require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1913,7 +1959,9 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // Add a primary network validator with no BLS key nodeID := ids.GenerateTestNodeID() addr := keys[0].PublicKey().Address() - primaryTx1, err := txBuilder.NewAddValidatorTx( + + builder, txSigner := factory.NewWallet(keys[0]) + uAddValTx1, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(primaryStartTime1.Unix()), @@ -1925,13 +1973,14 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { Addrs: []ids.ShortID{addr}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, 
walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) + primaryTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddValTx1) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(primaryTx1)) @@ -1981,7 +2030,8 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { sk2, err := bls.NewSecretKey() require.NoError(err) - primaryRestartTx, err := txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner = factory.NewWallet(keys...) + uPrimaryRestartTx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -2002,13 +2052,14 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { Addrs: []ids.ShortID{addr}, }, reward.PercentDenominator, - keys, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) + primaryRestartTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryRestartTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(primaryRestartTx)) @@ -2044,7 +2095,7 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // setup require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -2068,7 +2119,9 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // Add a primary network validator with no BLS key nodeID := ids.GenerateTestNodeID() addr := keys[0].PublicKey().Address() - primaryTx1, err := txBuilder.NewAddValidatorTx( + + builder, txSigner := factory.NewWallet(keys[0]) + uPrimaryTx1, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(primaryStartTime1.Unix()), @@ -2080,13 +2133,14 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { Addrs: 
[]ids.ShortID{addr}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) + primaryTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryTx1) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(primaryTx1)) @@ -2106,7 +2160,8 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { require.NoError(err) // insert the subnet validator - subnetTx, err := txBuilder.NewAddSubnetValidatorTx( + builder, txSigner = factory.NewWallet(keys[0], keys[1]) + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -2116,13 +2171,14 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) + subnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(subnetTx)) @@ -2184,7 +2240,8 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { sk2, err := bls.NewSecretKey() require.NoError(err) - primaryRestartTx, err := txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner = factory.NewWallet(keys...) 
+ uPrimaryRestartTx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -2205,13 +2262,14 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { Addrs: []ids.ShortID{addr}, }, reward.PercentDenominator, - keys, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) + primaryRestartTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryRestartTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(primaryRestartTx)) @@ -2256,7 +2314,7 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { // setup require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -2278,7 +2336,9 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { // Add a primary network validator with no BLS key nodeID := ids.GenerateTestNodeID() addr := keys[0].PublicKey().Address() - primaryTx1, err := txBuilder.NewAddValidatorTx( + + builder, txSigner := factory.NewWallet(keys[0]) + uPrimaryTx1, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(primaryStartTime1.Unix()), @@ -2290,13 +2350,14 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { Addrs: []ids.ShortID{addr}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) + primaryTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryTx1) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(primaryTx1)) @@ -2313,7 +2374,8 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { require.NoError(err) // insert the subnet validator - 
subnetTx, err := txBuilder.NewAddSubnetValidatorTx( + builder, txSigner = factory.NewWallet(keys[0], keys[1]) + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -2323,13 +2385,14 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{keys[0], keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }), ) require.NoError(err) + subnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(subnetTx)) diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 50c677688713..e6e645d74242 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -71,6 +71,7 @@ import ( blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" walletbuilder "github.com/ava-labs/avalanchego/wallet/chain/p/builder" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -205,7 +206,7 @@ func defaultGenesis(t *testing.T, avaxAssetID ids.ID) (*api.BuildGenesisArgs, [] return &buildGenesisArgs, genesisBytes } -func defaultVM(t *testing.T, f fork) (*VM, *txstest.Builder, database.Database, *mutableSharedMemory) { +func defaultVM(t *testing.T, f fork) (*VM, *txstest.WalletFactory, database.Database, *mutableSharedMemory) { require := require.New(t) var ( apricotPhase3Time = mockable.MaxTime @@ -309,7 +310,7 @@ func defaultVM(t *testing.T, f fork) (*VM, *txstest.Builder, database.Database, require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - builder := txstest.NewBuilder( + factory := txstest.NewWalletFactory( ctx, 
&vm.Config, vm.state, @@ -318,8 +319,8 @@ func defaultVM(t *testing.T, f fork) (*VM, *txstest.Builder, database.Database, // Create a subnet and store it in testSubnet1 // Note: following Banff activation, block acceptance will move // chain time ahead - var err error - testSubnet1, err = builder.NewCreateSubnetTx( + builder, signer := factory.NewWallet(keys[0]) + utx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 2, Addrs: []ids.ShortID{ @@ -328,13 +329,15 @@ func defaultVM(t *testing.T, f fork) (*VM, *txstest.Builder, database.Database, keys[2].PublicKey().Address(), }, }, - []*secp256k1.PrivateKey{keys[0]}, // pays tx fee walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) require.NoError(err) + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) + vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(testSubnet1)) vm.ctx.Lock.Lock() @@ -351,7 +354,7 @@ func defaultVM(t *testing.T, f fork) (*VM, *txstest.Builder, database.Database, require.NoError(vm.Shutdown(context.Background())) }) - return vm, builder, db, msm + return vm, factory, db, msm } // Ensure genesis state is parsed from bytes and stored correctly @@ -410,7 +413,7 @@ func TestGenesis(t *testing.T) { // accept proposal to add validator to primary network func TestAddValidatorCommit(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -425,7 +428,8 @@ func TestAddValidatorCommit(t *testing.T) { require.NoError(err) // create valid tx - tx, err := txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -446,9 +450,10 @@ func TestAddValidatorCommit(t *testing.T) 
{ Addrs: []ids.ShortID{rewardAddress}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() @@ -473,7 +478,7 @@ func TestAddValidatorCommit(t *testing.T) { // verify invalid attempt to add validator to primary network func TestInvalidAddValidatorCommit(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -482,7 +487,8 @@ func TestInvalidAddValidatorCommit(t *testing.T) { endTime := startTime.Add(defaultMinStakingDuration) // create invalid tx - tx, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -494,9 +500,10 @@ func TestInvalidAddValidatorCommit(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) preferredID := vm.manager.Preferred() preferred, err := vm.manager.GetBlock(preferredID) @@ -527,7 +534,7 @@ func TestInvalidAddValidatorCommit(t *testing.T) { // Reject attempt to add validator to primary network func TestAddValidatorReject(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -539,7 +546,8 @@ func TestAddValidatorReject(t *testing.T) { ) // create valid tx - tx, err := txBuilder.NewAddValidatorTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( &txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -551,9 +559,10 @@ func 
TestAddValidatorReject(t *testing.T) { Addrs: []ids.ShortID{rewardAddress}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() @@ -576,7 +585,7 @@ func TestAddValidatorReject(t *testing.T) { // Reject proposal to add validator to primary network func TestAddValidatorInvalidNotReissued(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -590,7 +599,8 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { require.NoError(err) // create valid tx - tx, err := txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: repeatNodeID, @@ -611,9 +621,10 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() @@ -625,7 +636,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { // Accept proposal to add validator to subnet func TestAddSubnetValidatorAccept(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -638,7 +649,8 @@ func TestAddSubnetValidatorAccept(t *testing.T) { // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] // validates primary network ([defaultValidateStartTime, defaultValidateEndTime]) - tx, err := txBuilder.NewAddSubnetValidatorTx( + 
builder, txSigner := factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -648,9 +660,10 @@ func TestAddSubnetValidatorAccept(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() @@ -675,7 +688,7 @@ func TestAddSubnetValidatorAccept(t *testing.T) { // Reject proposal to add validator to subnet func TestAddSubnetValidatorReject(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -688,7 +701,8 @@ func TestAddSubnetValidatorReject(t *testing.T) { // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] // validates primary network ([defaultValidateStartTime, defaultValidateEndTime]) - tx, err := txBuilder.NewAddSubnetValidatorTx( + builder, txSigner := factory.NewWallet(testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]) + utx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -698,9 +712,10 @@ func TestAddSubnetValidatorReject(t *testing.T) { }, Subnet: testSubnet1.ID(), }, - []*secp256k1.PrivateKey{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() @@ -873,19 +888,21 @@ func TestUnneededBuildBlock(t *testing.T) { // test acceptance of proposal to create a new chain func TestCreateChain(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := 
defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - tx, err := txBuilder.NewCreateChainTx( + builder, txSigner := factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, ids.ID{'t', 'e', 's', 't', 'v', 'm'}, nil, "name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(tx)) @@ -921,12 +938,12 @@ func TestCreateChain(t *testing.T) { // 3) Advance timestamp to validator's end time (removing validator from current) func TestCreateSubnet(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - nodeID := genesisNodeIDs[0] - createSubnetTx, err := txBuilder.NewCreateSubnetTx( + builder, txSigner := factory.NewWallet(keys[0]) + uCreateSubnetTx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ @@ -934,13 +951,14 @@ func TestCreateSubnet(t *testing.T) { keys[1].PublicKey().Address(), }, }, - []*secp256k1.PrivateKey{keys[0]}, // payer walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) require.NoError(err) + createSubnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uCreateSubnetTx) + require.NoError(err) subnetID := createSubnetTx.ID() vm.ctx.Lock.Unlock() @@ -964,10 +982,11 @@ func TestCreateSubnet(t *testing.T) { require.Contains(subnetIDs, subnetID) // Now that we've created a new subnet, add a validator to that subnet + nodeID := genesisNodeIDs[0] startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) // [startTime, 
endTime] is subset of time keys[0] validates default subnet so tx is valid - addValidatorTx, err := txBuilder.NewAddSubnetValidatorTx( + uAddValTx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -977,9 +996,10 @@ func TestCreateSubnet(t *testing.T) { }, Subnet: subnetID, }, - []*secp256k1.PrivateKey{keys[0]}, ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddValTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(addValidatorTx)) @@ -1020,7 +1040,7 @@ func TestCreateSubnet(t *testing.T) { // test asset import func TestAtomicImport(t *testing.T) { require := require.New(t) - vm, txBuilder, baseDB, mutableSharedMemory := defaultVM(t, latestFork) + vm, factory, baseDB, mutableSharedMemory := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1036,13 +1056,13 @@ func TestAtomicImport(t *testing.T) { mutableSharedMemory.SharedMemory = m.NewSharedMemory(vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(vm.ctx.XChainID) - _, err := txBuilder.NewImportTx( + builder, _ := factory.NewWallet(keys[0]) + _, err := builder.NewImportTx( vm.ctx.XChainID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{recipientKey.PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[0]}, ) require.ErrorIs(err, walletbuilder.ErrInsufficientFunds) @@ -1077,15 +1097,17 @@ func TestAtomicImport(t *testing.T) { }, })) - tx, err := txBuilder.NewImportTx( + builder, txSigner := factory.NewWallet(recipientKey) + utx, err := builder.NewImportTx( vm.ctx.XChainID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{recipientKey.PublicKey().Address()}, }, - []*secp256k1.PrivateKey{recipientKey}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(tx)) @@ -2085,7 +2107,7 @@ func 
TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { validatorStartTime := latestForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -2097,7 +2119,8 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) - addValidatorTx, err := txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner := factory.NewWallet(keys[0]) + uAddValTx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -2118,13 +2141,14 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { Addrs: []ids.ShortID{id}, }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddValTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(addValidatorTx)) @@ -2137,18 +2161,19 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(addValidatorBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - createSubnetTx, err := txBuilder.NewCreateSubnetTx( + uCreateSubnetTx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{id}, }, - []*secp256k1.PrivateKey{keys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) require.NoError(err) + createSubnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uCreateSubnetTx) + require.NoError(err) 
vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(createSubnetTx)) @@ -2161,7 +2186,8 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(createSubnetBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - addSubnetValidatorTx, err := txBuilder.NewAddSubnetValidatorTx( + builder, txSigner = factory.NewWallet(key, keys[1]) + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, @@ -2171,24 +2197,27 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { }, Subnet: createSubnetTx.ID(), }, - []*secp256k1.PrivateKey{key, keys[1]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[1].PublicKey().Address()}, }), ) require.NoError(err) + addSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddSubnetValTx) + require.NoError(err) - removeSubnetValidatorTx, err := txBuilder.NewRemoveSubnetValidatorTx( + builder, txSigner = factory.NewWallet(key, keys[2]) + uRemoveSubnetValTx, err := builder.NewRemoveSubnetValidatorTx( nodeID, createSubnetTx.ID(), - []*secp256k1.PrivateKey{key, keys[2]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[2].PublicKey().Address()}, }), ) require.NoError(err) + removeSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uRemoveSubnetValTx) + require.NoError(err) statelessBlock, err := block.NewBanffStandardBlock( vm.state.GetTimestamp(), @@ -2214,23 +2243,25 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { func TestTransferSubnetOwnershipTx(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - // Create a subnet - createSubnetTx, err := 
txBuilder.NewCreateSubnetTx( + builder, txSigner := factory.NewWallet(keys[0]) + uCreateSubnetTx, err := builder.NewCreateSubnetTx( &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }), ) require.NoError(err) + createSubnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uCreateSubnetTx) + require.NoError(err) + subnetID := createSubnetTx.ID() vm.ctx.Lock.Unlock() @@ -2261,15 +2292,16 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { expectedOwner.InitCtx(ctx) require.Equal(expectedOwner, subnetOwner) - transferSubnetOwnershipTx, err := txBuilder.NewTransferSubnetOwnershipTx( + uTransferSubnetOwnershipTx, err := builder.NewTransferSubnetOwnershipTx( subnetID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[1].PublicKey().Address()}, }, - []*secp256k1.PrivateKey{keys[0]}, ) require.NoError(err) + transferSubnetOwnershipTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uTransferSubnetOwnershipTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(transferSubnetOwnershipTx)) @@ -2300,14 +2332,15 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { func TestBaseTx(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() sendAmt := uint64(100000) changeAddr := ids.ShortEmpty - baseTx, err := txBuilder.NewBaseTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewBaseTx( []*avax.TransferableOutput{ { Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, @@ -2322,13 +2355,14 @@ func TestBaseTx(t *testing.T) { }, }, }, - []*secp256k1.PrivateKey{keys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: 
[]ids.ShortID{changeAddr}, }), ) require.NoError(err) + baseTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) totalInputAmt := uint64(0) key0InputAmt := uint64(0) @@ -2384,7 +2418,7 @@ func TestBaseTx(t *testing.T) { func TestPruneMempool(t *testing.T) { require := require.New(t) - vm, txBuilder, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -2392,7 +2426,8 @@ func TestPruneMempool(t *testing.T) { sendAmt := uint64(100000) changeAddr := ids.ShortEmpty - baseTx, err := txBuilder.NewBaseTx( + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewBaseTx( []*avax.TransferableOutput{ { Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, @@ -2407,13 +2442,14 @@ func TestPruneMempool(t *testing.T) { }, }, }, - []*secp256k1.PrivateKey{keys[0]}, walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{changeAddr}, }), ) require.NoError(err) + baseTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(baseTx)) @@ -2433,7 +2469,8 @@ func TestPruneMempool(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) - addValidatorTx, err := txBuilder.NewAddPermissionlessValidatorTx( + builder, txSigner = factory.NewWallet(keys[1]) + uAddValTx, err := builder.NewAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: ids.GenerateTestNodeID(), @@ -2454,9 +2491,10 @@ func TestPruneMempool(t *testing.T) { Addrs: []ids.ShortID{keys[2].Address()}, }, 20000, - []*secp256k1.PrivateKey{keys[1]}, ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddValTx) + require.NoError(err) vm.ctx.Lock.Unlock() require.NoError(vm.issueTxFromRPC(addValidatorTx)) From 7d3415cd8febe448d4356098e1a1f810eecef92a Mon Sep 17 00:00:00 2001 From: 
Stephen Buttolph Date: Thu, 6 Jun 2024 06:43:37 -0400 Subject: [PATCH 047/102] Small metrics cleanup (#3088) --- snow/networking/handler/handler.go | 18 ++++++++++-- snow/networking/handler/message_queue.go | 28 +++++++++++-------- snow/networking/handler/message_queue_test.go | 19 +++++++++---- vms/avm/vm.go | 12 ++++---- vms/platformvm/vm.go | 12 ++++---- 5 files changed, 57 insertions(+), 32 deletions(-) diff --git a/snow/networking/handler/handler.go b/snow/networking/handler/handler.go index f1966adc4dc4..9388d2d66be6 100644 --- a/snow/networking/handler/handler.go +++ b/snow/networking/handler/handler.go @@ -165,11 +165,25 @@ func New( return nil, fmt.Errorf("initializing handler metrics errored with: %w", err) } cpuTracker := resourceTracker.CPUTracker() - h.syncMessageQueue, err = NewMessageQueue(h.ctx, h.validators, cpuTracker, "handler") + h.syncMessageQueue, err = NewMessageQueue( + h.ctx.Log, + h.ctx.SubnetID, + h.validators, + cpuTracker, + "handler", + h.ctx.Registerer, + ) if err != nil { return nil, fmt.Errorf("initializing sync message queue errored with: %w", err) } - h.asyncMessageQueue, err = NewMessageQueue(h.ctx, h.validators, cpuTracker, "handler_async") + h.asyncMessageQueue, err = NewMessageQueue( + h.ctx.Log, + h.ctx.SubnetID, + h.validators, + cpuTracker, + "handler_async", + h.ctx.Registerer, + ) if err != nil { return nil, fmt.Errorf("initializing async message queue errored with: %w", err) } diff --git a/snow/networking/handler/message_queue.go b/snow/networking/handler/message_queue.go index f17cfc1a2e92..4d632c62d77e 100644 --- a/snow/networking/handler/message_queue.go +++ b/snow/networking/handler/message_queue.go @@ -13,10 +13,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/networking/tracker" 
"github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/buffer" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -60,7 +60,8 @@ type messageQueue struct { clock mockable.Clock metrics messageQueueMetrics - ctx *snow.ConsensusContext + log logging.Logger + subnetID ids.ID // Validator set for the chain associated with this vdrs validators.Manager // Tracks CPU utilization of each node @@ -75,20 +76,23 @@ type messageQueue struct { } func NewMessageQueue( - ctx *snow.ConsensusContext, + log logging.Logger, + subnetID ids.ID, vdrs validators.Manager, cpuTracker tracker.Tracker, metricsNamespace string, + reg prometheus.Registerer, ) (MessageQueue, error) { m := &messageQueue{ - ctx: ctx, + log: log, + subnetID: subnetID, vdrs: vdrs, cpuTracker: cpuTracker, cond: sync.NewCond(&sync.Mutex{}), nodeToUnprocessedMsgs: make(map[ids.NodeID]int), msgAndCtxs: buffer.NewUnboundedDeque[*msgAndContext](1 /*=initSize*/), } - return m, m.metrics.initialize(metricsNamespace, ctx.Registerer) + return m, m.metrics.initialize(metricsNamespace, reg) } func (m *messageQueue) Push(ctx context.Context, msg Message) { @@ -137,7 +141,7 @@ func (m *messageQueue) Pop() (context.Context, Message, bool) { i := 0 for { if i == n { - m.ctx.Log.Debug("canPop is false for all unprocessed messages", + m.log.Debug("canPop is false for all unprocessed messages", zap.Int("numMessages", n), ) } @@ -212,21 +216,21 @@ func (m *messageQueue) canPop(msg message.InboundMessage) bool { // the number of nodes with unprocessed messages. 
baseMaxCPU := 1 / float64(len(m.nodeToUnprocessedMsgs)) nodeID := msg.NodeID() - weight := m.vdrs.GetWeight(m.ctx.SubnetID, nodeID) + weight := m.vdrs.GetWeight(m.subnetID, nodeID) var portionWeight float64 - if totalVdrsWeight, err := m.vdrs.TotalWeight(m.ctx.SubnetID); err != nil { + if totalVdrsWeight, err := m.vdrs.TotalWeight(m.subnetID); err != nil { // The sum of validator weights should never overflow, but if they do, // we treat portionWeight as 0. - m.ctx.Log.Error("failed to get total weight of validators", - zap.Stringer("subnetID", m.ctx.SubnetID), + m.log.Error("failed to get total weight of validators", + zap.Stringer("subnetID", m.subnetID), zap.Error(err), ) } else if totalVdrsWeight == 0 { // The sum of validator weights should never be 0, but handle that case // for completeness here to avoid divide by 0. - m.ctx.Log.Warn("validator set is empty", - zap.Stringer("subnetID", m.ctx.SubnetID), + m.log.Warn("validator set is empty", + zap.Stringer("subnetID", m.subnetID), ) } else { portionWeight = float64(weight) / float64(totalVdrsWeight) diff --git a/snow/networking/handler/message_queue_test.go b/snow/networking/handler/message_queue_test.go index 577a4686faa0..a74ffcfb4469 100644 --- a/snow/networking/handler/message_queue_test.go +++ b/snow/networking/handler/message_queue_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -15,21 +16,27 @@ import ( "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/tracker" - "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" ) func TestQueue(t *testing.T) { ctrl := gomock.NewController(t) 
require := require.New(t) cpuTracker := tracker.NewMockTracker(ctrl) - snowCtx := snowtest.Context(t, snowtest.CChainID) - ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() vdr1ID, vdr2ID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr1ID, nil, ids.Empty, 1)) - require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr2ID, nil, ids.Empty, 1)) - mIntf, err := NewMessageQueue(ctx, vdrs, cpuTracker, "") + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr2ID, nil, ids.Empty, 1)) + mIntf, err := NewMessageQueue( + logging.NoLog{}, + constants.PrimaryNetworkID, + vdrs, + cpuTracker, + "", + prometheus.NewRegistry(), + ) require.NoError(err) u := mIntf.(*messageQueue) currentTime := time.Now() diff --git a/vms/avm/vm.go b/vms/avm/vm.go index ab05b053b393..6a455132c1a1 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/versiondb" @@ -33,7 +34,6 @@ import ( "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/config" - "github.com/ava-labs/avalanchego/vms/avm/metrics" "github.com/ava-labs/avalanchego/vms/avm/network" "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" @@ -47,6 +47,7 @@ import ( blockbuilder "github.com/ava-labs/avalanchego/vms/avm/block/builder" blockexecutor "github.com/ava-labs/avalanchego/vms/avm/block/executor" extensions "github.com/ava-labs/avalanchego/vms/avm/fxs" + avmmetrics 
"github.com/ava-labs/avalanchego/vms/avm/metrics" txexecutor "github.com/ava-labs/avalanchego/vms/avm/txs/executor" xmempool "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" ) @@ -66,7 +67,7 @@ type VM struct { config.Config - metrics metrics.Metrics + metrics avmmetrics.Metrics avax.AddressManager ids.Aliaser @@ -173,16 +174,15 @@ func (vm *VM) Initialize( zap.Reflect("config", avmConfig), ) - registerer := prometheus.NewRegistry() - if err := ctx.Metrics.Register("", registerer); err != nil { + vm.registerer, err = metrics.MakeAndRegister(ctx.Metrics, "") + if err != nil { return err } - vm.registerer = registerer vm.connectedPeers = make(map[ids.NodeID]*version.Application) // Initialize metrics as soon as possible - vm.metrics, err = metrics.New(registerer) + vm.metrics, err = avmmetrics.New(vm.registerer) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) } diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 565960cff599..efbfe0fa5453 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -12,9 +12,9 @@ import ( "time" "github.com/gorilla/rpc/v2" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -35,7 +35,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" @@ -47,6 +46,7 @@ import ( snowmanblock 
"github.com/ava-labs/avalanchego/snow/engine/snowman/block" blockbuilder "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" + platformvmmetrics "github.com/ava-labs/avalanchego/vms/platformvm/metrics" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" pmempool "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" @@ -65,7 +65,7 @@ type VM struct { *network.Network validators.State - metrics metrics.Metrics + metrics platformvmmetrics.Metrics // Used to get time. Useful for faking time during tests. clock mockable.Clock @@ -113,13 +113,13 @@ func (vm *VM) Initialize( } chainCtx.Log.Info("using VM execution config", zap.Reflect("config", execConfig)) - registerer := prometheus.NewRegistry() - if err := chainCtx.Metrics.Register("", registerer); err != nil { + registerer, err := metrics.MakeAndRegister(chainCtx.Metrics, "") + if err != nil { return err } // Initialize metrics as soon as possible - vm.metrics, err = metrics.New(registerer) + vm.metrics, err = platformvmmetrics.New(registerer) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) } From f1a9d2ab766ab7c88a11eb491988f7db04b7e310 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 6 Jun 2024 11:28:09 -0400 Subject: [PATCH 048/102] Fix race in test (#3089) --- vms/platformvm/service_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 3d4aac61770e..69e94d0dee3e 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -1085,10 +1085,13 @@ func TestServiceGetSubnets(t *testing.T) { newOwnerIDStr := "P-testing1t73fa4p4dypa4s3kgufuvr6hmprjclw66mgqgm" newOwnerID, err := service.addrManager.ParseLocalAddress(newOwnerIDStr) 
require.NoError(err) + + service.vm.ctx.Lock.Lock() service.vm.state.SetSubnetOwner(testSubnet1ID, &secp256k1fx.OutputOwners{ Addrs: []ids.ShortID{newOwnerID}, Threshold: 1, }) + service.vm.ctx.Lock.Unlock() require.NoError(service.GetSubnets(nil, &GetSubnetsArgs{}, &response)) require.Equal([]APISubnet{ From 783fdfc9d52267865300acee56b10e918a6e80b6 Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Thu, 6 Jun 2024 11:59:05 -0400 Subject: [PATCH 049/102] Implement error driven snowflake hardcoded with a single beta (#2978) --- snow/consensus/snowball/binary_snowflake.go | 69 ++++++++++------- snow/consensus/snowball/nnary_snowflake.go | 69 ++++++++++------- snow/consensus/snowball/parameters.go | 5 ++ snow/consensus/snowball/unary_snowball.go | 17 +++-- .../consensus/snowball/unary_snowball_test.go | 14 ++-- snow/consensus/snowball/unary_snowflake.go | 74 ++++++++++++------- .../snowball/unary_snowflake_test.go | 16 ++-- 7 files changed, 164 insertions(+), 100 deletions(-) diff --git a/snow/consensus/snowball/binary_snowflake.go b/snow/consensus/snowball/binary_snowflake.go index 6349fd3975a6..81cca1ce8501 100644 --- a/snow/consensus/snowball/binary_snowflake.go +++ b/snow/consensus/snowball/binary_snowflake.go @@ -11,29 +11,38 @@ func newBinarySnowflake(alphaPreference, alphaConfidence, beta, choice int) bina return binarySnowflake{ binarySlush: newBinarySlush(choice), alphaPreference: alphaPreference, - alphaConfidence: alphaConfidence, - beta: beta, + terminationConditions: []terminationCondition{ + { + alphaConfidence: alphaConfidence, + beta: beta, + }, + }, + confidence: make([]int, 1), } } // binarySnowflake is the implementation of a binary snowflake instance +// Invariant: +// len(terminationConditions) == len(confidence) +// terminationConditions[i].alphaConfidence < terminationConditions[i+1].alphaConfidence +// terminationConditions[i].beta <= terminationConditions[i+1].beta +// confidence[i] >= confidence[i+1] (except after finalizing due to early 
termination) type binarySnowflake struct { // wrap the binary slush logic binarySlush - // confidence tracks the number of successful polls in a row that have - // returned the preference - confidence int - // alphaPreference is the threshold required to update the preference alphaPreference int - // alphaConfidence is the threshold required to increment the confidence counter - alphaConfidence int + // terminationConditions gives the ascending ordered list of alphaConfidence values + // required to increment the corresponding confidence counter. + // The corresponding beta values give the threshold required to finalize this instance. + terminationConditions []terminationCondition - // beta is the number of consecutive successful queries required for - // finalization. - beta int + // confidence is the number of consecutive successful polls for a given + // alphaConfidence threshold. + // This instance finalizes when confidence[i] >= terminationConditions[i].beta for any i + confidence []int // finalized prevents the state from changing after the required number of // consecutive polls has been reached @@ -50,26 +59,34 @@ func (sf *binarySnowflake) RecordPoll(count, choice int) { return } - if count < sf.alphaConfidence { - sf.confidence = 0 - sf.binarySlush.RecordSuccessfulPoll(choice) - return + // If I am changing my preference, reset confidence counters + // before recording a successful poll on the slush instance. + if choice != sf.Preference() { + clear(sf.confidence) } + sf.binarySlush.RecordSuccessfulPoll(choice) - if preference := sf.Preference(); preference == choice { - sf.confidence++ - } else { - // confidence is set to 1 because there has already been 1 successful - // poll, namely this poll. - sf.confidence = 1 + for i, terminationCondition := range sf.terminationConditions { + // If I did not reach this alpha threshold, I did not + // reach any more alpha thresholds. + // Clear the remaining confidence counters. 
+ if count < terminationCondition.alphaConfidence { + clear(sf.confidence[i:]) + return + } + + // I reached this alpha threshold, increment the confidence counter + // and check if I can finalize. + sf.confidence[i]++ + if sf.confidence[i] >= terminationCondition.beta { + sf.finalized = true + return + } } - - sf.finalized = sf.confidence >= sf.beta - sf.binarySlush.RecordSuccessfulPoll(choice) } func (sf *binarySnowflake) RecordUnsuccessfulPoll() { - sf.confidence = 0 + clear(sf.confidence) } func (sf *binarySnowflake) Finalized() bool { @@ -78,7 +95,7 @@ func (sf *binarySnowflake) Finalized() bool { func (sf *binarySnowflake) String() string { return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", - sf.confidence, + sf.confidence[0], sf.finalized, &sf.binarySlush) } diff --git a/snow/consensus/snowball/nnary_snowflake.go b/snow/consensus/snowball/nnary_snowflake.go index 9433078e36b8..ab3c4f462c29 100644 --- a/snow/consensus/snowball/nnary_snowflake.go +++ b/snow/consensus/snowball/nnary_snowflake.go @@ -15,30 +15,39 @@ func newNnarySnowflake(alphaPreference, alphaConfidence, beta int, choice ids.ID return nnarySnowflake{ nnarySlush: newNnarySlush(choice), alphaPreference: alphaPreference, - alphaConfidence: alphaConfidence, - beta: beta, + terminationConditions: []terminationCondition{ + { + alphaConfidence: alphaConfidence, + beta: beta, + }, + }, + confidence: make([]int, 1), } } // nnarySnowflake is the implementation of a snowflake instance with an // unbounded number of choices +// Invariant: +// len(terminationConditions) == len(confidence) +// terminationConditions[i].alphaConfidence < terminationConditions[i+1].alphaConfidence +// terminationConditions[i].beta <= terminationConditions[i+1].beta +// confidence[i] >= confidence[i+1] (except after finalizing due to early termination) type nnarySnowflake struct { // wrap the n-nary slush logic nnarySlush - // beta is the number of consecutive successful queries required for - // finalization. 
- beta int - // alphaPreference is the threshold required to update the preference alphaPreference int - // alphaConfidence is the threshold required to increment the confidence counter - alphaConfidence int + // terminationConditions gives the ascending ordered list of alphaConfidence values + // required to increment the corresponding confidence counter. + // The corresponding beta values give the threshold required to finalize this instance. + terminationConditions []terminationCondition - // confidence tracks the number of successful polls in a row that have - // returned the preference - confidence int + // confidence is the number of consecutive successful polls for a given + // alphaConfidence threshold. + // This instance finalizes when confidence[i] >= terminationConditions[i].beta for any i + confidence []int // finalized prevents the state from changing after the required number of // consecutive polls has been reached @@ -57,26 +66,34 @@ func (sf *nnarySnowflake) RecordPoll(count int, choice ids.ID) { return } - if count < sf.alphaConfidence { - sf.confidence = 0 - sf.nnarySlush.RecordSuccessfulPoll(choice) - return + // If I am changing my preference, reset confidence counters + // before recording a successful poll on the slush instance. + if choice != sf.Preference() { + clear(sf.confidence) } + sf.nnarySlush.RecordSuccessfulPoll(choice) - if preference := sf.Preference(); preference == choice { - sf.confidence++ - } else { - // confidence is set to 1 because there has already been 1 successful - // poll, namely this poll. - sf.confidence = 1 + for i, terminationCondition := range sf.terminationConditions { + // If I did not reach this alpha threshold, I did not + // reach any more alpha thresholds. + // Clear the remaining confidence counters. + if count < terminationCondition.alphaConfidence { + clear(sf.confidence[i:]) + return + } + + // I reached this alpha threshold, increment the confidence counter + // and check if I can finalize. 
+ sf.confidence[i]++ + if sf.confidence[i] >= terminationCondition.beta { + sf.finalized = true + return + } } - - sf.finalized = sf.confidence >= sf.beta - sf.nnarySlush.RecordSuccessfulPoll(choice) } func (sf *nnarySnowflake) RecordUnsuccessfulPoll() { - sf.confidence = 0 + clear(sf.confidence) } func (sf *nnarySnowflake) Finalized() bool { @@ -85,7 +102,7 @@ func (sf *nnarySnowflake) Finalized() bool { func (sf *nnarySnowflake) String() string { return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", - sf.confidence, + sf.confidence[0], sf.finalized, &sf.nnarySlush) } diff --git a/snow/consensus/snowball/parameters.go b/snow/consensus/snowball/parameters.go index 9a63a3316e6e..a13d99c27565 100644 --- a/snow/consensus/snowball/parameters.go +++ b/snow/consensus/snowball/parameters.go @@ -122,3 +122,8 @@ func (p Parameters) MinPercentConnectedHealthy() float64 { alphaRatio := float64(p.AlphaConfidence) / float64(p.K) return alphaRatio*(1-MinPercentConnectedBuffer) + MinPercentConnectedBuffer } + +type terminationCondition struct { + alphaConfidence int + beta int +} diff --git a/snow/consensus/snowball/unary_snowball.go b/snow/consensus/snowball/unary_snowball.go index 2c15a58cf971..24ed78cee43b 100644 --- a/snow/consensus/snowball/unary_snowball.go +++ b/snow/consensus/snowball/unary_snowball.go @@ -3,7 +3,10 @@ package snowball -import "fmt" +import ( + "fmt" + "slices" +) var _ Unary = (*unarySnowball)(nil) @@ -32,12 +35,11 @@ func (sb *unarySnowball) RecordPoll(count int) { func (sb *unarySnowball) Extend(choice int) Binary { bs := &binarySnowball{ binarySnowflake: binarySnowflake{ - binarySlush: binarySlush{preference: choice}, - confidence: sb.confidence, - alphaPreference: sb.alphaPreference, - alphaConfidence: sb.alphaConfidence, - beta: sb.beta, - finalized: sb.Finalized(), + binarySlush: binarySlush{preference: choice}, + confidence: slices.Clone(sb.confidence), + alphaPreference: sb.alphaPreference, + terminationConditions: 
sb.terminationConditions, + finalized: sb.Finalized(), }, preference: choice, } @@ -47,6 +49,7 @@ func (sb *unarySnowball) Extend(choice int) Binary { func (sb *unarySnowball) Clone() Unary { newSnowball := *sb + newSnowball.confidence = slices.Clone(sb.confidence) return &newSnowball } diff --git a/snow/consensus/snowball/unary_snowball_test.go b/snow/consensus/snowball/unary_snowball_test.go index 1178ca6bdaa9..4bea0458d95f 100644 --- a/snow/consensus/snowball/unary_snowball_test.go +++ b/snow/consensus/snowball/unary_snowball_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func UnarySnowballStateTest(t *testing.T, sb *unarySnowball, expectedPreferenceStrength, expectedConfidence int, expectedFinalized bool) { +func UnarySnowballStateTest(t *testing.T, sb *unarySnowball, expectedPreferenceStrength int, expectedConfidence []int, expectedFinalized bool) { require := require.New(t) require.Equal(expectedPreferenceStrength, sb.preferenceStrength) @@ -26,25 +26,25 @@ func TestUnarySnowball(t *testing.T) { sb := newUnarySnowball(alphaPreference, alphaConfidence, beta) sb.RecordPoll(alphaConfidence) - UnarySnowballStateTest(t, &sb, 1, 1, false) + UnarySnowballStateTest(t, &sb, 1, []int{1}, false) sb.RecordPoll(alphaPreference) - UnarySnowballStateTest(t, &sb, 2, 0, false) + UnarySnowballStateTest(t, &sb, 2, []int{0}, false) sb.RecordPoll(alphaConfidence) - UnarySnowballStateTest(t, &sb, 3, 1, false) + UnarySnowballStateTest(t, &sb, 3, []int{1}, false) sb.RecordUnsuccessfulPoll() - UnarySnowballStateTest(t, &sb, 3, 0, false) + UnarySnowballStateTest(t, &sb, 3, []int{0}, false) sb.RecordPoll(alphaConfidence) - UnarySnowballStateTest(t, &sb, 4, 1, false) + UnarySnowballStateTest(t, &sb, 4, []int{1}, false) sbCloneIntf := sb.Clone() require.IsType(&unarySnowball{}, sbCloneIntf) sbClone := sbCloneIntf.(*unarySnowball) - UnarySnowballStateTest(t, sbClone, 4, 1, false) + UnarySnowballStateTest(t, sbClone, 4, []int{1}, false) binarySnowball := 
sbClone.Extend(0) diff --git a/snow/consensus/snowball/unary_snowflake.go b/snow/consensus/snowball/unary_snowflake.go index edf5fdbd256f..3e21316dc370 100644 --- a/snow/consensus/snowball/unary_snowflake.go +++ b/snow/consensus/snowball/unary_snowflake.go @@ -3,33 +3,45 @@ package snowball -import "fmt" +import ( + "fmt" + "slices" +) var _ Unary = (*unarySnowflake)(nil) func newUnarySnowflake(alphaPreference, alphaConfidence, beta int) unarySnowflake { return unarySnowflake{ alphaPreference: alphaPreference, - alphaConfidence: alphaConfidence, - beta: beta, + terminationConditions: []terminationCondition{ + { + alphaConfidence: alphaConfidence, + beta: beta, + }, + }, + confidence: make([]int, 1), } } // unarySnowflake is the implementation of a unary snowflake instance +// Invariant: +// len(terminationConditions) == len(confidence) +// terminationConditions[i].alphaConfidence < terminationConditions[i+1].alphaConfidence +// terminationConditions[i].beta <= terminationConditions[i+1].beta +// confidence[i] >= confidence[i+1] (except after finalizing due to early termination) type unarySnowflake struct { - // beta is the number of consecutive successful queries required for - // finalization. - beta int - // alphaPreference is the threshold required to update the preference alphaPreference int - // alphaConfidence is the threshold required to increment the confidence counter - alphaConfidence int + // terminationConditions gives the ascending ordered list of alphaConfidence values + // required to increment the corresponding confidence counter. + // The corresponding beta values give the threshold required to finalize this instance. + terminationConditions []terminationCondition - // confidence tracks the number of successful polls in a row that have - // returned the preference - confidence int + // confidence is the number of consecutive succcessful polls for a given + // alphaConfidence threshold. 
+ // This instance finalizes when confidence[i] >= terminationConditions[i].beta for any i + confidence []int // finalized prevents the state from changing after the required number of // consecutive polls has been reached @@ -37,17 +49,27 @@ type unarySnowflake struct { } func (sf *unarySnowflake) RecordPoll(count int) { - if count < sf.alphaConfidence { - sf.RecordUnsuccessfulPoll() - return + for i, terminationCondition := range sf.terminationConditions { + // If I did not reach this alpha threshold, I did not + // reach any more alpha thresholds. + // Clear the remaining confidence counters. + if count < terminationCondition.alphaConfidence { + clear(sf.confidence[i:]) + return + } + + // I reached this alpha threshold, increment the confidence counter + // and check if I can finalize. + sf.confidence[i]++ + if sf.confidence[i] >= terminationCondition.beta { + sf.finalized = true + return + } } - - sf.confidence++ - sf.finalized = sf.finalized || sf.confidence >= sf.beta } func (sf *unarySnowflake) RecordUnsuccessfulPoll() { - sf.confidence = 0 + clear(sf.confidence) } func (sf *unarySnowflake) Finalized() bool { @@ -56,22 +78,22 @@ func (sf *unarySnowflake) Finalized() bool { func (sf *unarySnowflake) Extend(choice int) Binary { return &binarySnowflake{ - binarySlush: binarySlush{preference: choice}, - confidence: sf.confidence, - alphaPreference: sf.alphaPreference, - alphaConfidence: sf.alphaConfidence, - beta: sf.beta, - finalized: sf.finalized, + binarySlush: binarySlush{preference: choice}, + confidence: slices.Clone(sf.confidence), + alphaPreference: sf.alphaPreference, + terminationConditions: sf.terminationConditions, + finalized: sf.finalized, } } func (sf *unarySnowflake) Clone() Unary { newSnowflake := *sf + newSnowflake.confidence = slices.Clone(sf.confidence) return &newSnowflake } func (sf *unarySnowflake) String() string { return fmt.Sprintf("SF(Confidence = %d, Finalized = %v)", - sf.confidence, + sf.confidence[0], sf.finalized) } diff --git 
a/snow/consensus/snowball/unary_snowflake_test.go b/snow/consensus/snowball/unary_snowflake_test.go index 6a3348f53502..0c6282060b42 100644 --- a/snow/consensus/snowball/unary_snowflake_test.go +++ b/snow/consensus/snowball/unary_snowflake_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidence int, expectedFinalized bool) { +func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidence []int, expectedFinalized bool) { require := require.New(t) require.Equal(expectedConfidence, sf.confidence) @@ -25,19 +25,19 @@ func TestUnarySnowflake(t *testing.T) { sf := newUnarySnowflake(alphaPreference, alphaConfidence, beta) sf.RecordPoll(alphaConfidence) - UnarySnowflakeStateTest(t, &sf, 1, false) + UnarySnowflakeStateTest(t, &sf, []int{1}, false) sf.RecordUnsuccessfulPoll() - UnarySnowflakeStateTest(t, &sf, 0, false) + UnarySnowflakeStateTest(t, &sf, []int{0}, false) sf.RecordPoll(alphaConfidence) - UnarySnowflakeStateTest(t, &sf, 1, false) + UnarySnowflakeStateTest(t, &sf, []int{1}, false) sfCloneIntf := sf.Clone() require.IsType(&unarySnowflake{}, sfCloneIntf) sfClone := sfCloneIntf.(*unarySnowflake) - UnarySnowflakeStateTest(t, sfClone, 1, false) + UnarySnowflakeStateTest(t, sfClone, []int{1}, false) binarySnowflake := sfClone.Extend(0) @@ -53,11 +53,11 @@ func TestUnarySnowflake(t *testing.T) { require.True(binarySnowflake.Finalized()) sf.RecordPoll(alphaConfidence) - UnarySnowflakeStateTest(t, &sf, 2, true) + UnarySnowflakeStateTest(t, &sf, []int{2}, true) sf.RecordUnsuccessfulPoll() - UnarySnowflakeStateTest(t, &sf, 0, true) + UnarySnowflakeStateTest(t, &sf, []int{0}, true) sf.RecordPoll(alphaConfidence) - UnarySnowflakeStateTest(t, &sf, 1, true) + UnarySnowflakeStateTest(t, &sf, []int{1}, true) } From 7dca39699bebcab5c7a6fec3c3ec4c01f075f31f Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 6 Jun 2024 13:33:13 -0400 Subject: [PATCH 
050/102] Replace all chain namespaces with labels (#3053) --- api/metrics/prefix_gatherer.go | 38 ++- api/metrics/prefix_gatherer_test.go | 56 +++- chains/linearizable_vm.go | 3 - chains/manager.go | 310 ++++++++++++++---- database/meterdb/db.go | 16 +- database/meterdb/db_test.go | 4 +- node/node.go | 145 ++++++-- snow/consensus/snowman/consensus_test.go | 2 +- snow/context.go | 4 + snow/networking/benchlist/benchlist.go | 33 +- snow/networking/benchlist/benchlist_test.go | 4 + snow/networking/benchlist/manager.go | 23 +- snow/networking/benchlist/metrics.go | 36 -- snow/networking/handler/handler.go | 11 +- snow/networking/handler/handler_test.go | 7 + snow/networking/handler/health_test.go | 1 + snow/networking/handler/metrics.go | 22 +- snow/networking/router/chain_router.go | 5 +- .../networking/router/chain_router_metrics.go | 17 +- snow/networking/router/chain_router_test.go | 35 +- snow/networking/router/mock_router.go | 8 +- snow/networking/router/router.go | 3 +- snow/networking/router/traced_router.go | 6 +- snow/networking/sender/sender_test.go | 12 +- snow/networking/timeout/manager.go | 18 +- snow/networking/timeout/manager_test.go | 2 +- snow/networking/timeout/metrics.go | 93 ++---- snow/networking/tracker/resource_tracker.go | 29 +- snow/snowtest/snowtest.go | 3 +- tests/e2e/x/transfer/virtuous.go | 20 +- utils/resource/metrics.go | 27 +- utils/resource/usage.go | 2 +- utils/timer/adaptive_timeout_manager.go | 31 +- utils/timer/adaptive_timeout_manager_test.go | 3 +- vms/metervm/block_vm.go | 22 +- vms/metervm/vertex_vm.go | 22 +- vms/platformvm/vm_test.go | 4 +- vms/proposervm/batched_vm_test.go | 2 + vms/proposervm/block_test.go | 3 + vms/proposervm/config.go | 5 + vms/proposervm/post_fork_option_test.go | 2 + vms/proposervm/state_syncable_vm_test.go | 2 + vms/proposervm/vm.go | 18 +- vms/proposervm/vm_test.go | 12 + vms/rpcchainvm/vm_client.go | 17 +- vms/rpcchainvm/vm_server.go | 57 ++-- 46 files changed, 759 insertions(+), 436 deletions(-) 
delete mode 100644 snow/networking/benchlist/metrics.go diff --git a/api/metrics/prefix_gatherer.go b/api/metrics/prefix_gatherer.go index 1f0b78a24380..fae7adb26e84 100644 --- a/api/metrics/prefix_gatherer.go +++ b/api/metrics/prefix_gatherer.go @@ -4,8 +4,8 @@ package metrics import ( + "errors" "fmt" - "slices" "github.com/prometheus/client_golang/prometheus" "google.golang.org/protobuf/proto" @@ -15,7 +15,11 @@ import ( dto "github.com/prometheus/client_model/go" ) -var _ MultiGatherer = (*prefixGatherer)(nil) +var ( + _ MultiGatherer = (*prefixGatherer)(nil) + + errOverlappingNamespaces = errors.New("prefix could create overlapping namespaces") +) // NewPrefixGatherer returns a new MultiGatherer that merges metrics by adding a // prefix to their names. @@ -31,12 +35,14 @@ func (g *prefixGatherer) Register(prefix string, gatherer prometheus.Gatherer) e g.lock.Lock() defer g.lock.Unlock() - // TODO: Restrict prefixes to avoid potential conflicts - if slices.Contains(g.names, prefix) { - return fmt.Errorf("%w: %q", - errDuplicateGatherer, - prefix, - ) + for _, existingPrefix := range g.names { + if eitherIsPrefix(prefix, existingPrefix) { + return fmt.Errorf("%w: %q conflicts with %q", + errOverlappingNamespaces, + prefix, + existingPrefix, + ) + } } g.names = append(g.names, prefix) @@ -64,3 +70,19 @@ func (g *prefixedGatherer) Gather() ([]*dto.MetricFamily, error) { } return metricFamilies, err } + +// eitherIsPrefix returns true if either [a] is a prefix of [b] or [b] is a +// prefix of [a]. +// +// This function accounts for the usage of the namespace boundary, so "hello" is +// not considered a prefix of "helloworld". However, "hello" is considered a +// prefix of "hello_world". 
+func eitherIsPrefix(a, b string) bool { + if len(a) > len(b) { + a, b = b, a + } + return a == b[:len(a)] && // a is a prefix of b + (len(a) == 0 || // a is empty + len(a) == len(b) || // a is equal to b + b[len(a)] == metric.NamespaceSeparatorByte) // a ends at a namespace boundary of b +} diff --git a/api/metrics/prefix_gatherer_test.go b/api/metrics/prefix_gatherer_test.go index ba37540b01e3..ff2526e6742e 100644 --- a/api/metrics/prefix_gatherer_test.go +++ b/api/metrics/prefix_gatherer_test.go @@ -134,7 +134,7 @@ func TestPrefixGatherer_Register(t *testing.T) { prefixGatherer: firstPrefixGatherer(), prefix: firstPrefixedGatherer.prefix, gatherer: secondPrefixedGatherer.gatherer, - expectedErr: errDuplicateGatherer, + expectedErr: errOverlappingNamespaces, expectedPrefixGatherer: firstPrefixGatherer(), }, } @@ -148,3 +148,57 @@ func TestPrefixGatherer_Register(t *testing.T) { }) } } + +func TestEitherIsPrefix(t *testing.T) { + tests := []struct { + name string + a string + b string + expected bool + }{ + { + name: "empty strings", + a: "", + b: "", + expected: true, + }, + { + name: "an empty string", + a: "", + b: "hello", + expected: true, + }, + { + name: "same strings", + a: "x", + b: "x", + expected: true, + }, + { + name: "different strings", + a: "x", + b: "y", + expected: false, + }, + { + name: "splits namespace", + a: "hello", + b: "hello_world", + expected: true, + }, + { + name: "is prefix before separator", + a: "hello", + b: "helloworld", + expected: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, eitherIsPrefix(test.a, test.b)) + require.Equal(test.expected, eitherIsPrefix(test.b, test.a)) + }) + } +} diff --git a/chains/linearizable_vm.go b/chains/linearizable_vm.go index 0521e418667f..e7e99b77cb93 100644 --- a/chains/linearizable_vm.go +++ b/chains/linearizable_vm.go @@ -6,7 +6,6 @@ package chains import ( "context" - 
"github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -29,7 +28,6 @@ type initializeOnLinearizeVM struct { vmToInitialize common.VM vmToLinearize *linearizeOnInitializeVM - registerer metrics.MultiGatherer ctx *snow.Context db database.Database genesisBytes []byte @@ -42,7 +40,6 @@ type initializeOnLinearizeVM struct { func (vm *initializeOnLinearizeVM) Linearize(ctx context.Context, stopVertexID ids.ID) error { vm.vmToLinearize.stopVertexID = stopVertexID - vm.ctx.Metrics = vm.registerer return vm.vmToInitialize.Initialize( ctx, vm.ctx, diff --git a/chains/manager.go b/chains/manager.go index 8548954e1c5e..bdc6d0ef0180 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -13,7 +13,6 @@ import ( "sync" "time" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/api/health" @@ -74,8 +73,19 @@ import ( ) const ( + ChainLabel = "chain" + defaultChannelSize = 1 initialQueueSize = 3 + + avalancheNamespace = constants.PlatformName + metric.NamespaceSeparator + "avalanche" + handlerNamespace = constants.PlatformName + metric.NamespaceSeparator + "handler" + meterchainvmNamespace = constants.PlatformName + metric.NamespaceSeparator + "meterchainvm" + meterdagvmNamespace = constants.PlatformName + metric.NamespaceSeparator + "meterdagvm" + proposervmNamespace = constants.PlatformName + metric.NamespaceSeparator + "proposervm" + p2pNamespace = constants.PlatformName + metric.NamespaceSeparator + "p2p" + snowmanNamespace = constants.PlatformName + metric.NamespaceSeparator + "snowman" + stakeNamespace = constants.PlatformName + metric.NamespaceSeparator + "stake" ) var ( @@ -207,7 +217,9 @@ type ManagerConfig struct { // ShutdownNodeFunc allows the chain manager to issue a request to shutdown the node ShutdownNodeFunc func(exitCode int) MeterVMEnabled bool // 
Should each VM be wrapped with a MeterVM - Metrics metrics.MultiGatherer + + Metrics metrics.MultiGatherer + MeterDBMetrics metrics.MultiGatherer FrontierPollFrequency time.Duration ConsensusAppConcurrency int @@ -259,10 +271,60 @@ type manager struct { // snowman++ related interface to allow validators retrieval validatorState validators.State + + avalancheGatherer metrics.MultiGatherer // chainID + handlerGatherer metrics.MultiGatherer // chainID + meterChainVMGatherer metrics.MultiGatherer // chainID + meterDAGVMGatherer metrics.MultiGatherer // chainID + proposervmGatherer metrics.MultiGatherer // chainID + p2pGatherer metrics.MultiGatherer // chainID + snowmanGatherer metrics.MultiGatherer // chainID + stakeGatherer metrics.MultiGatherer // chainID + vmGatherer map[ids.ID]metrics.MultiGatherer // vmID -> chainID } // New returns a new Manager -func New(config *ManagerConfig) Manager { +func New(config *ManagerConfig) (Manager, error) { + avalancheGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(avalancheNamespace, avalancheGatherer); err != nil { + return nil, err + } + + handlerGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(handlerNamespace, handlerGatherer); err != nil { + return nil, err + } + + meterChainVMGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(meterchainvmNamespace, meterChainVMGatherer); err != nil { + return nil, err + } + + meterDAGVMGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(meterdagvmNamespace, meterDAGVMGatherer); err != nil { + return nil, err + } + + proposervmGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(proposervmNamespace, proposervmGatherer); err != nil { + return nil, err + } + + p2pGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(p2pNamespace, p2pGatherer); err != nil { + return nil, err + } + + 
snowmanGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(snowmanNamespace, snowmanGatherer); err != nil { + return nil, err + } + + stakeGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(stakeNamespace, stakeGatherer); err != nil { + return nil, err + } + return &manager{ Aliaser: ids.NewAliaser(), ManagerConfig: *config, @@ -270,7 +332,17 @@ func New(config *ManagerConfig) Manager { chainsQueue: buffer.NewUnboundedBlockingDeque[ChainParameters](initialQueueSize), unblockChainCreatorCh: make(chan struct{}), chainCreatorShutdownCh: make(chan struct{}), - } + + avalancheGatherer: avalancheGatherer, + handlerGatherer: handlerGatherer, + meterChainVMGatherer: meterChainVMGatherer, + meterDAGVMGatherer: meterDAGVMGatherer, + proposervmGatherer: proposervmGatherer, + p2pGatherer: p2pGatherer, + snowmanGatherer: snowmanGatherer, + stakeGatherer: stakeGatherer, + vmGatherer: make(map[ids.ID]metrics.MultiGatherer), + }, nil } // QueueChainCreation queues a chain creation request @@ -419,16 +491,17 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c return nil, fmt.Errorf("error while creating chain's log %w", err) } - consensusMetrics := prometheus.NewRegistry() - chainNamespace := metric.AppendNamespace(constants.PlatformName, primaryAlias) - if err := m.Metrics.Register(chainNamespace, consensusMetrics); err != nil { - return nil, fmt.Errorf("error while registering chain's metrics %w", err) + snowmanMetrics, err := metrics.MakeAndRegister( + m.snowmanGatherer, + primaryAlias, + ) + if err != nil { + return nil, err } - vmMetrics := metrics.NewMultiGatherer() - vmNamespace := metric.AppendNamespace(chainNamespace, "vm") - if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil { - return nil, fmt.Errorf("error while registering vm's metrics %w", err) + vmMetrics, err := m.getOrMakeVMRegisterer(chainParams.VMID, primaryAlias) + if err != nil { + return nil, err } 
ctx := &snow.ConsensusContext{ @@ -454,10 +527,11 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c ValidatorState: m.validatorState, ChainDataDir: chainDataDir, }, + PrimaryAlias: primaryAlias, + Registerer: snowmanMetrics, BlockAcceptor: m.BlockAcceptorGroup, TxAcceptor: m.TxAcceptorGroup, VertexAcceptor: m.VertexAcceptorGroup, - Registerer: consensusMetrics, } // Get a factory for the vm we want to use on our chain @@ -551,10 +625,20 @@ func (m *manager) createAvalancheChain( State: snow.Initializing, }) - meterDB, err := meterdb.New("db", ctx.Registerer, m.DB) + primaryAlias := m.PrimaryAliasOrDefault(ctx.ChainID) + meterDBReg, err := metrics.MakeAndRegister( + m.MeterDBMetrics, + primaryAlias, + ) + if err != nil { + return nil, err + } + + meterDB, err := meterdb.New(meterDBReg, m.DB) if err != nil { return nil, err } + prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) vmDB := prefixdb.New(VMDBPrefix, prefixDB) vertexDB := prefixdb.New(VertexDBPrefix, prefixDB) @@ -562,22 +646,19 @@ func (m *manager) createAvalancheChain( txBootstrappingDB := prefixdb.New(TxBootstrappingDBPrefix, prefixDB) blockBootstrappingDB := prefixdb.New(BlockBootstrappingDBPrefix, prefixDB) - // This converts the prefix for all the Avalanche consensus metrics from - // `avalanche_{chainID}_` into `avalanche_{chainID}_avalanche_` so that - // there are no conflicts when registering the Snowman consensus metrics. 
- avalancheConsensusMetrics := prometheus.NewRegistry() - primaryAlias := m.PrimaryAliasOrDefault(ctx.ChainID) - chainNamespace := metric.AppendNamespace(constants.PlatformName, primaryAlias) - avalancheDAGNamespace := metric.AppendNamespace(chainNamespace, "avalanche") - if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { - return nil, fmt.Errorf("error while registering DAG metrics %w", err) + avalancheMetrics, err := metrics.MakeAndRegister( + m.avalancheGatherer, + primaryAlias, + ) + if err != nil { + return nil, err } - vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", avalancheConsensusMetrics) + vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", avalancheMetrics) if err != nil { return nil, err } - txBlocker, err := queue.New(txBootstrappingDB, "tx", avalancheConsensusMetrics) + txBlocker, err := queue.New(txBootstrappingDB, "tx", avalancheMetrics) if err != nil { return nil, err } @@ -591,7 +672,7 @@ func (m *manager) createAvalancheChain( m.TimeoutManager, p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, sb, - avalancheConsensusMetrics, + avalancheMetrics, ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) @@ -627,7 +708,15 @@ func (m *manager) createAvalancheChain( dagVM := vm if m.MeterVMEnabled { - dagVM = metervm.NewVertexVM(dagVM) + meterdagvmReg, err := metrics.MakeAndRegister( + m.meterDAGVMGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + dagVM = metervm.NewVertexVM(dagVM, meterdagvmReg) } if m.TracingEnabled { dagVM = tracedvm.NewVertexVM(dagVM, m.Tracer) @@ -645,17 +734,6 @@ func (m *manager) createAvalancheChain( }, ) - avalancheRegisterer := metrics.NewMultiGatherer() - snowmanRegisterer := metrics.NewMultiGatherer() - if err := ctx.Context.Metrics.Register("avalanche", avalancheRegisterer); err != nil { - return nil, err - } - if err := ctx.Context.Metrics.Register("", snowmanRegisterer); err != nil { - return 
nil, err - } - - ctx.Context.Metrics = avalancheRegisterer - // The channel through which a VM may send messages to the consensus engine // VM uses this channel to notify engine that a block is ready to be made msgChan := make(chan common.Message, defaultChannelSize) @@ -695,14 +773,20 @@ func (m *manager) createAvalancheChain( zap.Uint64("numHistoricalBlocks", numHistoricalBlocks), ) - chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) - // Note: this does not use [dagVM] to ensure we use the [vm]'s height index. untracedVMWrappedInsideProposerVM := NewLinearizeOnInitializeVM(vm) var vmWrappedInsideProposerVM block.ChainVM = untracedVMWrappedInsideProposerVM if m.TracingEnabled { - vmWrappedInsideProposerVM = tracedvm.NewBlockVM(vmWrappedInsideProposerVM, chainAlias, m.Tracer) + vmWrappedInsideProposerVM = tracedvm.NewBlockVM(vmWrappedInsideProposerVM, primaryAlias, m.Tracer) + } + + proposervmReg, err := metrics.MakeAndRegister( + m.proposervmGatherer, + primaryAlias, + ) + if err != nil { + return nil, err } // Note: vmWrappingProposerVM is the VM that the Snowman engines should be @@ -717,11 +801,20 @@ func (m *manager) createAvalancheChain( NumHistoricalBlocks: numHistoricalBlocks, StakingLeafSigner: m.StakingTLSSigner, StakingCertLeaf: m.StakingTLSCert, + Registerer: proposervmReg, }, ) if m.MeterVMEnabled { - vmWrappingProposerVM = metervm.NewBlockVM(vmWrappingProposerVM) + meterchainvmReg, err := metrics.MakeAndRegister( + m.meterChainVMGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + vmWrappingProposerVM = metervm.NewBlockVM(vmWrappingProposerVM, meterchainvmReg) } if m.TracingEnabled { vmWrappingProposerVM = tracedvm.NewBlockVM(vmWrappingProposerVM, "proposervm", m.Tracer) @@ -734,7 +827,6 @@ func (m *manager) createAvalancheChain( vmToInitialize: vmWrappingProposerVM, vmToLinearize: untracedVMWrappedInsideProposerVM, - registerer: snowmanRegisterer, ctx: ctx.Context, db: vmDB, genesisBytes: genesisData, @@ -756,16 +848,32 @@ func 
(m *manager) createAvalancheChain( sampleK = int(bootstrapWeight) } - connectedValidators, err := tracker.NewMeteredPeers(ctx.Registerer) + stakeReg, err := metrics.MakeAndRegister( + m.stakeGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + connectedValidators, err := tracker.NewMeteredPeers(stakeReg) if err != nil { return nil, fmt.Errorf("error creating peer tracker: %w", err) } vdrs.RegisterSetCallbackListener(ctx.SubnetID, connectedValidators) + p2pReg, err := metrics.MakeAndRegister( + m.p2pGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + peerTracker, err := p2p.NewPeerTracker( ctx.Log, "peer_tracker", - ctx.Registerer, + p2pReg, set.Of(ctx.NodeID), nil, ) @@ -773,6 +881,14 @@ func (m *manager) createAvalancheChain( return nil, fmt.Errorf("error creating peer tracker: %w", err) } + handlerReg, err := metrics.MakeAndRegister( + m.handlerGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + // Asynchronously passes messages from the network to the consensus engine h, err := handler.New( ctx, @@ -785,6 +901,7 @@ func (m *manager) createAvalancheChain( sb, connectedValidators, peerTracker, + handlerReg, ) if err != nil { return nil, fmt.Errorf("error initializing network handler: %w", err) @@ -867,7 +984,7 @@ func (m *manager) createAvalancheChain( ctx.Log, m.BootstrapMaxTimeGetAncestors, m.BootstrapAncestorsMaxContainersSent, - avalancheConsensusMetrics, + avalancheMetrics, ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche base message handler: %w", err) @@ -899,7 +1016,7 @@ func (m *manager) createAvalancheChain( avalancheBootstrapper, err := avbootstrap.New( avalancheBootstrapperConfig, snowmanBootstrapper.Start, - avalancheConsensusMetrics, + avalancheMetrics, ) if err != nil { return nil, fmt.Errorf("error initializing avalanche bootstrapper: %w", err) @@ -923,12 +1040,12 @@ func (m *manager) createAvalancheChain( }) // Register health check for this chain - if err := 
m.Health.RegisterHealthCheck(chainAlias, h, ctx.SubnetID.String()); err != nil { - return nil, fmt.Errorf("couldn't add health check for chain %s: %w", chainAlias, err) + if err := m.Health.RegisterHealthCheck(primaryAlias, h, ctx.SubnetID.String()); err != nil { + return nil, fmt.Errorf("couldn't add health check for chain %s: %w", primaryAlias, err) } return &chain{ - Name: chainAlias, + Name: primaryAlias, Context: ctx, VM: dagVM, Handler: h, @@ -953,10 +1070,20 @@ func (m *manager) createSnowmanChain( State: snow.Initializing, }) - meterDB, err := meterdb.New("db", ctx.Registerer, m.DB) + primaryAlias := m.PrimaryAliasOrDefault(ctx.ChainID) + meterDBReg, err := metrics.MakeAndRegister( + m.MeterDBMetrics, + primaryAlias, + ) if err != nil { return nil, err } + + meterDB, err := meterdb.New(meterDBReg, m.DB) + if err != nil { + return nil, err + } + prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) vmDB := prefixdb.New(VMDBPrefix, prefixDB) bootstrappingDB := prefixdb.New(ChainBootstrappingDBPrefix, prefixDB) @@ -1049,9 +1176,16 @@ func (m *manager) createSnowmanChain( zap.Uint64("numHistoricalBlocks", numHistoricalBlocks), ) - chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) if m.TracingEnabled { - vm = tracedvm.NewBlockVM(vm, chainAlias, m.Tracer) + vm = tracedvm.NewBlockVM(vm, primaryAlias, m.Tracer) + } + + proposervmReg, err := metrics.MakeAndRegister( + m.proposervmGatherer, + primaryAlias, + ) + if err != nil { + return nil, err } vm = proposervm.New( @@ -1064,11 +1198,20 @@ func (m *manager) createSnowmanChain( NumHistoricalBlocks: numHistoricalBlocks, StakingLeafSigner: m.StakingTLSSigner, StakingCertLeaf: m.StakingTLSCert, + Registerer: proposervmReg, }, ) if m.MeterVMEnabled { - vm = metervm.NewBlockVM(vm) + meterchainvmReg, err := metrics.MakeAndRegister( + m.meterChainVMGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + vm = metervm.NewBlockVM(vm, meterchainvmReg) } if m.TracingEnabled { vm = tracedvm.NewBlockVM(vm, 
"proposervm", m.Tracer) @@ -1103,16 +1246,32 @@ func (m *manager) createSnowmanChain( sampleK = int(bootstrapWeight) } - connectedValidators, err := tracker.NewMeteredPeers(ctx.Registerer) + stakeReg, err := metrics.MakeAndRegister( + m.stakeGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + connectedValidators, err := tracker.NewMeteredPeers(stakeReg) if err != nil { return nil, fmt.Errorf("error creating peer tracker: %w", err) } vdrs.RegisterSetCallbackListener(ctx.SubnetID, connectedValidators) + p2pReg, err := metrics.MakeAndRegister( + m.p2pGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + peerTracker, err := p2p.NewPeerTracker( ctx.Log, "peer_tracker", - ctx.Registerer, + p2pReg, set.Of(ctx.NodeID), nil, ) @@ -1120,6 +1279,14 @@ func (m *manager) createSnowmanChain( return nil, fmt.Errorf("error creating peer tracker: %w", err) } + handlerReg, err := metrics.MakeAndRegister( + m.handlerGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + // Asynchronously passes messages from the network to the consensus engine h, err := handler.New( ctx, @@ -1132,6 +1299,7 @@ func (m *manager) createSnowmanChain( sb, connectedValidators, peerTracker, + handlerReg, ) if err != nil { return nil, fmt.Errorf("couldn't initialize message handler: %w", err) @@ -1244,12 +1412,12 @@ func (m *manager) createSnowmanChain( }) // Register health checks - if err := m.Health.RegisterHealthCheck(chainAlias, h, ctx.SubnetID.String()); err != nil { - return nil, fmt.Errorf("couldn't add health check for chain %s: %w", chainAlias, err) + if err := m.Health.RegisterHealthCheck(primaryAlias, h, ctx.SubnetID.String()); err != nil { + return nil, fmt.Errorf("couldn't add health check for chain %s: %w", primaryAlias, err) } return &chain{ - Name: chainAlias, + Name: primaryAlias, Context: ctx, VM: vm, Handler: h, @@ -1390,3 +1558,27 @@ func (m *manager) getChainConfig(id ids.ID) (ChainConfig, error) { return ChainConfig{}, nil } + 
+func (m *manager) getOrMakeVMRegisterer(vmID ids.ID, chainAlias string) (metrics.MultiGatherer, error) { + vmGatherer, ok := m.vmGatherer[vmID] + if !ok { + vmName := constants.VMName(vmID) + vmNamespace := metric.AppendNamespace(constants.PlatformName, vmName) + vmGatherer = metrics.NewLabelGatherer(ChainLabel) + err := m.Metrics.Register( + vmNamespace, + vmGatherer, + ) + if err != nil { + return nil, err + } + m.vmGatherer[vmID] = vmGatherer + } + + chainReg := metrics.NewPrefixGatherer() + err := vmGatherer.Register( + chainAlias, + chainReg, + ) + return chainReg, err +} diff --git a/database/meterdb/db.go b/database/meterdb/db.go index 5f9ef51df168..af41746b32e4 100644 --- a/database/meterdb/db.go +++ b/database/meterdb/db.go @@ -98,7 +98,6 @@ type Database struct { // New returns a new database with added metrics func New( - namespace string, reg prometheus.Registerer, db database.Database, ) (*Database, error) { @@ -106,25 +105,22 @@ func New( db: db, calls: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "calls", - Help: "number of calls to the database", + Name: "calls", + Help: "number of calls to the database", }, methodLabels, ), duration: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "duration", - Help: "time spent in database calls (ns)", + Name: "duration", + Help: "time spent in database calls (ns)", }, methodLabels, ), size: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "size", - Help: "size of data passed in database calls", + Name: "size", + Help: "size of data passed in database calls", }, methodLabels, ), diff --git a/database/meterdb/db_test.go b/database/meterdb/db_test.go index 48a8966b2772..57cedc181043 100644 --- a/database/meterdb/db_test.go +++ b/database/meterdb/db_test.go @@ -18,7 +18,7 @@ func TestInterface(t *testing.T) { for name, test := range database.Tests { t.Run(name, func(t *testing.T) { baseDB := memdb.New() - db, err := 
New("", prometheus.NewRegistry(), baseDB) + db, err := New(prometheus.NewRegistry(), baseDB) require.NoError(t, err) test(t, db) @@ -28,7 +28,7 @@ func TestInterface(t *testing.T) { func newDB(t testing.TB) database.Database { baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) + db, err := New(prometheus.NewRegistry(), baseDB) require.NoError(t, err) return db } diff --git a/node/node.go b/node/node.go index 847979b9b63a..09fb05d06e86 100644 --- a/node/node.go +++ b/node/node.go @@ -91,9 +91,17 @@ const ( ipResolutionTimeout = 30 * time.Second - apiNamespace = constants.PlatformName + metric.NamespaceSeparator + "api" - dbNamespace = constants.PlatformName + metric.NamespaceSeparator + "db_internal" - networkNamespace = constants.PlatformName + metric.NamespaceSeparator + "network" + apiNamespace = constants.PlatformName + metric.NamespaceSeparator + "api" + benchlistNamespace = constants.PlatformName + metric.NamespaceSeparator + "benchlist" + dbNamespace = constants.PlatformName + metric.NamespaceSeparator + "db" + healthNamespace = constants.PlatformName + metric.NamespaceSeparator + "health" + meterDBNamespace = constants.PlatformName + metric.NamespaceSeparator + "meterdb" + networkNamespace = constants.PlatformName + metric.NamespaceSeparator + "network" + processNamespace = constants.PlatformName + metric.NamespaceSeparator + "process" + requestsNamespace = constants.PlatformName + metric.NamespaceSeparator + "requests" + resourceTrackerNamespace = constants.PlatformName + metric.NamespaceSeparator + "resource_tracker" + responsesNamespace = constants.PlatformName + metric.NamespaceSeparator + "responses" + systemResourcesNamespace = constants.PlatformName + metric.NamespaceSeparator + "system_resources" ) var ( @@ -165,7 +173,10 @@ func New( return nil, fmt.Errorf("couldn't initialize tracer: %w", err) } - n.initMetrics() + if err := n.initMetrics(); err != nil { + return nil, fmt.Errorf("couldn't initialize metrics: %w", err) + } 
+ n.initNAT() if err := n.initAPIServer(); err != nil { // Start the API Server return nil, fmt.Errorf("couldn't initialize API server: %w", err) @@ -213,7 +224,7 @@ func New( logger.Warn("sybil control is not enforced") n.vdrs = newOverriddenManager(constants.PrimaryNetworkID, n.vdrs) } - if err := n.initResourceManager(n.MetricsRegisterer); err != nil { + if err := n.initResourceManager(); err != nil { return nil, fmt.Errorf("problem initializing resource manager: %w", err) } n.initCPUTargeter(&config.CPUTargeterConfig) @@ -363,8 +374,8 @@ type Node struct { DoneShuttingDown sync.WaitGroup // Metrics Registerer - MetricsRegisterer *prometheus.Registry - MetricsGatherer metrics.MultiGatherer + MetricsGatherer metrics.MultiGatherer + MeterDBMetricsGatherer metrics.MultiGatherer VMAliaser ids.Aliaser VMManager vms.Manager @@ -531,6 +542,16 @@ func (n *Node) initNetworking(reg prometheus.Registerer) error { // Configure benchlist n.Config.BenchlistConfig.Validators = n.vdrs n.Config.BenchlistConfig.Benchable = n.chainRouter + n.Config.BenchlistConfig.BenchlistRegisterer = metrics.NewLabelGatherer(chains.ChainLabel) + + err = n.MetricsGatherer.Register( + benchlistNamespace, + n.Config.BenchlistConfig.BenchlistRegisterer, + ) + if err != nil { + return err + } + n.benchlistManager = benchlist.NewManager(&n.Config.BenchlistConfig) n.uptimeCalculator = uptime.NewLockedCalculator() @@ -770,7 +791,15 @@ func (n *Node) initDatabase() error { n.DB = versiondb.New(n.DB) } - n.DB, err = meterdb.New("db", n.MetricsRegisterer, n.DB) + meterDBReg, err := metrics.MakeAndRegister( + n.MeterDBMetricsGatherer, + "all", + ) + if err != nil { + return err + } + + n.DB, err = meterdb.New(meterDBReg, n.DB) if err != nil { return err } @@ -891,9 +920,13 @@ func (n *Node) initChains(genesisBytes []byte) error { return n.chainManager.StartChainCreator(platformChain) } -func (n *Node) initMetrics() { - n.MetricsRegisterer = prometheus.NewRegistry() - n.MetricsGatherer = 
metrics.NewMultiGatherer() +func (n *Node) initMetrics() error { + n.MetricsGatherer = metrics.NewPrefixGatherer() + n.MeterDBMetricsGatherer = metrics.NewLabelGatherer(chains.ChainLabel) + return n.MetricsGatherer.Register( + meterDBNamespace, + n.MeterDBMetricsGatherer, + ) } func (n *Node) initNAT() { @@ -1043,11 +1076,27 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { cChainID, ) + requestsReg, err := metrics.MakeAndRegister( + n.MetricsGatherer, + requestsNamespace, + ) + if err != nil { + return err + } + + responseReg, err := metrics.MakeAndRegister( + n.MetricsGatherer, + responsesNamespace, + ) + if err != nil { + return err + } + n.timeoutManager, err = timeout.NewManager( &n.Config.AdaptiveTimeoutConfig, n.benchlistManager, - "requests", - n.MetricsRegisterer, + requestsReg, + responseReg, ) if err != nil { return err @@ -1065,8 +1114,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { n.Config.TrackedSubnets, n.Shutdown, n.Config.RouterHealthConfig, - "requests", - n.MetricsRegisterer, + requestsReg, ) if err != nil { return fmt.Errorf("couldn't initialize chain router: %w", err) @@ -1076,7 +1124,8 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { if err != nil { return fmt.Errorf("failed to initialize subnets: %w", err) } - n.chainManager = chains.New( + + n.chainManager, err = chains.New( &chains.ManagerConfig{ SybilProtectionEnabled: n.Config.SybilProtectionEnabled, StakingTLSSigner: n.StakingTLSSigner, @@ -1108,6 +1157,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { ShutdownNodeFunc: n.Shutdown, MeterVMEnabled: n.Config.MeterVMEnabled, Metrics: n.MetricsGatherer, + MeterDBMetrics: n.MeterDBMetricsGatherer, SubnetConfigs: n.Config.SubnetConfigs, ChainConfigs: n.Config.ChainConfigs, FrontierPollFrequency: n.Config.FrontierPollFrequency, @@ -1125,6 +1175,9 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { Subnets: subnets, }, ) + if err != nil { + return err + } // Notify the API 
server when new chains are created n.chainManager.AddRegistrant(n.APIServer) @@ -1246,19 +1299,23 @@ func (n *Node) initMetricsAPI() error { return nil } - if err := n.MetricsGatherer.Register(constants.PlatformName, n.MetricsRegisterer); err != nil { + processReg, err := metrics.MakeAndRegister( + n.MetricsGatherer, + processNamespace, + ) + if err != nil { return err } // Current state of process metrics. processCollector := collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}) - if err := n.MetricsRegisterer.Register(processCollector); err != nil { + if err := processReg.Register(processCollector); err != nil { return err } // Go process metrics using debug.GCStats. goCollector := collectors.NewGoCollector() - if err := n.MetricsRegisterer.Register(goCollector); err != nil { + if err := processReg.Register(goCollector); err != nil { return err } @@ -1375,11 +1432,18 @@ func (n *Node) initInfoAPI() error { // initHealthAPI initializes the Health API service // Assumes n.Log, n.Net, n.APIServer, n.HTTPLog already initialized func (n *Node) initHealthAPI() error { - healthChecker, err := health.New(n.Log, n.MetricsRegisterer) + healthReg, err := metrics.MakeAndRegister( + n.MetricsGatherer, + healthNamespace, + ) + if err != nil { + return err + } + + n.health, err = health.New(n.Log, healthReg) if err != nil { return err } - n.health = healthChecker if !n.Config.HealthAPIEnabled { n.Log.Info("skipping health API initialization because it has been disabled") @@ -1387,18 +1451,18 @@ func (n *Node) initHealthAPI() error { } n.Log.Info("initializing Health API") - err = healthChecker.RegisterHealthCheck("network", n.Net, health.ApplicationTag) + err = n.health.RegisterHealthCheck("network", n.Net, health.ApplicationTag) if err != nil { return fmt.Errorf("couldn't register network health check: %w", err) } - err = healthChecker.RegisterHealthCheck("router", n.chainRouter, health.ApplicationTag) + err = n.health.RegisterHealthCheck("router", n.chainRouter, 
health.ApplicationTag) if err != nil { return fmt.Errorf("couldn't register router health check: %w", err) } // TODO: add database health to liveness check - err = healthChecker.RegisterHealthCheck("database", n.DB, health.ApplicationTag) + err = n.health.RegisterHealthCheck("database", n.DB, health.ApplicationTag) if err != nil { return fmt.Errorf("couldn't register database health check: %w", err) } @@ -1430,7 +1494,7 @@ func (n *Node) initHealthAPI() error { return fmt.Errorf("couldn't register resource health check: %w", err) } - handler, err := health.NewGetAndPostHandler(n.Log, healthChecker) + handler, err := health.NewGetAndPostHandler(n.Log, n.health) if err != nil { return err } @@ -1445,7 +1509,7 @@ func (n *Node) initHealthAPI() error { } err = n.APIServer.AddRoute( - health.NewGetHandler(healthChecker.Readiness), + health.NewGetHandler(n.health.Readiness), "health", "/readiness", ) @@ -1454,7 +1518,7 @@ func (n *Node) initHealthAPI() error { } err = n.APIServer.AddRoute( - health.NewGetHandler(healthChecker.Health), + health.NewGetHandler(n.health.Health), "health", "/health", ) @@ -1463,7 +1527,7 @@ func (n *Node) initHealthAPI() error { } return n.APIServer.AddRoute( - health.NewGetHandler(healthChecker.Liveness), + health.NewGetHandler(n.health.Liveness), "health", "/liveness", ) @@ -1513,14 +1577,21 @@ func (n *Node) initAPIAliases(genesisBytes []byte) error { } // Initialize [n.resourceManager]. 
-func (n *Node) initResourceManager(reg prometheus.Registerer) error { +func (n *Node) initResourceManager() error { + systemResourcesRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + systemResourcesNamespace, + ) + if err != nil { + return err + } resourceManager, err := resource.NewManager( n.Log, n.Config.DatabaseConfig.Path, n.Config.SystemTrackerFrequency, n.Config.SystemTrackerCPUHalflife, n.Config.SystemTrackerDiskHalflife, - reg, + systemResourcesRegisterer, ) if err != nil { return err @@ -1528,7 +1599,19 @@ func (n *Node) initResourceManager(reg prometheus.Registerer) error { n.resourceManager = resourceManager n.resourceManager.TrackProcess(os.Getpid()) - n.resourceTracker, err = tracker.NewResourceTracker(reg, n.resourceManager, &meter.ContinuousFactory{}, n.Config.SystemTrackerProcessingHalflife) + resourceTrackerRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + resourceTrackerNamespace, + ) + if err != nil { + return err + } + n.resourceTracker, err = tracker.NewResourceTracker( + resourceTrackerRegisterer, + n.resourceManager, + &meter.ContinuousFactory{}, + n.Config.SystemTrackerProcessingHalflife, + ) return err } diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index b4cb5b03a494..bb51790b76f7 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -1369,7 +1369,7 @@ func ErrorOnAddDecidedBlockTest(t *testing.T, factory Factory) { require.ErrorIs(err, errUnknownParentBlock) } -func gatherCounterGauge(t *testing.T, reg *prometheus.Registry) map[string]float64 { +func gatherCounterGauge(t *testing.T, reg prometheus.Gatherer) map[string]float64 { ms, err := reg.Gather() require.NoError(t, err) mss := make(map[string]float64) diff --git a/snow/context.go b/snow/context.go index 2fa501571890..26fc67f213a8 100644 --- a/snow/context.go +++ b/snow/context.go @@ -65,6 +65,10 @@ type Registerer interface { type ConsensusContext 
struct { *Context + // PrimaryAlias is the primary alias of the chain this context exists + // within. + PrimaryAlias string + // Registers all consensus metrics. Registerer Registerer diff --git a/snow/networking/benchlist/benchlist.go b/snow/networking/benchlist/benchlist.go index 2bf68e049864..453395379435 100644 --- a/snow/networking/benchlist/benchlist.go +++ b/snow/networking/benchlist/benchlist.go @@ -9,11 +9,13 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -50,8 +52,9 @@ type failureStreak struct { type benchlist struct { lock sync.RWMutex // Context of the chain this is the benchlist for - ctx *snow.ConsensusContext - metrics metrics + ctx *snow.ConsensusContext + + numBenched, weightBenched prometheus.Gauge // Used to notify the timer that it should recalculate when it should fire resetTimer chan struct{} @@ -99,13 +102,22 @@ func NewBenchlist( minimumFailingDuration, duration time.Duration, maxPortion float64, + reg prometheus.Registerer, ) (Benchlist, error) { if maxPortion < 0 || maxPortion >= 1 { return nil, fmt.Errorf("max portion of benched stake must be in [0,1) but got %f", maxPortion) } benchlist := &benchlist{ - ctx: ctx, + ctx: ctx, + numBenched: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "benched_num", + Help: "Number of currently benched validators", + }), + weightBenched: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "benched_weight", + Help: "Weight of currently benched validators", + }), resetTimer: make(chan struct{}, 1), failureStreaks: make(map[ids.NodeID]failureStreak), benchlistSet: 
set.Set[ids.NodeID]{}, @@ -117,7 +129,12 @@ func NewBenchlist( duration: duration, maxPortion: maxPortion, } - if err := benchlist.metrics.Initialize(ctx.Registerer); err != nil { + + err := utils.Err( + reg.Register(benchlist.numBenched), + reg.Register(benchlist.weightBenched), + ) + if err != nil { return nil, err } @@ -188,7 +205,7 @@ func (b *benchlist) removedExpiredNodes() { b.benchable.Unbenched(b.ctx.ChainID, nodeID) } - b.metrics.numBenched.Set(float64(b.benchedHeap.Len())) + b.numBenched.Set(float64(b.benchedHeap.Len())) benchedStake, err := b.vdrs.SubsetWeight(b.ctx.SubnetID, b.benchlistSet) if err != nil { b.ctx.Log.Error("error calculating benched stake", @@ -197,7 +214,7 @@ func (b *benchlist) removedExpiredNodes() { ) return } - b.metrics.weightBenched.Set(float64(benchedStake)) + b.weightBenched.Set(float64(benchedStake)) } func (b *benchlist) durationToSleep() time.Duration { @@ -338,6 +355,6 @@ func (b *benchlist) bench(nodeID ids.NodeID) { } // Update metrics - b.metrics.numBenched.Set(float64(b.benchedHeap.Len())) - b.metrics.weightBenched.Set(float64(newBenchedStake)) + b.numBenched.Set(float64(b.benchedHeap.Len())) + b.weightBenched.Set(float64(newBenchedStake)) } diff --git a/snow/networking/benchlist/benchlist_test.go b/snow/networking/benchlist/benchlist_test.go index 45568392297e..3a52be818f75 100644 --- a/snow/networking/benchlist/benchlist_test.go +++ b/snow/networking/benchlist/benchlist_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -49,6 +50,7 @@ func TestBenchlistAdd(t *testing.T) { minimumFailingDuration, duration, maxPortion, + prometheus.NewRegistry(), ) require.NoError(err) b := benchIntf.(*benchlist) @@ -173,6 +175,7 @@ func TestBenchlistMaxStake(t *testing.T) { minimumFailingDuration, duration, maxPortion, + prometheus.NewRegistry(), ) require.NoError(err) b := 
benchIntf.(*benchlist) @@ -295,6 +298,7 @@ func TestBenchlistRemove(t *testing.T) { minimumFailingDuration, duration, maxPortion, + prometheus.NewRegistry(), ) require.NoError(err) b := benchIntf.(*benchlist) diff --git a/snow/networking/benchlist/manager.go b/snow/networking/benchlist/manager.go index e6ac45da4400..e19c54410447 100644 --- a/snow/networking/benchlist/manager.go +++ b/snow/networking/benchlist/manager.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" @@ -39,12 +40,13 @@ type Manager interface { // Config defines the configuration for a benchlist type Config struct { - Benchable Benchable `json:"-"` - Validators validators.Manager `json:"-"` - Threshold int `json:"threshold"` - MinimumFailingDuration time.Duration `json:"minimumFailingDuration"` - Duration time.Duration `json:"duration"` - MaxPortion float64 `json:"maxPortion"` + Benchable Benchable `json:"-"` + Validators validators.Manager `json:"-"` + BenchlistRegisterer metrics.MultiGatherer `json:"-"` + Threshold int `json:"threshold"` + MinimumFailingDuration time.Duration `json:"minimumFailingDuration"` + Duration time.Duration `json:"duration"` + MaxPortion float64 `json:"maxPortion"` } type manager struct { @@ -108,6 +110,14 @@ func (m *manager) RegisterChain(ctx *snow.ConsensusContext) error { return nil } + reg, err := metrics.MakeAndRegister( + m.config.BenchlistRegisterer, + ctx.PrimaryAlias, + ) + if err != nil { + return err + } + benchlist, err := NewBenchlist( ctx, m.config.Benchable, @@ -116,6 +126,7 @@ func (m *manager) RegisterChain(ctx *snow.ConsensusContext) error { m.config.MinimumFailingDuration, m.config.Duration, m.config.MaxPortion, + reg, ) if err != nil { return err diff --git a/snow/networking/benchlist/metrics.go b/snow/networking/benchlist/metrics.go deleted file mode 
100644 index 25f9e50f7da8..000000000000 --- a/snow/networking/benchlist/metrics.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package benchlist - -import ( - "fmt" - - "github.com/prometheus/client_golang/prometheus" -) - -type metrics struct { - numBenched, weightBenched prometheus.Gauge -} - -func (m *metrics) Initialize(registerer prometheus.Registerer) error { - m.numBenched = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "benchlist", - Name: "benched_num", - Help: "Number of currently benched validators", - }) - if err := registerer.Register(m.numBenched); err != nil { - return fmt.Errorf("failed to register num benched statistics due to %w", err) - } - - m.weightBenched = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "benchlist", - Name: "benched_weight", - Help: "Weight of currently benched validators", - }) - if err := registerer.Register(m.weightBenched); err != nil { - return fmt.Errorf("failed to register weight benched statistics due to %w", err) - } - - return nil -} diff --git a/snow/networking/handler/handler.go b/snow/networking/handler/handler.go index 9388d2d66be6..1eb42ca0dcdc 100644 --- a/snow/networking/handler/handler.go +++ b/snow/networking/handler/handler.go @@ -140,6 +140,7 @@ func New( subnet subnets.Subnet, peerTracker commontracker.Peers, p2pTracker *p2p.PeerTracker, + reg prometheus.Registerer, ) (Handler, error) { h := &handler{ ctx: ctx, @@ -160,7 +161,7 @@ func New( var err error - h.metrics, err = newMetrics("handler", h.ctx.Registerer) + h.metrics, err = newMetrics(reg) if err != nil { return nil, fmt.Errorf("initializing handler metrics errored with: %w", err) } @@ -170,8 +171,8 @@ func New( h.ctx.SubnetID, h.validators, cpuTracker, - "handler", - h.ctx.Registerer, + "sync", + reg, ) if err != nil { return nil, fmt.Errorf("initializing sync message queue errored with: %w", err) @@ -181,8 +182,8 @@ func 
New( h.ctx.SubnetID, h.validators, cpuTracker, - "handler_async", - h.ctx.Registerer, + "async", + reg, ) if err != nil { return nil, fmt.Errorf("initializing async message queue errored with: %w", err) diff --git a/snow/networking/handler/handler_test.go b/snow/networking/handler/handler_test.go index e8ab5f85ebb1..cb24040643f3 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -77,6 +77,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) handler := handlerIntf.(*handler) @@ -183,6 +184,7 @@ func TestHandlerClosesOnError(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) handler := handlerIntf.(*handler) @@ -285,6 +287,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) handler := handlerIntf.(*handler) @@ -375,6 +378,7 @@ func TestHandlerDispatchInternal(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -460,6 +464,7 @@ func TestHandlerSubnetConnector(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -641,6 +646,7 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { subnets.New(ids.EmptyNodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -723,6 +729,7 @@ func TestHandlerStartError(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) diff --git 
a/snow/networking/handler/health_test.go b/snow/networking/handler/health_test.go index 163332735ea2..789d3464187e 100644 --- a/snow/networking/handler/health_test.go +++ b/snow/networking/handler/health_test.go @@ -93,6 +93,7 @@ func TestHealthCheckSubnet(t *testing.T) { sb, peerTracker, p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) diff --git a/snow/networking/handler/metrics.go b/snow/networking/handler/metrics.go index 9cd6c9ec4096..f3a21149f26c 100644 --- a/snow/networking/handler/metrics.go +++ b/snow/networking/handler/metrics.go @@ -16,36 +16,32 @@ type metrics struct { messageHandlingTime *prometheus.GaugeVec // op } -func newMetrics(namespace string, reg prometheus.Registerer) (*metrics, error) { +func newMetrics(reg prometheus.Registerer) (*metrics, error) { m := &metrics{ expired: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "expired", - Help: "messages dropped because the deadline expired", + Name: "expired", + Help: "messages dropped because the deadline expired", }, opLabels, ), messages: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "messages", - Help: "messages handled", + Name: "messages", + Help: "messages handled", }, opLabels, ), messageHandlingTime: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "message_handling_time", - Help: "time spent handling messages", + Name: "message_handling_time", + Help: "time spent handling messages", }, opLabels, ), lockingTime: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "locking_time", - Help: "time spent acquiring the context lock", + Name: "locking_time", + Help: "time spent acquiring the context lock", }), } return m, utils.Err( diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index 8d471fb768c6..27bf891ab4f9 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -101,8 
+101,7 @@ func (cr *ChainRouter) Initialize( trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, - metricsNamespace string, - metricsRegisterer prometheus.Registerer, + reg prometheus.Registerer, ) error { cr.log = log cr.chainHandlers = make(map[ids.ID]handler.Handler) @@ -126,7 +125,7 @@ func (cr *ChainRouter) Initialize( cr.peers[nodeID] = myself // Register metrics - rMetrics, err := newRouterMetrics(metricsNamespace, metricsRegisterer) + rMetrics, err := newRouterMetrics(reg) if err != nil { return err } diff --git a/snow/networking/router/chain_router_metrics.go b/snow/networking/router/chain_router_metrics.go index bc8f26223586..8855acc5ccdf 100644 --- a/snow/networking/router/chain_router_metrics.go +++ b/snow/networking/router/chain_router_metrics.go @@ -16,27 +16,24 @@ type routerMetrics struct { droppedRequests prometheus.Counter } -func newRouterMetrics(namespace string, registerer prometheus.Registerer) (*routerMetrics, error) { +func newRouterMetrics(registerer prometheus.Registerer) (*routerMetrics, error) { rMetrics := &routerMetrics{} rMetrics.outstandingRequests = prometheus.NewGauge( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "outstanding", - Help: "Number of outstanding requests (all types)", + Name: "outstanding", + Help: "Number of outstanding requests (all types)", }, ) rMetrics.longestRunningRequest = prometheus.NewGauge( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "longest_running", - Help: "Time (in ns) the longest request took", + Name: "longest_running", + Help: "Time (in ns) the longest request took", }, ) rMetrics.droppedRequests = prometheus.NewCounter( prometheus.CounterOpts{ - Namespace: namespace, - Name: "dropped", - Help: "Number of dropped requests (all types)", + Name: "dropped", + Help: "Number of dropped requests (all types)", }, ) diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index c17360f02486..19b889cd2d94 
100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -61,7 +61,7 @@ func TestShutdown(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -80,7 +80,6 @@ func TestShutdown(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -114,6 +113,7 @@ func TestShutdown(t *testing.T) { subnets.New(chainCtx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -200,7 +200,6 @@ func TestShutdownTimesOut(t *testing.T) { vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() - metrics := prometheus.NewRegistry() // Ensure that the Ancestors request does not timeout tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -211,8 +210,8 @@ func TestShutdownTimesOut(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist, - "", - metrics, + prometheus.NewRegistry(), + prometheus.NewRegistry(), ) require.NoError(err) @@ -231,8 +230,7 @@ func TestShutdownTimesOut(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", - metrics, + prometheus.NewRegistry(), )) resourceTracker, err := tracker.NewResourceTracker( @@ -263,6 +261,7 @@ func TestShutdownTimesOut(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -360,7 +359,7 @@ func TestRouterTimeout(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -380,7 +379,6 @@ func TestRouterTimeout(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) defer chainRouter.Shutdown(context.Background()) @@ -433,6 +431,7 @@ 
func TestRouterTimeout(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -729,7 +728,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -749,7 +748,6 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) defer chainRouter.Shutdown(context.Background()) @@ -954,7 +952,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -974,7 +972,6 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) defer chainRouter.Shutdown(context.Background()) @@ -1017,6 +1014,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { sb, commontracker.NewPeers(), p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -1115,7 +1113,7 @@ func TestConnectedSubnet(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "timeoutManager", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -1140,7 +1138,6 @@ func TestConnectedSubnet(t *testing.T) { trackedSubnets, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -1232,7 +1229,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -1252,7 +1249,6 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) defer chainRouter.Shutdown(context.Background()) @@ -1299,6 +1295,7 @@ func 
TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { sb, commontracker.NewPeers(), p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -1582,7 +1579,7 @@ func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(t, err) @@ -1601,7 +1598,6 @@ func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -1639,6 +1635,7 @@ func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, + prometheus.NewRegistry(), ) require.NoError(t, err) diff --git a/snow/networking/router/mock_router.go b/snow/networking/router/mock_router.go index c9146a777138..548b32110775 100644 --- a/snow/networking/router/mock_router.go +++ b/snow/networking/router/mock_router.go @@ -125,17 +125,17 @@ func (mr *MockRouterMockRecorder) HealthCheck(arg0 any) *gomock.Call { } // Initialize mocks base method. 
-func (m *MockRouter) Initialize(nodeID ids.NodeID, log logging.Logger, timeouts timeout.Manager, shutdownTimeout time.Duration, criticalChains set.Set[ids.ID], sybilProtectionEnabled bool, trackedSubnets set.Set[ids.ID], onFatal func(int), healthConfig HealthConfig, metricsNamespace string, metricsRegisterer prometheus.Registerer) error { +func (m *MockRouter) Initialize(nodeID ids.NodeID, log logging.Logger, timeouts timeout.Manager, shutdownTimeout time.Duration, criticalChains set.Set[ids.ID], sybilProtectionEnabled bool, trackedSubnets set.Set[ids.ID], onFatal func(int), healthConfig HealthConfig, reg prometheus.Registerer) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Initialize", nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer) + ret := m.ctrl.Call(m, "Initialize", nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, reg) ret0, _ := ret[0].(error) return ret0 } // Initialize indicates an expected call of Initialize. 
-func (mr *MockRouterMockRecorder) Initialize(nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer any) *gomock.Call { +func (mr *MockRouterMockRecorder) Initialize(nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, reg any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockRouter)(nil).Initialize), nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockRouter)(nil).Initialize), nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, reg) } // RegisterRequest mocks base method. diff --git a/snow/networking/router/router.go b/snow/networking/router/router.go index 4df5614c25fb..ef4765cb0965 100644 --- a/snow/networking/router/router.go +++ b/snow/networking/router/router.go @@ -36,8 +36,7 @@ type Router interface { trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, - metricsNamespace string, - metricsRegisterer prometheus.Registerer, + reg prometheus.Registerer, ) error Shutdown(context.Context) AddChain(ctx context.Context, chain handler.Handler) diff --git a/snow/networking/router/traced_router.go b/snow/networking/router/traced_router.go index 4c52bce0827a..cbd2b6ed1205 100644 --- a/snow/networking/router/traced_router.go +++ b/snow/networking/router/traced_router.go @@ -47,8 +47,7 @@ func (r *tracedRouter) Initialize( trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, - metricsNamespace string, - metricsRegisterer prometheus.Registerer, + reg prometheus.Registerer, ) error { return 
r.router.Initialize( nodeID, @@ -60,8 +59,7 @@ func (r *tracedRouter) Initialize( trackedSubnets, onFatal, healthConfig, - metricsNamespace, - metricsRegisterer, + reg, ) } diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 9453f43e4faa..34f138f6db21 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -58,7 +58,7 @@ func TestTimeout(t *testing.T) { TimeoutCoefficient: 1.25, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -85,7 +85,6 @@ func TestTimeout(t *testing.T) { set.Set[ids.ID]{}, nil, router.HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -133,6 +132,7 @@ func TestTimeout(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -334,7 +334,7 @@ func TestReliableMessages(t *testing.T) { TimeoutCoefficient: 1.25, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -362,7 +362,6 @@ func TestReliableMessages(t *testing.T) { set.Set[ids.ID]{}, nil, router.HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -410,6 +409,7 @@ func TestReliableMessages(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -491,7 +491,7 @@ func TestReliableMessagesToMyself(t *testing.T) { TimeoutCoefficient: 1.25, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -519,7 +519,6 @@ func TestReliableMessagesToMyself(t *testing.T) { set.Set[ids.ID]{}, nil, router.HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -567,6 +566,7 @@ func TestReliableMessagesToMyself(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) diff --git 
a/snow/networking/timeout/manager.go b/snow/networking/timeout/manager.go index 89a7cc56d869..573dbe712bc5 100644 --- a/snow/networking/timeout/manager.go +++ b/snow/networking/timeout/manager.go @@ -71,27 +71,33 @@ type Manager interface { func NewManager( timeoutConfig *timer.AdaptiveTimeoutConfig, benchlistMgr benchlist.Manager, - metricsNamespace string, - metricsRegister prometheus.Registerer, + requestReg prometheus.Registerer, + responseReg prometheus.Registerer, ) (Manager, error) { tm, err := timer.NewAdaptiveTimeoutManager( timeoutConfig, - metricsNamespace, - metricsRegister, + requestReg, ) if err != nil { return nil, fmt.Errorf("couldn't create timeout manager: %w", err) } + + m, err := newTimeoutMetrics(responseReg) + if err != nil { + return nil, fmt.Errorf("couldn't create timeout metrics: %w", err) + } + return &manager{ - benchlistMgr: benchlistMgr, tm: tm, + benchlistMgr: benchlistMgr, + metrics: m, }, nil } type manager struct { tm timer.AdaptiveTimeoutManager benchlistMgr benchlist.Manager - metrics metrics + metrics *timeoutMetrics stopOnce sync.Once } diff --git a/snow/networking/timeout/manager_test.go b/snow/networking/timeout/manager_test.go index 49a05f78d8d8..d6109002f615 100644 --- a/snow/networking/timeout/manager_test.go +++ b/snow/networking/timeout/manager_test.go @@ -27,7 +27,7 @@ func TestManagerFire(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(t, err) diff --git a/snow/networking/timeout/metrics.go b/snow/networking/timeout/metrics.go index 101bda856255..3f217d5f7ad7 100644 --- a/snow/networking/timeout/metrics.go +++ b/snow/networking/timeout/metrics.go @@ -4,7 +4,6 @@ package timeout import ( - "fmt" "sync" "time" @@ -17,83 +16,61 @@ import ( ) const ( - responseNamespace = "response" - opLabel = "op" + chainLabel = "chain" + opLabel = "op" ) -var opLabels = []string{opLabel} +var opLabels = []string{chainLabel, opLabel} -type 
metrics struct { - lock sync.Mutex - chainToMetrics map[ids.ID]*chainMetrics -} - -func (m *metrics) RegisterChain(ctx *snow.ConsensusContext) error { - m.lock.Lock() - defer m.lock.Unlock() +type timeoutMetrics struct { + messages *prometheus.CounterVec // chain + op + messageLatencies *prometheus.GaugeVec // chain + op - if m.chainToMetrics == nil { - m.chainToMetrics = map[ids.ID]*chainMetrics{} - } - if _, exists := m.chainToMetrics[ctx.ChainID]; exists { - return fmt.Errorf("chain %s has already been registered", ctx.ChainID) - } - cm, err := newChainMetrics(ctx.Registerer) - if err != nil { - return fmt.Errorf("couldn't create metrics for chain %s: %w", ctx.ChainID, err) - } - m.chainToMetrics[ctx.ChainID] = cm - return nil + lock sync.RWMutex + chainIDToAlias map[ids.ID]string } -// Record that a response of type [op] took [latency] -func (m *metrics) Observe(chainID ids.ID, op message.Op, latency time.Duration) { - m.lock.Lock() - defer m.lock.Unlock() - - cm, exists := m.chainToMetrics[chainID] - if !exists { - // TODO should this log an error? 
- return - } - cm.observe(op, latency) -} - -// chainMetrics contains message response time metrics for a chain -type chainMetrics struct { - messages *prometheus.CounterVec // op - messageLatencies *prometheus.GaugeVec // op -} - -func newChainMetrics(reg prometheus.Registerer) (*chainMetrics, error) { - cm := &chainMetrics{ +func newTimeoutMetrics(reg prometheus.Registerer) (*timeoutMetrics, error) { + m := &timeoutMetrics{ messages: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: responseNamespace, - Name: "messages", - Help: "number of responses", + Name: "messages", + Help: "number of responses", }, opLabels, ), messageLatencies: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: responseNamespace, - Name: "message_latencies", - Help: "message latencies (ns)", + Name: "message_latencies", + Help: "message latencies (ns)", }, opLabels, ), + chainIDToAlias: make(map[ids.ID]string), } - return cm, utils.Err( - reg.Register(cm.messages), - reg.Register(cm.messageLatencies), + return m, utils.Err( + reg.Register(m.messages), + reg.Register(m.messageLatencies), ) } -func (cm *chainMetrics) observe(op message.Op, latency time.Duration) { +func (m *timeoutMetrics) RegisterChain(ctx *snow.ConsensusContext) error { + m.lock.Lock() + defer m.lock.Unlock() + + m.chainIDToAlias[ctx.ChainID] = ctx.PrimaryAlias + return nil +} + +// Record that a response of type [op] took [latency] +func (m *timeoutMetrics) Observe(chainID ids.ID, op message.Op, latency time.Duration) { + m.lock.RLock() + defer m.lock.RUnlock() + labels := prometheus.Labels{ - opLabel: op.String(), + chainLabel: m.chainIDToAlias[chainID], + opLabel: op.String(), } - cm.messages.With(labels).Inc() - cm.messageLatencies.With(labels).Add(float64(latency)) + m.messages.With(labels).Inc() + m.messageLatencies.With(labels).Add(float64(latency)) } diff --git a/snow/networking/tracker/resource_tracker.go b/snow/networking/tracker/resource_tracker.go index 7b480d242551..d8f5da99192f 100644 --- 
a/snow/networking/tracker/resource_tracker.go +++ b/snow/networking/tracker/resource_tracker.go @@ -218,7 +218,7 @@ func NewResourceTracker( meters: linked.NewHashmap[ids.NodeID, meter.Meter](), } var err error - t.metrics, err = newCPUTrackerMetrics("resource_tracker", reg) + t.metrics, err = newCPUTrackerMetrics(reg) if err != nil { return nil, fmt.Errorf("initializing resourceTracker metrics errored with: %w", err) } @@ -293,32 +293,27 @@ type trackerMetrics struct { diskSpaceAvailable prometheus.Gauge } -func newCPUTrackerMetrics(namespace string, reg prometheus.Registerer) (*trackerMetrics, error) { +func newCPUTrackerMetrics(reg prometheus.Registerer) (*trackerMetrics, error) { m := &trackerMetrics{ processingTimeMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "processing_time", - Help: "Tracked processing time over all nodes. Value expected to be in [0, number of CPU cores], but can go higher due to IO bound processes and thread multiplexing", + Name: "processing_time", + Help: "Tracked processing time over all nodes. Value expected to be in [0, number of CPU cores], but can go higher due to IO bound processes and thread multiplexing", }), cpuMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "cpu_usage", - Help: "CPU usage tracked by the resource manager. Value should be in [0, number of CPU cores]", + Name: "cpu_usage", + Help: "CPU usage tracked by the resource manager. 
Value should be in [0, number of CPU cores]", }), diskReadsMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "disk_reads", - Help: "Disk reads (bytes/sec) tracked by the resource manager", + Name: "disk_reads", + Help: "Disk reads (bytes/sec) tracked by the resource manager", }), diskWritesMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "disk_writes", - Help: "Disk writes (bytes/sec) tracked by the resource manager", + Name: "disk_writes", + Help: "Disk writes (bytes/sec) tracked by the resource manager", }), diskSpaceAvailable: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "disk_available_space", - Help: "Available space remaining (bytes) on the database volume", + Name: "disk_available_space", + Help: "Available space remaining (bytes) on the database volume", }), } err := utils.Err( diff --git a/snow/snowtest/snowtest.go b/snow/snowtest/snowtest.go index 86374f766514..3cacc8e873bf 100644 --- a/snow/snowtest/snowtest.go +++ b/snow/snowtest/snowtest.go @@ -40,6 +40,7 @@ func (noOpAcceptor) Accept(*snow.ConsensusContext, ids.ID, []byte) error { func ConsensusContext(ctx *snow.Context) *snow.ConsensusContext { return &snow.ConsensusContext{ Context: ctx, + PrimaryAlias: ctx.ChainID.String(), Registerer: prometheus.NewRegistry(), BlockAcceptor: noOpAcceptor{}, TxAcceptor: noOpAcceptor{}, @@ -89,7 +90,7 @@ func Context(tb testing.TB, chainID ids.ID) *snow.Context { Log: logging.NoLog{}, BCLookup: aliaser, - Metrics: metrics.NewMultiGatherer(), + Metrics: metrics.NewPrefixGatherer(), ValidatorState: validatorState, ChainDataDir: "", diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index 35d6afe1b17e..10a2359e7f9e 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -9,8 +9,10 @@ import ( "math/rand" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + 
"github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests" @@ -28,10 +30,14 @@ import ( const ( totalRounds = 50 - xBlksProcessingMetric = "avalanche_X_blks_processing" - xBlksAcceptedMetric = "avalanche_X_blks_accepted_count" + blksProcessingMetric = "avalanche_snowman_blks_processing" + blksAcceptedMetric = "avalanche_snowman_blks_accepted_count" ) +var xChainMetricLabels = prometheus.Labels{ + chains.ChainLabel: "X", +} + // This test requires that the network not have ongoing blocks and // cannot reliably be run in parallel. var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { @@ -55,7 +61,7 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { require.NoError(err) for _, metrics := range allNodeMetrics { - xBlksProcessing, ok := tests.GetMetricValue(metrics, xBlksProcessingMetric, nil) + xBlksProcessing, ok := tests.GetMetricValue(metrics, blksProcessingMetric, xChainMetricLabels) if !ok || xBlksProcessing > 0 { return false } @@ -248,13 +254,13 @@ RECEIVER NEW BALANCE (AFTER) : %21d AVAX // +0 since X-chain tx must have been processed and accepted // by now - currentXBlksProcessing, _ := tests.GetMetricValue(mm, xBlksProcessingMetric, nil) - previousXBlksProcessing, _ := tests.GetMetricValue(prev, xBlksProcessingMetric, nil) + currentXBlksProcessing, _ := tests.GetMetricValue(mm, blksProcessingMetric, xChainMetricLabels) + previousXBlksProcessing, _ := tests.GetMetricValue(prev, blksProcessingMetric, xChainMetricLabels) require.Equal(currentXBlksProcessing, previousXBlksProcessing) // +1 since X-chain tx must have been accepted by now - currentXBlksAccepted, _ := tests.GetMetricValue(mm, xBlksAcceptedMetric, nil) - previousXBlksAccepted, _ := tests.GetMetricValue(prev, xBlksAcceptedMetric, nil) + currentXBlksAccepted, _ := tests.GetMetricValue(mm, blksAcceptedMetric, 
xChainMetricLabels) + previousXBlksAccepted, _ := tests.GetMetricValue(prev, blksAcceptedMetric, xChainMetricLabels) require.Equal(currentXBlksAccepted, previousXBlksAccepted+1) metricsBeforeTx[u] = mm diff --git a/utils/resource/metrics.go b/utils/resource/metrics.go index 3ce87ade258c..42d12f1ccc74 100644 --- a/utils/resource/metrics.go +++ b/utils/resource/metrics.go @@ -17,45 +17,40 @@ type metrics struct { numDiskWritesBytes *prometheus.GaugeVec } -func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { +func newMetrics(registerer prometheus.Registerer) (*metrics, error) { m := &metrics{ numCPUCycles: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_cpu_cycles", - Help: "Total number of CPU cycles", + Name: "num_cpu_cycles", + Help: "Total number of CPU cycles", }, []string{"processID"}, ), numDiskReads: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_disk_reads", - Help: "Total number of disk reads", + Name: "num_disk_reads", + Help: "Total number of disk reads", }, []string{"processID"}, ), numDiskReadBytes: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_disk_read_bytes", - Help: "Total number of disk read bytes", + Name: "num_disk_read_bytes", + Help: "Total number of disk read bytes", }, []string{"processID"}, ), numDiskWrites: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_disk_writes", - Help: "Total number of disk writes", + Name: "num_disk_writes", + Help: "Total number of disk writes", }, []string{"processID"}, ), numDiskWritesBytes: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_disk_write_bytes", - Help: "Total number of disk write bytes", + Name: "num_disk_write_bytes", + Help: "Total number of disk write bytes", }, []string{"processID"}, ), diff --git a/utils/resource/usage.go b/utils/resource/usage.go index 32a9d1965c90..32ffbfe4aa85 100644 
--- a/utils/resource/usage.go +++ b/utils/resource/usage.go @@ -94,7 +94,7 @@ func NewManager( diskHalflife time.Duration, metricsRegisterer prometheus.Registerer, ) (Manager, error) { - processMetrics, err := newMetrics("system_resources", metricsRegisterer) + processMetrics, err := newMetrics(metricsRegisterer) if err != nil { return nil, err } diff --git a/utils/timer/adaptive_timeout_manager.go b/utils/timer/adaptive_timeout_manager.go index 493769018ba2..5d8670bb56e2 100644 --- a/utils/timer/adaptive_timeout_manager.go +++ b/utils/timer/adaptive_timeout_manager.go @@ -92,8 +92,7 @@ type adaptiveTimeoutManager struct { func NewAdaptiveTimeoutManager( config *AdaptiveTimeoutConfig, - metricsNamespace string, - metricsRegister prometheus.Registerer, + reg prometheus.Registerer, ) (AdaptiveTimeoutManager, error) { switch { case config.InitialTimeout > config.MaximumTimeout: @@ -108,24 +107,20 @@ func NewAdaptiveTimeoutManager( tm := &adaptiveTimeoutManager{ networkTimeoutMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: metricsNamespace, - Name: "current_timeout", - Help: "Duration of current network timeout in nanoseconds", + Name: "current_timeout", + Help: "Duration of current network timeout in nanoseconds", }), avgLatency: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: metricsNamespace, - Name: "average_latency", - Help: "Average network latency in nanoseconds", + Name: "average_latency", + Help: "Average network latency in nanoseconds", }), numTimeouts: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: metricsNamespace, - Name: "timeouts", - Help: "Number of timed out requests", + Name: "timeouts", + Help: "Number of timed out requests", }), numPendingTimeouts: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: metricsNamespace, - Name: "pending_timeouts", - Help: "Number of pending timeouts", + Name: "pending_timeouts", + Help: "Number of pending timeouts", }), minimumTimeout: config.MinimumTimeout, maximumTimeout: 
config.MaximumTimeout, @@ -139,10 +134,10 @@ func NewAdaptiveTimeoutManager( tm.averager = math.NewAverager(float64(config.InitialTimeout), config.TimeoutHalflife, tm.clock.Time()) err := utils.Err( - metricsRegister.Register(tm.networkTimeoutMetric), - metricsRegister.Register(tm.avgLatency), - metricsRegister.Register(tm.numTimeouts), - metricsRegister.Register(tm.numPendingTimeouts), + reg.Register(tm.networkTimeoutMetric), + reg.Register(tm.avgLatency), + reg.Register(tm.numTimeouts), + reg.Register(tm.numPendingTimeouts), ) return tm, err } diff --git a/utils/timer/adaptive_timeout_manager_test.go b/utils/timer/adaptive_timeout_manager_test.go index 5b725303f385..e522b525272e 100644 --- a/utils/timer/adaptive_timeout_manager_test.go +++ b/utils/timer/adaptive_timeout_manager_test.go @@ -83,7 +83,7 @@ func TestAdaptiveTimeoutManagerInit(t *testing.T) { } for _, test := range tests { - _, err := NewAdaptiveTimeoutManager(&test.config, "", prometheus.NewRegistry()) + _, err := NewAdaptiveTimeoutManager(&test.config, prometheus.NewRegistry()) require.ErrorIs(t, err, test.expectedErr) } } @@ -97,7 +97,6 @@ func TestAdaptiveTimeoutManager(t *testing.T) { TimeoutHalflife: 5 * time.Minute, TimeoutCoefficient: 1.25, }, - "", prometheus.NewRegistry(), ) require.NoError(t, err) diff --git a/vms/metervm/block_vm.go b/vms/metervm/block_vm.go index 6d951f344b2b..da64f9af01d2 100644 --- a/vms/metervm/block_vm.go +++ b/vms/metervm/block_vm.go @@ -8,7 +8,6 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -32,10 +31,14 @@ type blockVM struct { ssVM block.StateSyncableVM blockMetrics - clock mockable.Clock + registry prometheus.Registerer + clock mockable.Clock } -func NewBlockVM(vm block.ChainVM) block.ChainVM { +func NewBlockVM( + vm block.ChainVM, + reg 
prometheus.Registerer, +) block.ChainVM { buildBlockVM, _ := vm.(block.BuildBlockWithContextChainVM) batchedVM, _ := vm.(block.BatchedChainVM) ssVM, _ := vm.(block.StateSyncableVM) @@ -44,6 +47,7 @@ func NewBlockVM(vm block.ChainVM) block.ChainVM { buildBlockVM: buildBlockVM, batchedVM: batchedVM, ssVM: ssVM, + registry: reg, } } @@ -58,26 +62,16 @@ func (vm *blockVM) Initialize( fxs []*common.Fx, appSender common.AppSender, ) error { - registerer := prometheus.NewRegistry() err := vm.blockMetrics.Initialize( vm.buildBlockVM != nil, vm.batchedVM != nil, vm.ssVM != nil, - registerer, + vm.registry, ) if err != nil { return err } - multiGatherer := metrics.NewMultiGatherer() - if err := chainCtx.Metrics.Register("metervm", registerer); err != nil { - return err - } - if err := chainCtx.Metrics.Register("", multiGatherer); err != nil { - return err - } - chainCtx.Metrics = multiGatherer - return vm.ChainVM.Initialize(ctx, chainCtx, db, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) } diff --git a/vms/metervm/vertex_vm.go b/vms/metervm/vertex_vm.go index 7cbd47a67475..936a688de99d 100644 --- a/vms/metervm/vertex_vm.go +++ b/vms/metervm/vertex_vm.go @@ -8,7 +8,6 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" @@ -22,16 +21,21 @@ var ( _ snowstorm.Tx = (*meterTx)(nil) ) -func NewVertexVM(vm vertex.LinearizableVMWithEngine) vertex.LinearizableVMWithEngine { +func NewVertexVM( + vm vertex.LinearizableVMWithEngine, + reg prometheus.Registerer, +) vertex.LinearizableVMWithEngine { return &vertexVM{ LinearizableVMWithEngine: vm, + registry: reg, } } type vertexVM struct { vertex.LinearizableVMWithEngine vertexMetrics - clock mockable.Clock + registry prometheus.Registerer + clock mockable.Clock } func (vm *vertexVM) 
Initialize( @@ -45,20 +49,10 @@ func (vm *vertexVM) Initialize( fxs []*common.Fx, appSender common.AppSender, ) error { - registerer := prometheus.NewRegistry() - if err := vm.vertexMetrics.Initialize(registerer); err != nil { + if err := vm.vertexMetrics.Initialize(vm.registry); err != nil { return err } - multiGatherer := metrics.NewMultiGatherer() - if err := chainCtx.Metrics.Register("metervm", registerer); err != nil { - return err - } - if err := chainCtx.Metrics.Register("", multiGatherer); err != nil { - return err - } - chainCtx.Metrics = multiGatherer - return vm.LinearizableVMWithEngine.Initialize( ctx, chainCtx, diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index e6e645d74242..24fa707a8e32 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -1429,7 +1429,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { TimeoutCoefficient: 1.25, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -1453,7 +1453,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { set.Set[ids.ID]{}, nil, router.HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -1544,6 +1543,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { subnets.New(ctx.NodeID, subnets.Config{}), tracker.NewPeers(), peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) diff --git a/vms/proposervm/batched_vm_test.go b/vms/proposervm/batched_vm_test.go index a6e9ffb2b1d6..be134823c894 100644 --- a/vms/proposervm/batched_vm_test.go +++ b/vms/proposervm/batched_vm_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -868,6 +869,7 @@ func initTestRemoteProposerVM( NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) diff --git 
a/vms/proposervm/block_test.go b/vms/proposervm/block_test.go index d8c867058f52..12b18a75d681 100644 --- a/vms/proposervm/block_test.go +++ b/vms/proposervm/block_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -74,6 +75,7 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { DurangoTime: time.Unix(0, 0), StakingCertLeaf: &staking.Certificate{}, StakingLeafSigner: pk, + Registerer: prometheus.NewRegistry(), }, ChainVM: innerVM, blockBuilderVM: innerBlockBuilderVM, @@ -386,6 +388,7 @@ func TestPostDurangoBuildChildResetScheduler(t *testing.T) { DurangoTime: time.Unix(0, 0), StakingCertLeaf: &staking.Certificate{}, StakingLeafSigner: pk, + Registerer: prometheus.NewRegistry(), }, ChainVM: block.NewMockChainVM(ctrl), ctx: &snow.Context{ diff --git a/vms/proposervm/config.go b/vms/proposervm/config.go index a7eb4ff0db9b..296f6a60520c 100644 --- a/vms/proposervm/config.go +++ b/vms/proposervm/config.go @@ -7,6 +7,8 @@ import ( "crypto" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/staking" ) @@ -32,6 +34,9 @@ type Config struct { // Block certificate StakingCertLeaf *staking.Certificate + + // Registerer for prometheus metrics + Registerer prometheus.Registerer } func (c *Config) IsDurangoActivated(timestamp time.Time) bool { diff --git a/vms/proposervm/post_fork_option_test.go b/vms/proposervm/post_fork_option_test.go index 39c6434dddf6..43b7d5f5b90f 100644 --- a/vms/proposervm/post_fork_option_test.go +++ b/vms/proposervm/post_fork_option_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -548,6 +549,7 @@ func TestOptionTimestampValidity(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, 
StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) diff --git a/vms/proposervm/state_syncable_vm_test.go b/vms/proposervm/state_syncable_vm_test.go index 4f44adc0bf74..479c311b5fed 100644 --- a/vms/proposervm/state_syncable_vm_test.go +++ b/vms/proposervm/state_syncable_vm_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -65,6 +66,7 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) diff --git a/vms/proposervm/vm.go b/vms/proposervm/vm.go index dfff407a03d2..4442aca65a9b 100644 --- a/vms/proposervm/vm.go +++ b/vms/proposervm/vm.go @@ -9,10 +9,8 @@ import ( "fmt" "time" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/database" @@ -130,21 +128,9 @@ func (vm *VM) Initialize( fxs []*common.Fx, appSender common.AppSender, ) error { - // TODO: Add a helper for this metrics override, it is performed in multiple - // places. 
- registerer := prometheus.NewRegistry() - if err := chainCtx.Metrics.Register("proposervm", registerer); err != nil { - return err - } - multiGatherer := metrics.NewMultiGatherer() - if err := chainCtx.Metrics.Register("", multiGatherer); err != nil { - return err - } - chainCtx.Metrics = multiGatherer - vm.ctx = chainCtx vm.db = versiondb.New(prefixdb.New(dbPrefix, db)) - baseState, err := state.NewMetered(vm.db, "state", registerer) + baseState, err := state.NewMetered(vm.db, "state", vm.Config.Registerer) if err != nil { return err } @@ -153,7 +139,7 @@ func (vm *VM) Initialize( vm.Tree = tree.New() innerBlkCache, err := metercacher.New( "inner_block_cache", - registerer, + vm.Config.Registerer, cache.NewSizedLRU( innerBlkCacheSize, cachedBlockSize, diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index fb717c203f7f..a2536375d48c 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -132,6 +133,7 @@ func initTestProposerVM( NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -812,6 +814,7 @@ func TestExpiredBuildBlock(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -1128,6 +1131,7 @@ func TestInnerVMRollback(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -1206,6 +1210,7 @@ func TestInnerVMRollback(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -1608,6 +1613,7 @@ func 
TestRejectedHeightNotIndexed(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -1779,6 +1785,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -1913,6 +1920,7 @@ func TestVMInnerBlkCache(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2123,6 +2131,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2324,6 +2333,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2415,6 +2425,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { NumHistoricalBlocks: numHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2459,6 +2470,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { NumHistoricalBlocks: newNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) diff --git a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go index 038a728c0ffe..6e6417725f11 100644 --- a/vms/rpcchainvm/vm_client.go +++ b/vms/rpcchainvm/vm_client.go @@ -18,6 +18,7 @@ import ( "google.golang.org/protobuf/types/known/emptypb" "github.com/ava-labs/avalanchego/api/keystore/gkeystore" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/chains/atomic/gsharedmemory" 
"github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/rpcdb" @@ -135,15 +136,19 @@ func (vm *VMClient) Initialize( } // Register metrics - registerer := prometheus.NewRegistry() - vm.grpcServerMetrics = grpc_prometheus.NewServerMetrics() - if err := registerer.Register(vm.grpcServerMetrics); err != nil { + serverReg, err := metrics.MakeAndRegister( + chainCtx.Metrics, + "rpcchainvm", + ) + if err != nil { return err } - if err := chainCtx.Metrics.Register("rpcchainvm", registerer); err != nil { + vm.grpcServerMetrics = grpc_prometheus.NewServerMetrics() + if err := serverReg.Register(vm.grpcServerMetrics); err != nil { return err } - if err := chainCtx.Metrics.Register("", vm); err != nil { + + if err := chainCtx.Metrics.Register("plugin", vm); err != nil { return err } @@ -225,7 +230,7 @@ func (vm *VMClient) Initialize( } vm.State, err = chain.NewMeteredState( - registerer, + serverReg, &chain.Config{ DecidedCacheSize: decidedCacheSize, MissingCacheSize: missingCacheSize, diff --git a/vms/rpcchainvm/vm_server.go b/vms/rpcchainvm/vm_server.go index 67a55187426a..b33fd3e5b5fe 100644 --- a/vms/rpcchainvm/vm_server.go +++ b/vms/rpcchainvm/vm_server.go @@ -72,9 +72,9 @@ type VMServer struct { allowShutdown *utils.Atomic[bool] - processMetrics prometheus.Gatherer - db database.Database - log logging.Logger + metrics prometheus.Gatherer + db database.Database + log logging.Logger serverCloser grpcutils.ServerCloser connCloser wrappers.Closer @@ -125,28 +125,47 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) return nil, err } - registerer := prometheus.NewRegistry() + pluginMetrics := metrics.NewPrefixGatherer() + vm.metrics = pluginMetrics + + processMetrics, err := metrics.MakeAndRegister( + pluginMetrics, + "process", + ) + if err != nil { + return nil, err + } // Current state of process metrics processCollector := collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}) - if 
err := registerer.Register(processCollector); err != nil { + if err := processMetrics.Register(processCollector); err != nil { return nil, err } // Go process metrics using debug.GCStats goCollector := collectors.NewGoCollector() - if err := registerer.Register(goCollector); err != nil { + if err := processMetrics.Register(goCollector); err != nil { + return nil, err + } + + grpcMetrics, err := metrics.MakeAndRegister( + pluginMetrics, + "grpc", + ) + if err != nil { return nil, err } // gRPC client metrics grpcClientMetrics := grpc_prometheus.NewClientMetrics() - if err := registerer.Register(grpcClientMetrics); err != nil { + if err := grpcMetrics.Register(grpcClientMetrics); err != nil { return nil, err } - // Register metrics for each Go plugin processes - vm.processMetrics = registerer + vmMetrics := metrics.NewPrefixGatherer() + if err := pluginMetrics.Register("vm", vmMetrics); err != nil { + return nil, err + } // Dial the database dbClientConn, err := grpcutils.Dial( @@ -225,7 +244,7 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) Keystore: keystoreClient, SharedMemory: sharedMemoryClient, BCLookup: bcLookupClient, - Metrics: metrics.NewMultiGatherer(), + Metrics: vmMetrics, // Signs warp messages WarpSigner: warpSignerClient, @@ -567,22 +586,8 @@ func (vm *VMServer) AppGossip(ctx context.Context, req *vmpb.AppGossipMsg) (*emp } func (vm *VMServer) Gather(context.Context, *emptypb.Empty) (*vmpb.GatherResponse, error) { - // Gather metrics registered to snow context Gatherer. These - // metrics are defined by the underlying vm implementation. - mfs, err := vm.ctx.Metrics.Gather() - if err != nil { - return nil, err - } - - // Gather metrics registered by rpcchainvm server Gatherer. These - // metrics are collected for each Go plugin process. - pluginMetrics, err := vm.processMetrics.Gather() - if err != nil { - return nil, err - } - mfs = append(mfs, pluginMetrics...) 
- - return &vmpb.GatherResponse{MetricFamilies: mfs}, err + metrics, err := vm.metrics.Gather() + return &vmpb.GatherResponse{MetricFamilies: metrics}, err } func (vm *VMServer) GetAncestors(ctx context.Context, req *vmpb.GetAncestorsRequest) (*vmpb.GetAncestorsResponse, error) { From 59bc3cfd590a6dd65ba1fd3b245ad0bf9de0ac40 Mon Sep 17 00:00:00 2001 From: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> Date: Thu, 6 Jun 2024 14:25:04 -0400 Subject: [PATCH 051/102] Add proposervm slot metrics (#3048) Signed-off-by: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> --- vms/proposervm/block.go | 8 +++++++ vms/proposervm/block_test.go | 5 ++-- vms/proposervm/post_fork_block.go | 13 ++++++++++- vms/proposervm/vm.go | 39 ++++++++++++++++++++++++++++--- 4 files changed, 59 insertions(+), 6 deletions(-) diff --git a/vms/proposervm/block.go b/vms/proposervm/block.go index 464acb9fc8cd..d320a80543bc 100644 --- a/vms/proposervm/block.go +++ b/vms/proposervm/block.go @@ -369,7 +369,10 @@ func (p *postForkCommonComponents) verifyPostDurangoBlockDelay( currentSlot = proposer.TimeToSlot(parentTimestamp, blkTimestamp) proposerID = blk.Proposer() ) + // populate the slot for the block. + blk.slot = ¤tSlot + // find the expected proposer expectedProposerID, err := p.vm.Windower.ExpectedProposer( ctx, blkHeight, @@ -452,6 +455,11 @@ func (p *postForkCommonComponents) shouldBuildSignedBlockPostDurango( ) return false, err } + + // report the build slot to the metrics. + p.vm.proposerBuildSlotGauge.Set(float64(proposer.TimeToSlot(parentTimestamp, nextStartTime))) + + // set the scheduler to let us know when the next block need to be built. 
p.vm.Scheduler.SetBuildBlockTime(nextStartTime) // In case the inner VM only issued one pendingTxs message, we should diff --git a/vms/proposervm/block_test.go b/vms/proposervm/block_test.go index 12b18a75d681..d55a615537d0 100644 --- a/vms/proposervm/block_test.go +++ b/vms/proposervm/block_test.go @@ -396,8 +396,9 @@ func TestPostDurangoBuildChildResetScheduler(t *testing.T) { ValidatorState: vdrState, Log: logging.NoLog{}, }, - Windower: windower, - Scheduler: scheduler, + Windower: windower, + Scheduler: scheduler, + proposerBuildSlotGauge: prometheus.NewGauge(prometheus.GaugeOpts{}), } vm.Clock.Set(now) diff --git a/vms/proposervm/post_fork_block.go b/vms/proposervm/post_fork_block.go index 707b6dc327c7..2c875807eb79 100644 --- a/vms/proposervm/post_fork_block.go +++ b/vms/proposervm/post_fork_block.go @@ -17,6 +17,11 @@ var _ PostForkBlock = (*postForkBlock)(nil) type postForkBlock struct { block.SignedBlock postForkCommonComponents + + // slot of the proposer that produced this block. + // It is populated in verifyPostDurangoBlockDelay. + // It is used to report metrics during Accept. 
+ slot *uint64 } // Accept: @@ -27,7 +32,13 @@ func (b *postForkBlock) Accept(ctx context.Context) error { if err := b.acceptOuterBlk(); err != nil { return err } - return b.acceptInnerBlk(ctx) + if err := b.acceptInnerBlk(ctx); err != nil { + return err + } + if b.slot != nil { + b.vm.acceptedBlocksSlotHistogram.Observe(float64(*b.slot)) + } + return nil } func (b *postForkBlock) acceptOuterBlk() error { diff --git a/vms/proposervm/vm.go b/vms/proposervm/vm.go index 4442aca65a9b..f5916fcf9f42 100644 --- a/vms/proposervm/vm.go +++ b/vms/proposervm/vm.go @@ -9,6 +9,7 @@ import ( "fmt" "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" @@ -22,6 +23,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -97,6 +99,14 @@ type VM struct { // lastAcceptedHeight is set to the last accepted PostForkBlock's height. lastAcceptedHeight uint64 + + // proposerBuildSlotGauge reports the slot index when this node may attempt + // to build a block. + proposerBuildSlotGauge prometheus.Gauge + + // acceptedBlocksSlotHistogram reports the slots that accepted blocks were + // proposed in. + acceptedBlocksSlotHistogram prometheus.Histogram } // New performs best when [minBlkDelay] is whole seconds. 
This is because block @@ -206,7 +216,28 @@ func (vm *VM) Initialize( default: return err } - return nil + + vm.proposerBuildSlotGauge = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "block_building_slot", + Help: "the slot that this node may attempt to build a block", + }) + vm.acceptedBlocksSlotHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "accepted_blocks_slot", + Help: "the slot accepted blocks were proposed in", + // define the following ranges: + // (-inf, 0] + // (0, 1] + // (1, 2] + // (2, inf) + // the usage of ".5" before was to ensure we work around the limitation + // of comparing floating point of the same numerical value. + Buckets: []float64{0.5, 1.5, 2.5}, + }) + + return utils.Err( + vm.Config.Registerer.Register(vm.proposerBuildSlotGauge), + vm.Config.Registerer.Register(vm.acceptedBlocksSlotHistogram), + ) } // shutdown ops then propagate shutdown to innerVM @@ -294,13 +325,15 @@ func (vm *VM) SetPreference(ctx context.Context, preferred ids.ID) error { ) if vm.IsDurangoActivated(parentTimestamp) { currentTime := vm.Clock.Time().Truncate(time.Second) - nextStartTime, err = vm.getPostDurangoSlotTime( + if nextStartTime, err = vm.getPostDurangoSlotTime( ctx, childBlockHeight, pChainHeight, proposer.TimeToSlot(parentTimestamp, currentTime), parentTimestamp, - ) + ); err == nil { + vm.proposerBuildSlotGauge.Set(float64(proposer.TimeToSlot(parentTimestamp, nextStartTime))) + } } else { nextStartTime, err = vm.getPreDurangoSlotTime( ctx, From 6caa655782a7c76ee1b106107529ac52a31a8c29 Mon Sep 17 00:00:00 2001 From: marun Date: Thu, 6 Jun 2024 20:28:59 +0200 Subject: [PATCH 052/102] [ci] Switch to gh workers for arm64 (#3090) --- .github/workflows/build-linux-binaries.yml | 2 +- .github/workflows/build-ubuntu-arm64-release.yml | 4 ++-- .github/workflows/ci.yml | 8 +++++++- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-linux-binaries.yml b/.github/workflows/build-linux-binaries.yml index 
7739c679c8a9..bee07d0faa8c 100644 --- a/.github/workflows/build-linux-binaries.yml +++ b/.github/workflows/build-linux-binaries.yml @@ -71,7 +71,7 @@ jobs: rm -rf /tmp/avalanchego build-arm64-binaries-tarball: - runs-on: [self-hosted, linux, ARM64, focal] + runs-on: custom-arm64-focal steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/build-ubuntu-arm64-release.yml b/.github/workflows/build-ubuntu-arm64-release.yml index 9766705a2d15..9605a16dee9d 100644 --- a/.github/workflows/build-ubuntu-arm64-release.yml +++ b/.github/workflows/build-ubuntu-arm64-release.yml @@ -12,7 +12,7 @@ on: jobs: build-jammy-arm64-package: - runs-on: [self-hosted, linux, ARM64, jammy] + runs-on: custom-arm64-jammy steps: - uses: actions/checkout@v4 @@ -69,7 +69,7 @@ jobs: rm -rf /tmp/avalanchego build-focal-arm64-package: - runs-on: [self-hosted, linux, ARM64, focal] + runs-on: custom-arm64-focal steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b5b38796821a..7251dadcc951 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,7 +28,7 @@ jobs: strategy: fail-fast: false matrix: - os: [macos-12, ubuntu-20.04, ubuntu-22.04, windows-2022, [self-hosted, linux, ARM64, focal], [self-hosted, linux, ARM64, jammy]] + os: [macos-12, ubuntu-20.04, ubuntu-22.04, windows-2022, custom-arm64-focal, custom-arm64-jammy] steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup-go-for-project-v3 @@ -36,6 +36,12 @@ jobs: shell: bash if: matrix.os == 'windows-2022' run: echo "TIMEOUT=240s" >> $GITHUB_ENV + - name: Install build dependencies not available by default on custom-arm64-focal runners + shell: bash + if: matrix.os == 'custom-arm64-focal' + run: | + sudo apt update + sudo apt -y install build-essential - name: build_test shell: bash run: ./scripts/build_test.sh From c28af7dfd1a165116fcd668b1b472c002afe2259 Mon Sep 17 00:00:00 2001 From: marun Date: Thu, 6 Jun 2024 23:15:08 +0200 Subject: [PATCH 
053/102] [ci] Ensure focal arm64 builds all have their required dependencies (#3091) --- .github/actions/install-focal-deps/action.yml | 16 ++++++++++++++++ .github/workflows/build-linux-binaries.yml | 2 ++ .github/workflows/build-ubuntu-arm64-release.yml | 1 + .github/workflows/ci.yml | 8 ++------ 4 files changed, 21 insertions(+), 6 deletions(-) create mode 100644 .github/actions/install-focal-deps/action.yml diff --git a/.github/actions/install-focal-deps/action.yml b/.github/actions/install-focal-deps/action.yml new file mode 100644 index 000000000000..0770ca11a6d6 --- /dev/null +++ b/.github/actions/install-focal-deps/action.yml @@ -0,0 +1,16 @@ +# This action installs dependencies missing from the default +# focal image used by arm64 github workers. +# +# TODO(marun): Find an image with the required dependencies already installed. + +name: 'Install focal arm64 dependencies' +description: 'Installs the dependencies required to build avalanchego on an arm64 github worker running Ubuntu 20.04 (focal)' + +runs: + using: composite + steps: + - name: Install build-essential + run: | + sudo apt update + sudo apt -y install build-essential + shell: bash diff --git a/.github/workflows/build-linux-binaries.yml b/.github/workflows/build-linux-binaries.yml index bee07d0faa8c..9f5cdfe97475 100644 --- a/.github/workflows/build-linux-binaries.yml +++ b/.github/workflows/build-linux-binaries.yml @@ -76,6 +76,8 @@ jobs: steps: - uses: actions/checkout@v4 + - uses: ./.github/actions/install-focal-deps + - uses: ./.github/actions/setup-go-for-project-v3 - run: go version diff --git a/.github/workflows/build-ubuntu-arm64-release.yml b/.github/workflows/build-ubuntu-arm64-release.yml index 9605a16dee9d..096137b1a2ef 100644 --- a/.github/workflows/build-ubuntu-arm64-release.yml +++ b/.github/workflows/build-ubuntu-arm64-release.yml @@ -73,6 +73,7 @@ jobs: steps: - uses: actions/checkout@v4 + - uses: ./.github/actions/install-focal-deps - uses: 
./.github/actions/setup-go-for-project-v3 - run: go version diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7251dadcc951..7fc80756daed 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,17 +31,13 @@ jobs: os: [macos-12, ubuntu-20.04, ubuntu-22.04, windows-2022, custom-arm64-focal, custom-arm64-jammy] steps: - uses: actions/checkout@v4 + - uses: ./.github/actions/install-focal-deps + if: matrix.os == 'custom-arm64-focal' - uses: ./.github/actions/setup-go-for-project-v3 - name: Set timeout on Windows # Windows UT run slower and need a longer timeout shell: bash if: matrix.os == 'windows-2022' run: echo "TIMEOUT=240s" >> $GITHUB_ENV - - name: Install build dependencies not available by default on custom-arm64-focal runners - shell: bash - if: matrix.os == 'custom-arm64-focal' - run: | - sudo apt update - sudo apt -y install build-essential - name: build_test shell: bash run: ./scripts/build_test.sh From 504766e54aa0128da620679660ef35072cb165fa Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Mon, 10 Jun 2024 17:02:38 +0200 Subject: [PATCH 054/102] X-chain - consolidate tx creation in unit tests (#2736) Signed-off-by: Alberto Benegiamo Co-authored-by: Stephen Buttolph --- vms/avm/environment_test.go | 37 +- vms/avm/index_test.go | 43 +- vms/avm/service_test.go | 1093 +++++++++++++++++++------------- vms/avm/state_test.go | 16 +- vms/avm/txs/txstest/builder.go | 231 +++++++ vms/avm/txs/txstest/context.go | 25 + vms/avm/txs/txstest/utxos.go | 103 +++ vms/avm/vm_benchmark_test.go | 11 +- vms/avm/vm_regression_test.go | 133 ++-- vms/avm/vm_test.go | 620 +++++++----------- vms/avm/wallet_service_test.go | 16 +- 11 files changed, 1340 insertions(+), 988 deletions(-) create mode 100644 vms/avm/txs/txstest/builder.go create mode 100644 vms/avm/txs/txstest/context.go create mode 100644 vms/avm/txs/txstest/utxos.go diff --git a/vms/avm/environment_test.go b/vms/avm/environment_test.go index 52a76425e7ae..d4375aa092d1 100644 
--- a/vms/avm/environment_test.go +++ b/vms/avm/environment_test.go @@ -24,7 +24,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -32,6 +31,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/txstest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -77,12 +77,6 @@ var ( keys = secp256k1.TestKeys()[:3] // TODO: Remove [:3] addrs []ids.ShortID // addrs[i] corresponds to keys[i] - - noFeesTestConfig = &config.Config{ - EUpgradeTime: mockable.MaxTime, - TxFee: 0, - CreateAssetTxFee: 0, - } ) func init() { @@ -110,13 +104,12 @@ type envConfig struct { } type environment struct { - genesisBytes []byte - genesisTx *txs.Tx - sharedMemory *atomic.Memory - issuer chan common.Message - vm *VM - service *Service - walletService *WalletService + genesisBytes []byte + genesisTx *txs.Tx + sharedMemory *atomic.Memory + issuer chan common.Message + vm *VM + txBuilder *txstest.Builder } // setup the testing environment @@ -210,13 +203,7 @@ func setup(tb testing.TB, c *envConfig) *environment { sharedMemory: m, issuer: issuer, vm: vm, - service: &Service{ - vm: vm, - }, - walletService: &WalletService{ - vm: vm, - pendingTxs: linked.NewHashmap[ids.ID, *txs.Tx](), - }, + txBuilder: txstest.New(vm.parser.Codec(), vm.ctx, &vm.Config, vm.feeAssetID, vm.state), } 
require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -230,6 +217,14 @@ func setup(tb testing.TB, c *envConfig) *environment { } require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + + tb.Cleanup(func() { + env.vm.ctx.Lock.Lock() + defer env.vm.ctx.Lock.Unlock() + + require.NoError(env.vm.Shutdown(context.Background())) + }) + return env } diff --git a/vms/avm/index_test.go b/vms/avm/index_test.go index 459d4230f32c..3d8614d5ee6d 100644 --- a/vms/avm/index_test.go +++ b/vms/avm/index_test.go @@ -4,7 +4,6 @@ package avm import ( - "context" "testing" "github.com/prometheus/client_golang/prometheus" @@ -28,13 +27,8 @@ import ( func TestIndexTransaction_Ordered(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, - }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() key := keys[0] addr := key.PublicKey().Address() @@ -72,13 +66,8 @@ func TestIndexTransaction_Ordered(t *testing.T) { func TestIndexTransaction_MultipleTransactions(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, - }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() addressTxMap := map[ids.ShortID]*txs.Tx{} txAssetID := avax.Asset{ID: env.genesisTx.ID()} @@ -120,13 +109,8 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) { func TestIndexTransaction_MultipleAddresses(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, - }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() addrs := 
make([]ids.ShortID, len(keys)) for i, key := range keys { @@ -163,13 +147,8 @@ func TestIndexTransaction_MultipleAddresses(t *testing.T) { func TestIndexer_Read(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, - }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() // generate test address and asset IDs assetID := ids.GenerateTestID() @@ -259,7 +238,7 @@ func buildUTXO(utxoID avax.UTXOID, txAssetID avax.Asset, addr ids.ShortID) *avax UTXOID: utxoID, Asset: txAssetID, Out: &secp256k1fx.TransferOutput{ - Amt: 1000, + Amt: startBalance, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, @@ -277,14 +256,14 @@ func buildTX(chainID ids.ID, utxoID avax.UTXOID, txAssetID avax.Asset, address . UTXOID: utxoID, Asset: txAssetID, In: &secp256k1fx.TransferInput{ - Amt: 1000, + Amt: startBalance, Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, Outs: []*avax.TransferableOutput{{ Asset: txAssetID, Out: &secp256k1fx.TransferOutput{ - Amt: 1000, + Amt: startBalance - testTxFee, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: address, diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 677a0193ba97..d9dbb8db6f12 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -4,7 +4,6 @@ package avm import ( - "context" "encoding/json" "strings" "testing" @@ -28,8 +27,11 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/block/executor" + 
"github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -48,17 +50,12 @@ func TestServiceIssueTx(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - txArgs := &api.FormattedTx{} txReply := &api.JSONTxID{} - err := env.service.IssueTx(nil, txArgs, txReply) + err := service.IssueTx(nil, txArgs, txReply) require.ErrorIs(err, codec.ErrCantUnpackVersion) tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, "AVAX") @@ -66,7 +63,7 @@ func TestServiceIssueTx(t *testing.T) { require.NoError(err) txArgs.Encoding = formatting.Hex txReply = &api.JSONTxID{} - require.NoError(env.service.IssueTx(nil, txArgs, txReply)) + require.NoError(service.IssueTx(nil, txArgs, txReply)) require.Equal(tx.ID(), txReply.TxID) } @@ -76,33 +73,28 @@ func TestServiceGetTxStatus(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - statusArgs := &api.JSONTxID{} statusReply := &GetTxStatusReply{} - err := env.service.GetTxStatus(nil, statusArgs, statusReply) + err := service.GetTxStatus(nil, statusArgs, statusReply) require.ErrorIs(err, errNilTxID) - newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) + newTx := newAvaxBaseTxWithOutputs(t, env) txID := newTx.ID() statusArgs = &api.JSONTxID{ TxID: txID, } statusReply = &GetTxStatusReply{} - require.NoError(env.service.GetTxStatus(nil, statusArgs, statusReply)) + require.NoError(service.GetTxStatus(nil, statusArgs, 
statusReply)) require.Equal(choices.Unknown, statusReply.Status) issueAndAccept(require, env.vm, env.issuer, newTx) statusReply = &GetTxStatusReply{} - require.NoError(env.service.GetTxStatus(nil, statusArgs, statusReply)) + require.NoError(service.GetTxStatus(nil, statusArgs, statusReply)) require.Equal(choices.Accepted, statusReply.Status) } @@ -113,11 +105,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + service := &Service{vm: env.vm} assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() @@ -153,7 +141,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply := &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] require.Equal(uint64(1337), uint64(balanceReply.Balance)) require.Len(balanceReply.UTXOIDs, 1) @@ -164,7 +152,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(balanceReply.Balance) require.Empty(balanceReply.UTXOIDs) @@ -200,7 +188,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] require.Equal(uint64(1337+1337), uint64(balanceReply.Balance)) 
require.Len(balanceReply.UTXOIDs, 2) @@ -211,7 +199,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(balanceReply.Balance) require.Empty(balanceReply.UTXOIDs) @@ -249,7 +237,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] require.Equal(uint64(1337*3), uint64(balanceReply.Balance)) require.Len(balanceReply.UTXOIDs, 3) @@ -260,7 +248,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(balanceReply.Balance) require.Empty(balanceReply.UTXOIDs) @@ -271,14 +259,11 @@ func TestServiceGetTxs(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + service := &Service{vm: env.vm} + var err error env.vm.addressTxsIndexer, err = index.NewIndexer(env.vm.db, env.vm.ctx.Log, "", prometheus.NewRegistry(), false) require.NoError(err) - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() @@ -297,14 +282,14 @@ func TestServiceGetTxs(t *testing.T) { AssetID: assetID.String(), } getTxsReply := &GetAddressTxsReply{} - 
require.NoError(env.service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) + require.NoError(service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) require.Len(getTxsReply.TxIDs, 10) require.Equal(getTxsReply.TxIDs, testTxs[:10]) // get the second page getTxsArgs.Cursor = getTxsReply.Cursor getTxsReply = &GetAddressTxsReply{} - require.NoError(env.service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) + require.NoError(service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) require.Len(getTxsReply.TxIDs, 10) require.Equal(getTxsReply.TxIDs, testTxs[10:20]) } @@ -315,11 +300,7 @@ func TestServiceGetAllBalances(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + service := &Service{vm: env.vm} assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() @@ -353,7 +334,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply := &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Len(reply.Balances, 1) require.Equal(assetID.String(), reply.Balances[0].AssetID) @@ -364,7 +345,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) require.Empty(reply.Balances) env.vm.ctx.Lock.Lock() @@ -397,7 +378,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] 
require.Len(reply.Balances, 1) require.Equal(assetID.String(), reply.Balances[0].AssetID) @@ -408,7 +389,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Empty(reply.Balances) @@ -444,7 +425,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] // The balance should include the UTXO since it is partly owned by [addr] require.Len(reply.Balances, 1) @@ -455,7 +436,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Empty(reply.Balances) @@ -489,7 +470,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Len(reply.Balances, 2) gotAssetIDs := []string{reply.Balances[0].AssetID, reply.Balances[1].AssetID} @@ -504,7 +485,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + 
require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Empty(reply.Balances) } @@ -515,18 +496,13 @@ func TestServiceGetTx(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - txID := env.genesisTx.ID() reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.Hex, }, &reply)) @@ -545,18 +521,14 @@ func TestServiceGetTxJSON_BaseTx(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) + newTx := newAvaxBaseTxWithOutputs(t, env) issueAndAccept(require, env.vm, env.issuer, newTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: newTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -578,7 +550,19 @@ func TestServiceGetTxJSON_BaseTx(t *testing.T) { "addresses": [ "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" ], - "amount": 49000, + "amount": 1000, + "locktime": 0, + "threshold": 1 + } + }, + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1d6kkj0qh4wcmus3tk59npwt3rluc6en72ngurd" + ], + "amount": 48000, "locktime": 0, "threshold": 1 } @@ -630,18 +614,14 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + 
service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - newTx := newAvaxExportTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) + newTx := buildTestExportTx(t, env, env.vm.ctx.CChainID) issueAndAccept(require, env.vm, env.issuer, newTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: newTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -654,7 +634,20 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], "inputs": [ { "txID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -670,7 +663,7 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { } ], "memo": "0x", - "destinationChain": "11111111111111111111111111111111LpoYY", + "destinationChain": "2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w", "exportedOutputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -679,7 +672,7 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { "addresses": [ "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" ], - "amount": 49000, + "amount": 1000, "locktime": 0, "threshold": 1 } @@ -714,24 +707,65 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - 
defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 0: { + &nftfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &nftfx.MintOutput{ + GroupID: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + 2: { + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: createAssetTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -745,8 +779,34 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 49000, + 
"locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "outputIndex": 2, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 50000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "name": "Team Rocket", "symbol": "TR", @@ -767,6 +827,7 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { "addresses": [ "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" ], + "groupID": 0, "locktime": 0, "threshold": 1 } @@ -816,13 +877,27 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { } ] }, - "credentials": null, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], "id": "PLACEHOLDER_TX_ID" }` expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", createAssetTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", createAssetTx.Unsigned.(*txs.CreateAssetTx).BlockchainID.String(), 1) + sigStr, err := formatting.Encode(formatting.HexNC, createAssetTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -830,29 +905,43 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := 
newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &nftfx.MintOutput{ + GroupID: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintNFTTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildNFTxMintOp(createAssetTx, key, 2, 1)) - require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + op := buildNFTxMintOp(createAssetTx, key, 1, 1) + mintNFTTx := buildOperationTxWithOps(t, env, op) issueAndAccept(require, env.vm, env.issuer, mintNFTTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintNFTTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -866,8 +955,34 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "rSiY2aqcahSU5vyJeMiNBnwtPwfJFxsxskAGbU3HxHvAkrdpy", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -875,7 +990,7 @@ 
func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 2 + "outputIndex": 1 } ], "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", @@ -901,6 +1016,14 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", "credential": { @@ -917,10 +1040,10 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[0].Credential.(*nftfx.Credential).Sigs[0][:]) + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[1].Credential.(*nftfx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -929,32 +1052,46 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, 
env.vm.parser) + initialStates := map[uint32][]verify.State{ + 0: { + &nftfx.MintOutput{ + GroupID: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintOp1 := buildNFTxMintOp(createAssetTx, key, 2, 1) - mintOp2 := buildNFTxMintOp(createAssetTx, key, 3, 2) - mintNFTTx := buildOperationTxWithOp(env.vm.ctx.ChainID, mintOp1, mintOp2) - - require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) + mintOp1 := buildNFTxMintOp(createAssetTx, key, 1, 0) + mintOp2 := buildNFTxMintOp(createAssetTx, key, 2, 1) + mintNFTTx := buildOperationTxWithOps(t, env, mintOp1, mintOp2) issueAndAccept(require, env.vm, env.issuer, mintNFTTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintNFTTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -968,8 +1105,34 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "BBhSA95iv6ueXc7xrMSka1bByBqcwJxyvMiyjy5H8ccAgxy4P", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": 
"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -977,7 +1140,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 2 + "outputIndex": 1 } ], "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", @@ -987,7 +1150,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { 0 ] }, - "groupID": 1, + "groupID": 0, "payload": "0x68656c6c6f", "outputs": [ { @@ -1005,7 +1168,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 3 + "outputIndex": 2 } ], "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", @@ -1015,7 +1178,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { 0 ] }, - "groupID": 2, + "groupID": 1, "payload": "0x68656c6c6f", "outputs": [ { @@ -1031,6 +1194,14 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", "credential": { @@ -1055,10 +1226,10 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[0].Credential.(*nftfx.Credential).Sigs[0][:]) + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[1].Credential.(*nftfx.Credential).Sigs[0][:]) require.NoError(err) 
- expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 3) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1067,29 +1238,40 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 0: { + &nftfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintSecpOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildSecpMintOp(createAssetTx, key, 0)) - require.NoError(mintSecpOpTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + op := buildSecpMintOp(createAssetTx, key, 1) + mintSecpOpTx := buildOperationTxWithOps(t, env, op) issueAndAccept(require, env.vm, env.issuer, mintSecpOpTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintSecpOpTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -1103,8 +1285,34 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { "unsignedTx": { 
"networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2YhAg3XUdub5syHHePZG7q3yFjKAy7ahsvQDxq5SMrYbN1s5Gn", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -1112,7 +1320,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 0 + "outputIndex": 1 } ], "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", @@ -1142,6 +1350,14 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { @@ -1161,7 +1377,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1170,32 +1386,44 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: 
noFeesTestConfig, + fork: durango, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 0: { + &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }, + 1: { + &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - op1 := buildSecpMintOp(createAssetTx, key, 0) - op2 := buildSecpMintOp(createAssetTx, key, 1) - mintSecpOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, op1, op2) - - require.NoError(mintSecpOpTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) + op1 := buildSecpMintOp(createAssetTx, key, 1) + op2 := buildSecpMintOp(createAssetTx, key, 2) + mintSecpOpTx := buildOperationTxWithOps(t, env, op1, op2) issueAndAccept(require, env.vm, env.issuer, mintSecpOpTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintSecpOpTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -1209,8 +1437,34 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + 
"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2vxorPLUw5sneb7Mdhhjuws3H5AqaDp1V8ETz6fEuzvn835rVX", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -1218,7 +1472,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 0 + "outputIndex": 1 } ], "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", @@ -1250,7 +1504,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 1 + "outputIndex": 2 } ], "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", @@ -1288,6 +1542,14 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { ] } }, + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { @@ -1307,7 +1569,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 3) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1316,29 +1578,35 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: 
noFeesTestConfig, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 2: { + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintPropertyFxOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildPropertyFxMintOp(createAssetTx, key, 4)) - require.NoError(mintPropertyFxOpTx.SignPropertyFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + op := buildPropertyFxMintOp(createAssetTx, key, 1) + mintPropertyFxOpTx := buildOperationTxWithOps(t, env, op) issueAndAccept(require, env.vm, env.issuer, mintPropertyFxOpTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintPropertyFxOpTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -1352,8 +1620,34 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "nNUGBjszswU3ZmhCb8hBNWmg335UZqGWmNrYTAGyMF4bFpMXm", + "outputIndex": 0, + "assetID": 
"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -1361,7 +1655,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 4 + "outputIndex": 1 } ], "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", @@ -1388,6 +1682,14 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", "credential": { @@ -1404,10 +1706,10 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[0].Credential.(*propertyfx.Credential).Sigs[0][:]) + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[1].Credential.(*propertyfx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1416,32 +1718,42 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, + fork: latest, additionalFxs: []*common.Fx{{ ID: 
propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 2: { + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - op1 := buildPropertyFxMintOp(createAssetTx, key, 4) - op2 := buildPropertyFxMintOp(createAssetTx, key, 5) - mintPropertyFxOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, op1, op2) - - require.NoError(mintPropertyFxOpTx.SignPropertyFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) + op1 := buildPropertyFxMintOp(createAssetTx, key, 1) + op2 := buildPropertyFxMintOp(createAssetTx, key, 2) + mintPropertyFxOpTx := buildOperationTxWithOps(t, env, op1, op2) issueAndAccept(require, env.vm, env.issuer, mintPropertyFxOpTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintPropertyFxOpTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -1455,8 +1767,34 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + 
"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2NV5AGoQQHVRY6VkT8sht8bhZDHR7uwta7fk7JwAZpacqMRWCa", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -1464,7 +1802,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 4 + "outputIndex": 1 } ], "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", @@ -1493,7 +1831,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 5 + "outputIndex": 2 } ], "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", @@ -1520,6 +1858,14 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", "credential": { @@ -1544,169 +1890,76 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[0].Credential.(*propertyfx.Credential).Sigs[0][:]) + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[1].Credential.(*propertyfx.Credential).Sigs[0][:]) 
require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 3) require.Equal(expectedReplyTxString, string(replyTxBytes)) } -func newAvaxBaseTxWithOutputs(t *testing.T, genesisBytes []byte, chainID ids.ID, fee uint64, parser txs.Parser) *txs.Tx { - avaxTx := getCreateTxFromGenesisTest(t, genesisBytes, "AVAX") - key := keys[0] - tx := buildBaseTx(avaxTx, chainID, fee, key) - require.NoError(t, tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - return tx -} - -func newAvaxExportTxWithOutputs(t *testing.T, genesisBytes []byte, chainID ids.ID, fee uint64, parser txs.Parser) *txs.Tx { - avaxTx := getCreateTxFromGenesisTest(t, genesisBytes, "AVAX") - key := keys[0] - tx := buildExportTx(avaxTx, chainID, fee, key) - require.NoError(t, tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - return tx -} - -func newAvaxCreateAssetTxWithOutputs(t *testing.T, chainID ids.ID, parser txs.Parser) *txs.Tx { - key := keys[0] - tx := buildCreateAssetTx(chainID, key) - require.NoError(t, tx.Initialize(parser.Codec())) - return tx -} - -func buildBaseTx(avaxTx *txs.Tx, chainID ids.ID, fee uint64, key *secp256k1.PrivateKey) *txs.Tx { - return &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - fee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: 
[]ids.ShortID{key.PublicKey().Address()}, - }, - }, - }}, - }, - }} -} - -func buildExportTx(avaxTx *txs.Tx, chainID ids.ID, fee uint64, key *secp256k1.PrivateKey) *txs.Tx { - return &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }, - }, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxTx.ID()}, +func newAvaxBaseTxWithOutputs(t *testing.T, env *environment) *txs.Tx { + var ( + memo = []byte{1, 2, 3, 4, 5, 6, 7, 8} + key = keys[0] + changeKey = keys[1] + kc = secp256k1fx.NewKeychain(key) + ) + + tx, err := env.txBuilder.BaseTx( + []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: env.vm.feeAssetID}, Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - fee, + Amt: units.MicroAvax, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, }}, - }} + memo, + kc, + changeKey.PublicKey().Address(), + ) + require.NoError(t, err) + return tx } -func buildCreateAssetTx(chainID ids.ID, key *secp256k1.PrivateKey) *txs.Tx { - return &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - }}, - Name: "Team Rocket", - Symbol: "TR", - Denomination: 0, - States: []*txs.InitialState{ - { - FxIndex: 0, - Outs: []verify.State{ - &secp256k1fx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, &secp256k1fx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }, - }, - { - 
FxIndex: 1, - Outs: []verify.State{ - &nftfx.MintOutput{ - GroupID: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - &nftfx.MintOutput{ - GroupID: 2, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }, - }, - { - FxIndex: 2, - Outs: []verify.State{ - &propertyfx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - &propertyfx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - }, - }} +func newAvaxCreateAssetTxWithOutputs(t *testing.T, env *environment, initialStates map[uint32][]verify.State) *txs.Tx { + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + tx, err := env.txBuilder.CreateAssetTx( + "Team Rocket", // name + "TR", // symbol + 0, // denomination + initialStates, + kc, + key.Address(), + ) + require.NoError(t, err) + return tx +} + +func buildTestExportTx(t *testing.T, env *environment, chainID ids.ID) *txs.Tx { + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + to = key.PublicKey().Address() + ) + + tx, err := env.txBuilder.ExportTx( + chainID, + to, + env.vm.feeAssetID, + units.MicroAvax, + kc, + key.Address(), + ) + require.NoError(t, err) + return tx } func buildNFTxMintOp(createAssetTx *txs.Tx, key *secp256k1.PrivateKey, outputIndex, groupID uint32) *txs.Operation { @@ -1782,14 +2035,19 @@ func buildSecpMintOp(createAssetTx *txs.Tx, key *secp256k1.PrivateKey, outputInd } } -func buildOperationTxWithOp(chainID ids.ID, op ...*txs.Operation) *txs.Tx { - return &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - }}, - Ops: op, - }} +func buildOperationTxWithOps(t *testing.T, env *environment, op ...*txs.Operation) *txs.Tx 
{ + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + tx, err := env.txBuilder.Operation( + op, + kc, + key.Address(), + ) + require.NoError(t, err) + return tx } func TestServiceGetNilTx(t *testing.T) { @@ -1798,16 +2056,11 @@ func TestServiceGetNilTx(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - reply := api.GetTxReply{} - err := env.service.GetTx(nil, &api.GetTxArgs{}, &reply) + err := service.GetTx(nil, &api.GetTxArgs{}, &reply) require.ErrorIs(err, errNilTxID) } @@ -1817,16 +2070,11 @@ func TestServiceGetUnknownTx(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - reply := api.GetTxReply{} - err := env.service.GetTx(nil, &api.GetTxArgs{TxID: ids.GenerateTestID()}, &reply) + err := service.GetTx(nil, &api.GetTxArgs{TxID: ids.GenerateTestID()}, &reply) require.ErrorIs(err, database.ErrNotFound) } @@ -1834,11 +2082,8 @@ func TestServiceGetUTXOs(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(t, env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + service := &Service{vm: env.vm} + env.vm.ctx.Lock.Unlock() rawAddr := ids.GenerateTestShortID() rawEmptyAddr := ids.GenerateTestShortID() @@ -1910,8 +2155,6 @@ func TestServiceGetUTXOs(t *testing.T) { xEmptyAddr, err := env.vm.FormatLocalAddress(rawEmptyAddr) require.NoError(t, err) - env.vm.ctx.Lock.Unlock() - tests := []struct { label string count int @@ -2075,7 +2318,7 @@ func TestServiceGetUTXOs(t *testing.T) { t.Run(test.label, func(t *testing.T) { require := require.New(t) reply := &api.GetUTXOsReply{} - 
err := env.service.GetUTXOs(nil, test.args, reply) + err := service.GetUTXOs(nil, test.args, reply) require.ErrorIs(err, test.expectedErr) if test.expectedErr != nil { return @@ -2091,18 +2334,13 @@ func TestGetAssetDescription(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - avaxAssetID := env.genesisTx.ID() reply := GetAssetDescriptionReply{} - require.NoError(env.service.GetAssetDescription(nil, &GetAssetDescriptionArgs{ + require.NoError(service.GetAssetDescription(nil, &GetAssetDescriptionArgs{ AssetID: avaxAssetID.String(), }, &reply)) @@ -2116,20 +2354,15 @@ func TestGetBalance(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - avaxAssetID := env.genesisTx.ID() reply := GetBalanceReply{} addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) require.NoError(err) - require.NoError(env.service.GetBalance(nil, &GetBalanceArgs{ + require.NoError(service.GetBalance(nil, &GetBalanceArgs{ Address: addrStr, AssetID: avaxAssetID.String(), }, &reply)) @@ -2150,14 +2383,9 @@ func TestCreateFixedCapAsset(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - reply := AssetIDChangeAddr{} addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) require.NoError(err) @@ -2166,7 +2394,7 @@ func TestCreateFixedCapAsset(t *testing.T) { require.NoError(err) _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) - 
require.NoError(env.service.CreateFixedCapAsset(nil, &CreateAssetArgs{ + require.NoError(service.CreateFixedCapAsset(nil, &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ Username: username, @@ -2201,21 +2429,16 @@ func TestCreateVariableCapAsset(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - reply := AssetIDChangeAddr{} minterAddrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) require.NoError(err) _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) changeAddrStr := fromAddrsStr[0] - require.NoError(env.service.CreateVariableCapAsset(nil, &CreateAssetArgs{ + require.NoError(service.CreateVariableCapAsset(nil, &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ Username: username, @@ -2254,7 +2477,7 @@ func TestCreateVariableCapAsset(t *testing.T) { To: minterAddrStr, // Send newly minted tokens to this address } mintReply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.Mint(nil, mintArgs, mintReply)) + require.NoError(service.Mint(nil, mintArgs, mintReply)) require.Equal(changeAddrStr, mintReply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, mintReply.TxID) @@ -2275,7 +2498,7 @@ func TestCreateVariableCapAsset(t *testing.T) { }, } sendReply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.Send(nil, sendArgs, sendReply)) + require.NoError(service.Send(nil, sendArgs, sendReply)) require.Equal(changeAddrStr, sendReply.ChangeAddr) }) } @@ -2294,14 +2517,9 @@ func TestNFTWorkflow(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - fromAddrs, fromAddrsStr := sampleAddrs(t, 
env.vm.AddressManager, addrs) // Test minting of the created variable cap asset @@ -2329,7 +2547,7 @@ func TestNFTWorkflow(t *testing.T) { }, } createReply := &AssetIDChangeAddr{} - require.NoError(env.service.CreateNFTAsset(nil, createArgs, createReply)) + require.NoError(service.CreateNFTAsset(nil, createArgs, createReply)) require.Equal(fromAddrsStr[0], createReply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, createReply.AssetID) @@ -2342,7 +2560,7 @@ func TestNFTWorkflow(t *testing.T) { require.NoError(err) reply := &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, + require.NoError(service.GetBalance(nil, &GetBalanceArgs{ Address: addrStr, AssetID: env.vm.feeAssetID.String(), @@ -2380,7 +2598,7 @@ func TestNFTWorkflow(t *testing.T) { } mintReply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.MintNFT(nil, mintArgs, mintReply)) + require.NoError(service.MintNFT(nil, mintArgs, mintReply)) require.Equal(fromAddrsStr[0], createReply.ChangeAddr) // Accept the transaction so that we can send the newly minted NFT @@ -2400,7 +2618,7 @@ func TestNFTWorkflow(t *testing.T) { To: addrStr, } sendReply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.SendNFT(nil, sendArgs, sendReply)) + require.NoError(service.SendNFT(nil, sendArgs, sendReply)) require.Equal(fromAddrsStr[0], sendReply.ChangeAddr) }) } @@ -2415,14 +2633,9 @@ func TestImportExportKey(t *testing.T) { password: password, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - sk, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -2434,7 +2647,7 @@ func TestImportExportKey(t *testing.T) { PrivateKey: sk, } importReply := &api.JSONAddress{} - require.NoError(env.service.ImportKey(nil, importArgs, importReply)) + require.NoError(service.ImportKey(nil, importArgs, importReply)) addrStr, err := 
env.vm.FormatLocalAddress(sk.PublicKey().Address()) require.NoError(err) @@ -2446,7 +2659,7 @@ func TestImportExportKey(t *testing.T) { Address: addrStr, } exportReply := &ExportKeyReply{} - require.NoError(env.service.ExportKey(nil, exportArgs, exportReply)) + require.NoError(service.ExportKey(nil, exportArgs, exportReply)) require.Equal(sk.Bytes(), exportReply.PrivateKey.Bytes()) } @@ -2459,14 +2672,9 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { password: password, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - sk, err := secp256k1.NewPrivateKey() require.NoError(err) args := ImportKeyArgs{ @@ -2477,7 +2685,7 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { PrivateKey: sk, } reply := api.JSONAddress{} - require.NoError(env.service.ImportKey(nil, &args, &reply)) + require.NoError(service.ImportKey(nil, &args, &reply)) expectedAddress, err := env.vm.FormatLocalAddress(sk.PublicKey().Address()) require.NoError(err) @@ -2485,7 +2693,7 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { require.Equal(expectedAddress, reply.Address) reply2 := api.JSONAddress{} - require.NoError(env.service.ImportKey(nil, &args, &reply2)) + require.NoError(service.ImportKey(nil, &args, &reply2)) require.Equal(expectedAddress, reply2.Address) @@ -2494,7 +2702,7 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { Password: password, } addrsReply := api.JSONAddresses{} - require.NoError(env.service.ListAddresses(nil, &addrsArgs, &addrsReply)) + require.NoError(service.ListAddresses(nil, &addrsArgs, &addrsReply)) require.Len(addrsReply.Addresses, 1) require.Equal(expectedAddress, addrsReply.Addresses[0]) @@ -2510,14 +2718,9 @@ func TestSend(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - 
require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - assetID := env.genesisTx.ID() addr := keys[0].PublicKey().Address() @@ -2543,7 +2746,7 @@ func TestSend(t *testing.T) { }, } reply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.Send(nil, args, reply)) + require.NoError(service.Send(nil, args, reply)) require.Equal(changeAddrStr, reply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, reply.TxID) @@ -2561,15 +2764,13 @@ func TestSendMultiple(t *testing.T) { password: password, initialKeys: keys, }}, + vmStaticConfig: &config.Config{ + EUpgradeTime: mockable.MaxTime, + }, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - assetID := env.genesisTx.ID() addr := keys[0].PublicKey().Address() @@ -2602,7 +2803,7 @@ func TestSendMultiple(t *testing.T) { }, } reply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.SendMultiple(nil, args, reply)) + require.NoError(service.SendMultiple(nil, args, reply)) require.Equal(changeAddrStr, reply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, reply.TxID) @@ -2619,21 +2820,16 @@ func TestCreateAndListAddresses(t *testing.T) { password: password, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createArgs := &api.UserPass{ Username: username, Password: password, } createReply := &api.JSONAddress{} - require.NoError(env.service.CreateAddress(nil, createArgs, createReply)) + require.NoError(service.CreateAddress(nil, createArgs, createReply)) newAddr := createReply.Address @@ -2643,7 +2839,7 @@ func TestCreateAndListAddresses(t *testing.T) { } listReply := &api.JSONAddresses{} - require.NoError(env.service.ListAddresses(nil, listArgs, listReply)) + 
require.NoError(service.ListAddresses(nil, listArgs, listReply)) require.Contains(listReply.Addresses, newAddr) } @@ -2660,12 +2856,9 @@ func TestImport(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} + env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() assetID := env.genesisTx.ID() addr0 := keys[0].PublicKey().Address() @@ -2697,8 +2890,6 @@ func TestImport(t *testing.T) { }, })) - env.vm.ctx.Lock.Unlock() - addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) require.NoError(err) args := &ImportArgs{ @@ -2710,7 +2901,7 @@ func TestImport(t *testing.T) { To: addrStr, } reply := &api.JSONTxID{} - require.NoError(env.service.Import(nil, args, reply)) + require.NoError(service.Import(nil, args, reply)) }) } } diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go index e71acf1251cb..35744fdc63e8 100644 --- a/vms/avm/state_test.go +++ b/vms/avm/state_test.go @@ -4,7 +4,6 @@ package avm import ( - "context" "math" "testing" @@ -35,10 +34,7 @@ func TestSetsAndGets(t *testing.T) { }, }}, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ @@ -98,10 +94,7 @@ func TestFundingNoAddresses(t *testing.T) { }, }}, }) - defer func() { - require.NoError(t, env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ @@ -131,10 +124,7 @@ func TestFundingAddresses(t *testing.T) { }, }}, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ diff --git a/vms/avm/txs/txstest/builder.go b/vms/avm/txs/txstest/builder.go new file mode 100644 index 000000000000..c52e56cdb5e7 --- /dev/null +++ 
b/vms/avm/txs/txstest/builder.go @@ -0,0 +1,231 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package txstest + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/vms/avm/state" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" + "github.com/ava-labs/avalanchego/wallet/chain/x/signer" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" +) + +type Builder struct { + utxos *utxos + ctx *builder.Context +} + +func New( + codec codec.Manager, + ctx *snow.Context, + cfg *config.Config, + feeAssetID ids.ID, + state state.State, +) *Builder { + utxos := newUTXOs(ctx, state, ctx.SharedMemory, codec) + return &Builder{ + utxos: utxos, + ctx: newContext(ctx, cfg, feeAssetID), + } +} + +func (b *Builder) CreateAssetTx( + name, symbol string, + denomination byte, + initialStates map[uint32][]verify.State, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewCreateAssetTx( + name, + symbol, + denomination, + initialStates, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed building base tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) BaseTx( + outs []*avax.TransferableOutput, + memo []byte, + kc *secp256k1fx.Keychain, + changeAddr 
ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewBaseTx( + outs, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + common.WithMemo(memo), + ) + if err != nil { + return nil, fmt.Errorf("failed building base tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) MintNFT( + assetID ids.ID, + payload []byte, + owners []*secp256k1fx.OutputOwners, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewOperationTxMintNFT( + assetID, + payload, + owners, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed minting NFTs: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) MintFTs( + outputs map[ids.ID]*secp256k1fx.TransferOutput, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewOperationTxMintFT( + outputs, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed minting FTs: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) Operation( + ops []*txs.Operation, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewOperationTx( + ops, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed building operation tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) 
ImportTx( + sourceChain ids.ID, + to ids.ShortID, + kc *secp256k1fx.Keychain, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + outOwner := &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{to}, + } + + utx, err := xBuilder.NewImportTx( + sourceChain, + outOwner, + ) + if err != nil { + return nil, fmt.Errorf("failed building import tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) ExportTx( + destinationChain ids.ID, + to ids.ShortID, + exportedAssetID ids.ID, + exportedAmt uint64, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + outputs := []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: exportedAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: exportedAmt, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }} + + utx, err := xBuilder.NewExportTx( + destinationChain, + outputs, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed building export tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) builders(kc *secp256k1fx.Keychain) (builder.Builder, signer.Signer) { + var ( + addrs = kc.Addresses() + wa = &walletUTXOsAdapter{ + utxos: b.utxos, + addrs: addrs, + } + builder = builder.New(addrs, b.ctx, wa) + signer = signer.New(kc, wa) + ) + return builder, signer +} diff --git a/vms/avm/txs/txstest/context.go b/vms/avm/txs/txstest/context.go new file mode 100644 index 000000000000..ea3b9f2410f4 --- /dev/null +++ b/vms/avm/txs/txstest/context.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package txstest + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" +) + +func newContext( + ctx *snow.Context, + cfg *config.Config, + feeAssetID ids.ID, +) *builder.Context { + return &builder.Context{ + NetworkID: ctx.NetworkID, + BlockchainID: ctx.XChainID, + AVAXAssetID: feeAssetID, + BaseTxFee: cfg.TxFee, + CreateAssetTxFee: cfg.CreateAssetTxFee, + } +} diff --git a/vms/avm/txs/txstest/utxos.go b/vms/avm/txs/txstest/utxos.go new file mode 100644 index 000000000000..39b3b712905b --- /dev/null +++ b/vms/avm/txs/txstest/utxos.go @@ -0,0 +1,103 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package txstest + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/state" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" + "github.com/ava-labs/avalanchego/wallet/chain/x/signer" +) + +const maxPageSize uint64 = 1024 + +var ( + _ builder.Backend = (*walletUTXOsAdapter)(nil) + _ signer.Backend = (*walletUTXOsAdapter)(nil) +) + +func newUTXOs( + ctx *snow.Context, + state state.State, + sharedMemory atomic.SharedMemory, + codec codec.Manager, +) *utxos { + return &utxos{ + xchainID: ctx.ChainID, + state: state, + sharedMemory: sharedMemory, + codec: codec, + } +} + +type utxos struct { + xchainID ids.ID + state state.State + sharedMemory atomic.SharedMemory + codec codec.Manager +} + +func (u 
*utxos) UTXOs(addrs set.Set[ids.ShortID], sourceChainID ids.ID) ([]*avax.UTXO, error) { + if sourceChainID == u.xchainID { + return avax.GetAllUTXOs(u.state, addrs) + } + + atomicUTXOs, _, _, err := avax.GetAtomicUTXOs( + u.sharedMemory, + u.codec, + sourceChainID, + addrs, + ids.ShortEmpty, + ids.Empty, + int(maxPageSize), + ) + return atomicUTXOs, err +} + +func (u *utxos) GetUTXO(addrs set.Set[ids.ShortID], chainID, utxoID ids.ID) (*avax.UTXO, error) { + if chainID == u.xchainID { + return u.state.GetUTXO(utxoID) + } + + atomicUTXOs, _, _, err := avax.GetAtomicUTXOs( + u.sharedMemory, + u.codec, + chainID, + addrs, + ids.ShortEmpty, + ids.Empty, + int(maxPageSize), + ) + if err != nil { + return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) + } + for _, utxo := range atomicUTXOs { + if utxo.InputID() == utxoID { + return utxo, nil + } + } + return nil, database.ErrNotFound +} + +type walletUTXOsAdapter struct { + utxos *utxos + addrs set.Set[ids.ShortID] +} + +func (w *walletUTXOsAdapter) UTXOs(_ context.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) { + return w.utxos.UTXOs(w.addrs, sourceChainID) +} + +func (w *walletUTXOsAdapter) GetUTXO(_ context.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) { + return w.utxos.GetUTXO(w.addrs, chainID, utxoID) +} diff --git a/vms/avm/vm_benchmark_test.go b/vms/avm/vm_benchmark_test.go index e0bb3080c4e1..096ed51e13bc 100644 --- a/vms/avm/vm_benchmark_test.go +++ b/vms/avm/vm_benchmark_test.go @@ -4,7 +4,6 @@ package avm import ( - "context" "fmt" "math/rand" "testing" @@ -29,10 +28,7 @@ func BenchmarkLoadUser(b *testing.B) { password: password, }}, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() user, err := keystore.NewUserFromKeystore(env.vm.ctx.Keystore, username, password) require.NoError(err) @@ -69,10 +65,7 @@ func getAllUTXOsBenchmark(b *testing.B, utxoCount int, randSrc rand.Source) { require := 
require.New(b) env := setup(b, &envConfig{fork: latest}) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() addr := ids.GenerateTestShortID() diff --git a/vms/avm/vm_regression_test.go b/vms/avm/vm_regression_test.go index 0151ccaf6dab..9e684e756d5c 100644 --- a/vms/avm/vm_regression_test.go +++ b/vms/avm/vm_regression_test.go @@ -4,15 +4,11 @@ package avm import ( - "context" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/nftfx" @@ -22,95 +18,78 @@ import ( func TestVerifyFxUsage(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, - }) + env := setup(t, &envConfig{fork: latest}) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - }}, - Name: "Team Rocket", - Symbol: "TR", - Denomination: 0, - States: []*txs.InitialState{ - { - FxIndex: 0, - Outs: []verify.State{ - &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + initialStates := map[uint32][]verify.State{ + 0: { + &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: 
[]ids.ShortID{keys[0].PublicKey().Address()}, }, }, - { - FxIndex: 1, - Outs: []verify.State{ - &nftfx.MintOutput{ - GroupID: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, + }, + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, }, - }} - require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) + } + + // Create the asset + createAssetTx, err := env.txBuilder.CreateAssetTx( + "Team Rocket", // name + "TR", // symbol + 0, // denomination + initialStates, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, + // Mint the NFT + mintNFTTx, err := env.txBuilder.MintNFT( + createAssetTx.ID(), + []byte{'h', 'e', 'l', 'l', 'o'}, // payload + []*secp256k1fx.OutputOwners{{ + Threshold: 1, + Addrs: []ids.ShortID{key.Address()}, }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: createAssetTx.ID(), - OutputIndex: 1, - }}, - Op: &nftfx.MintOperation{ - MintInput: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - GroupID: 1, - Payload: []byte{'h', 'e', 'l', 'l', 'o'}, - Outputs: []*secp256k1fx.OutputOwners{{}}, - }, - }}, - }} - require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, mintNFTTx) - spendTx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: createAssetTx.ID(), - OutputIndex: 0, - }, + // move the NFT + to := 
keys[2].PublicKey().Address() + spendTx, err := env.txBuilder.BaseTx( + []*avax.TransferableOutput{{ Asset: avax.Asset{ID: createAssetTx.ID()}, - In: &secp256k1fx.TransferInput{ + Out: &secp256k1fx.TransferOutput{ Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, }, }, }}, - }}} - require.NoError(spendTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + nil, // memo + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, spendTx) } diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index 25824f4d0097..33af48c483f8 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -19,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -118,11 +117,6 @@ func TestIssueTx(t *testing.T) { fork: latest, }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, "AVAX") issueAndAccept(require, env.vm, env.issuer, tx) @@ -133,99 +127,71 @@ func TestIssueNFT(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, + fork: latest, }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - 
BlockchainID: env.vm.ctx.XChainID, - }}, - Name: "Team Rocket", - Symbol: "TR", - Denomination: 0, - States: []*txs.InitialState{{ - FxIndex: 1, - Outs: []verify.State{ - &nftfx.MintOutput{ - GroupID: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - &nftfx.MintOutput{ - GroupID: 2, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + // Create the asset + initialStates := map[uint32][]verify.State{ + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, - }}, - }} - require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) + }, + } + + createAssetTx, err := env.txBuilder.CreateAssetTx( + "Team Rocket", // name + "TR", // symbol + 0, // denomination + initialStates, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, + // Mint the NFT + mintNFTTx, err := env.txBuilder.MintNFT( + createAssetTx.ID(), + []byte{'h', 'e', 'l', 'l', 'o'}, // payload + []*secp256k1fx.OutputOwners{{ + Threshold: 1, + Addrs: []ids.ShortID{key.Address()}, }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: createAssetTx.ID(), - OutputIndex: 0, - }}, - Op: &nftfx.MintOperation{ - MintInput: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - GroupID: 1, - Payload: []byte{'h', 'e', 'l', 'l', 'o'}, - Outputs: []*secp256k1fx.OutputOwners{{}}, - }, - }}, - }} - require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + kc, + key.Address(), + ) + require.NoError(err) 
issueAndAccept(require, env.vm, env.issuer, mintNFTTx) - transferNFTTx := &txs.Tx{ - Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: mintNFTTx.ID(), - OutputIndex: 0, - }}, - Op: &nftfx.TransferOperation{ - Input: secp256k1fx.Input{}, - Output: nftfx.TransferOutput{ - GroupID: 1, - Payload: []byte{'h', 'e', 'l', 'l', 'o'}, - OutputOwners: secp256k1fx.OutputOwners{}, - }, - }, - }}, - }, - Creds: []*fxs.FxCredential{ - { - Credential: &nftfx.Credential{}, - }, - }, - } - require.NoError(transferNFTTx.Initialize(env.vm.parser.Codec())) + // Move the NFT + utxos, err := avax.GetAllUTXOs(env.vm.state, kc.Addresses()) + require.NoError(err) + transferOp, _, err := env.vm.SpendNFT( + utxos, + kc, + createAssetTx.ID(), + 1, + keys[2].Address(), + ) + require.NoError(err) + + transferNFTTx, err := env.txBuilder.Operation( + transferOp, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, transferNFTTx) } @@ -234,92 +200,87 @@ func TestIssueProperty(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - }}, - Name: "Team Rocket", - Symbol: "TR", - Denomination: 0, - States: []*txs.InitialState{{ - FxIndex: 2, - Outs: []verify.State{ - &propertyfx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: 
[]ids.ShortID{keys[0].PublicKey().Address()}, - }, + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + // create the asset + initialStates := map[uint32][]verify.State{ + 2: { + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }}, - }} - require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) + }, + } + + createAssetTx, err := env.txBuilder.CreateAssetTx( + "Team Rocket", // name + "TR", // symbol + 0, // denomination + initialStates, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, + // mint the property + mintPropertyOp := &txs.Operation{ + Asset: avax.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*avax.UTXOID{{ + TxID: createAssetTx.ID(), + OutputIndex: 1, }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: createAssetTx.ID(), - OutputIndex: 0, - }}, - Op: &propertyfx.MintOperation{ - MintInput: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - MintOutput: propertyfx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Op: &propertyfx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + MintOutput: propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - OwnedOutput: propertyfx.OwnedOutput{}, }, - }}, - }} + OwnedOutput: propertyfx.OwnedOutput{}, + }, + } - codec := env.vm.parser.Codec() - require.NoError(mintPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ - {keys[0]}, - })) + mintPropertyTx, err := env.txBuilder.Operation( + []*txs.Operation{mintPropertyOp}, 
+ kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, mintPropertyTx) - burnPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: mintPropertyTx.ID(), - OutputIndex: 1, - }}, - Op: &propertyfx.BurnOperation{Input: secp256k1fx.Input{}}, + // burn the property + burnPropertyOp := &txs.Operation{ + Asset: avax.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*avax.UTXOID{{ + TxID: mintPropertyTx.ID(), + OutputIndex: 2, }}, - }} + Op: &propertyfx.BurnOperation{Input: secp256k1fx.Input{}}, + } - require.NoError(burnPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ - {}, - })) + burnPropertyTx, err := env.txBuilder.Operation( + []*txs.Operation{burnPropertyOp}, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, burnPropertyTx) } @@ -331,11 +292,6 @@ func TestIssueTxWithFeeAsset(t *testing.T) { isCustomFeeAsset: true, }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() // send first asset tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, feeAssetName) @@ -350,58 +306,44 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { isCustomFeeAsset: true, }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() // send second asset - feeAssetCreateTx := getCreateTxFromGenesisTest(t, env.genesisBytes, feeAssetName) - createTx := getCreateTxFromGenesisTest(t, env.genesisBytes, otherAssetName) + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - 
BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{ - // fee asset - { - UTXOID: avax.UTXOID{ - TxID: feeAssetCreateTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: feeAssetCreateTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, + feeAssetCreateTx = getCreateTxFromGenesisTest(t, env.genesisBytes, feeAssetName) + createTx = getCreateTxFromGenesisTest(t, env.genesisBytes, otherAssetName) + ) + + tx, err := env.txBuilder.BaseTx( + []*avax.TransferableOutput{ + { // fee asset + Asset: avax.Asset{ID: feeAssetCreateTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: startBalance - env.vm.TxFee, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, - // issued asset - { - UTXOID: avax.UTXOID{ - TxID: createTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: createTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, + }, + { // issued asset + Asset: avax.Asset{ID: createTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: startBalance - env.vm.TxFee, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, }, }, - }} - require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}, {keys[0]}})) - + nil, // memo + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, tx) } @@ -409,10 +351,7 @@ func TestVMFormat(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) - defer func() { - require.NoError(t, env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() tests := []struct { in ids.ShortID @@ -440,45 +379,31 @@ func TestTxAcceptAfterParseTx(t *testing.T) { fork: latest, notLinearized: true, }) - defer func() { - 
require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() - key := keys[0] - firstTx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: env.genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: env.genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: env.genesisTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - env.vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + firstTx, err := env.txBuilder.BaseTx( + []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: env.genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: startBalance - env.vm.TxFee, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, - }}, - }, - }} - require.NoError(firstTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + }, + }}, + nil, // memo + kc, + key.Address(), + ) + require.NoError(err) + // let secondTx spend firstTx outputs secondTx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, @@ -526,75 +451,46 @@ func TestIssueImportTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, + fork: durango, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() peerSharedMemory := env.sharedMemory.NewSharedMemory(constants.PlatformChainID) genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") avaxID := 
genesisTx.ID() - key := keys[0] - utxoID := avax.UTXOID{ - TxID: ids.ID{ - 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, - 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, - 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, - 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, - }, - } + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) - txAssetID := avax.Asset{ID: avaxID} - tx := &txs.Tx{Unsigned: &txs.ImportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Outs: []*avax.TransferableOutput{{ - Asset: txAssetID, - Out: &secp256k1fx.TransferOutput{ - Amt: 1000, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }}, - SourceChain: constants.PlatformChainID, - ImportedIns: []*avax.TransferableInput{{ + utxoID = avax.UTXOID{ + TxID: ids.ID{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + }, + } + txAssetID = avax.Asset{ID: avaxID} + importedUtxo = &avax.UTXO{ UTXOID: utxoID, Asset: txAssetID, - In: &secp256k1fx.TransferInput{ + Out: &secp256k1fx.TransferOutput{ Amt: 1010, - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, - }}, - }} - require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + } + ) // Provide the platform UTXO: - utxo := &avax.UTXO{ - UTXOID: utxoID, - Asset: txAssetID, - Out: &secp256k1fx.TransferOutput{ - Amt: 1010, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - } - - utxoBytes, err := env.vm.parser.Codec().Marshal(txs.CodecVersion, utxo) + utxoBytes, err := env.vm.parser.Codec().Marshal(txs.CodecVersion, importedUtxo) 
require.NoError(err) - inputID := utxo.InputID() + inputID := importedUtxo.InputID() require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ env.vm.ctx.ChainID: { PutRequests: []*atomic.Element{{ @@ -607,6 +503,13 @@ func TestIssueImportTx(t *testing.T) { }, })) + tx, err := env.txBuilder.ImportTx( + constants.PlatformChainID, // source chain + key.Address(), + kc, + ) + require.NoError(err) + env.vm.ctx.Lock.Unlock() issueAndAccept(require, env.vm, env.issuer, tx) @@ -626,13 +529,10 @@ func TestForceAcceptImportTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: noFeesTestConfig, - notLinearized: true, + fork: durango, + notLinearized: true, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") avaxID := genesisTx.ID() @@ -655,7 +555,7 @@ func TestForceAcceptImportTx(t *testing.T) { Outs: []*avax.TransferableOutput{{ Asset: txAssetID, Out: &secp256k1fx.TransferOutput{ - Amt: 1000, + Amt: 10, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, @@ -703,47 +603,28 @@ func TestImportTxNotState(t *testing.T) { func TestIssueExportTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - fork: latest, - }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") - avaxID := genesisTx.ID() - key := keys[0] - tx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxID}, - In: 
&secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - env.vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }}, - }} - require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + var ( + avaxID = genesisTx.ID() + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + to = key.PublicKey().Address() + changeAddr = to + ) + + tx, err := env.txBuilder.ExportTx( + constants.PlatformChainID, + to, // to + avaxID, + startBalance-env.vm.TxFee, + kc, + changeAddr, + ) + require.NoError(err) peerSharedMemory := env.sharedMemory.NewSharedMemory(constants.PlatformChainID) utxoBytes, _, _, err := peerSharedMemory.Indexed( @@ -783,45 +664,28 @@ func TestClearForceAcceptedExportTx(t *testing.T) { env := setup(t, &envConfig{ fork: latest, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") - avaxID := genesisTx.ID() - key := keys[0] - assetID := avax.Asset{ID: avaxID} - tx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: assetID, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: assetID, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - env.vm.TxFee, - OutputOwners: 
secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }}, - }} - require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + var ( + avaxID = genesisTx.ID() + assetID = avax.Asset{ID: avaxID} + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + to = key.PublicKey().Address() + changeAddr = to + ) + + tx, err := env.txBuilder.ExportTx( + constants.PlatformChainID, + to, // to + avaxID, + startBalance-env.vm.TxFee, + kc, + changeAddr, + ) + require.NoError(err) utxo := avax.UTXOID{ TxID: tx.ID(), @@ -836,7 +700,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { }, })) - _, err := peerSharedMemory.Get(env.vm.ctx.ChainID, [][]byte{utxoID[:]}) + _, err = peerSharedMemory.Get(env.vm.ctx.ChainID, [][]byte{utxoID[:]}) require.ErrorIs(err, database.ErrNotFound) env.vm.ctx.Lock.Unlock() diff --git a/vms/avm/wallet_service_test.go b/vms/avm/wallet_service_test.go index eeb214fba9bd..d4423bd31c7d 100644 --- a/vms/avm/wallet_service_test.go +++ b/vms/avm/wallet_service_test.go @@ -4,12 +4,14 @@ package avm import ( - "context" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/linked" + "github.com/ava-labs/avalanchego/vms/avm/txs" ) func TestWalletService_SendMultiple(t *testing.T) { @@ -28,10 +30,10 @@ func TestWalletService_SendMultiple(t *testing.T) { }) env.vm.ctx.Lock.Unlock() - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + walletService := &WalletService{ + vm: env.vm, + pendingTxs: linked.NewHashmap[ids.ID, *txs.Tx](), + } assetID := env.genesisTx.ID() addr := keys[0].PublicKey().Address() @@ -65,14 +67,14 @@ func TestWalletService_SendMultiple(t *testing.T) { }, } reply := &api.JSONTxIDChangeAddr{} - require.NoError(env.walletService.SendMultiple(nil, args, reply)) 
+ require.NoError(walletService.SendMultiple(nil, args, reply)) require.Equal(changeAddrStr, reply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, reply.TxID) env.vm.ctx.Lock.Lock() - _, err = env.vm.state.GetTx(reply.TxID) + env.vm.ctx.Lock.Unlock() require.NoError(err) }) } From cd0c6e152965f44a2794935c41881fb207c8f78c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 10 Jun 2024 12:46:35 -0400 Subject: [PATCH 055/102] Use netip.AddrPort rather than ips.IPPort (#3094) --- api/info/client.go | 5 +- api/info/service.go | 11 +- config/config.go | 8 +- genesis/bootstrappers.go | 6 +- message/mock_outbound_message_builder.go | 3 +- message/outbound_msg_builder.go | 17 +- nat/nat.go | 37 ++- nat/no_router.go | 23 +- nat/pmp.go | 8 +- nat/upnp.go | 14 +- network/config.go | 15 +- network/dialer/dialer.go | 6 +- network/dialer/dialer_test.go | 20 +- network/dialer_test.go | 27 +-- network/example_test.go | 3 +- network/ip_tracker_test.go | 2 +- network/listener_test.go | 11 +- network/network.go | 19 +- network/network_test.go | 41 ++-- network/peer/example_test.go | 11 +- network/peer/info.go | 5 +- network/peer/ip.go | 11 +- network/peer/ip_signer.go | 15 +- network/peer/ip_signer_test.go | 21 +- network/peer/ip_test.go | 26 +- network/peer/peer.go | 57 ++--- network/peer/peer_test.go | 8 +- network/peer/test_peer.go | 15 +- network/test_network.go | 228 ++++++++---------- .../inbound_conn_upgrade_throttler.go | 27 ++- .../inbound_conn_upgrade_throttler_test.go | 13 +- network/tracked_ip.go | 9 +- network/tracked_ip_test.go | 17 +- node/config.go | 6 +- node/node.go | 61 +++-- utils/atomic.go | 30 ++- utils/atomic_test.go | 45 ++++ utils/beacon/beacon.go | 11 +- utils/beacon/set.go | 22 +- utils/beacon/set_test.go | 27 +-- utils/dynamicip/ifconfig_resolver.go | 21 +- utils/dynamicip/opendns_resolver.go | 17 +- utils/dynamicip/resolver.go | 4 +- utils/dynamicip/updater.go | 24 +- utils/dynamicip/updater_test.go | 48 ++-- utils/ips/claimed_ip_port.go | 
11 +- utils/ips/dynamic_ip_port.go | 56 ----- utils/ips/ip.go | 57 +++++ utils/ips/ip_port.go | 104 -------- utils/ips/ip_test.go | 176 -------------- utils/ips/lookup.go | 13 +- utils/ips/lookup_test.go | 12 +- 52 files changed, 662 insertions(+), 822 deletions(-) delete mode 100644 utils/ips/dynamic_ip_port.go create mode 100644 utils/ips/ip.go delete mode 100644 utils/ips/ip_port.go delete mode 100644 utils/ips/ip_test.go diff --git a/api/info/client.go b/api/info/client.go index 6caafd422233..15812cd5c213 100644 --- a/api/info/client.go +++ b/api/info/client.go @@ -5,6 +5,7 @@ package info import ( "context" + "net/netip" "time" "github.com/ava-labs/avalanchego/ids" @@ -19,7 +20,7 @@ var _ Client = (*client)(nil) type Client interface { GetNodeVersion(context.Context, ...rpc.Option) (*GetNodeVersionReply, error) GetNodeID(context.Context, ...rpc.Option) (ids.NodeID, *signer.ProofOfPossession, error) - GetNodeIP(context.Context, ...rpc.Option) (string, error) + GetNodeIP(context.Context, ...rpc.Option) (netip.AddrPort, error) GetNetworkID(context.Context, ...rpc.Option) (uint32, error) GetNetworkName(context.Context, ...rpc.Option) (string, error) GetBlockchainID(context.Context, string, ...rpc.Option) (ids.ID, error) @@ -54,7 +55,7 @@ func (c *client) GetNodeID(ctx context.Context, options ...rpc.Option) (ids.Node return res.NodeID, res.NodePOP, err } -func (c *client) GetNodeIP(ctx context.Context, options ...rpc.Option) (string, error) { +func (c *client) GetNodeIP(ctx context.Context, options ...rpc.Option) (netip.AddrPort, error) { res := &GetNodeIPReply{} err := c.requester.SendRequest(ctx, "info.getNodeIP", struct{}{}, res, options...) 
return res.IP, err diff --git a/api/info/service.go b/api/info/service.go index 929251d25aab..fd0117c5a088 100644 --- a/api/info/service.go +++ b/api/info/service.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "net/http" + "net/netip" "github.com/gorilla/rpc/v2" "go.uber.org/zap" @@ -17,8 +18,8 @@ import ( "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -37,7 +38,7 @@ type Info struct { Parameters log logging.Logger validators validators.Manager - myIP ips.DynamicIPPort + myIP *utils.Atomic[netip.AddrPort] networking network.Network chainManager chains.Manager vmManager vms.Manager @@ -67,7 +68,7 @@ func NewService( validators validators.Manager, chainManager chains.Manager, vmManager vms.Manager, - myIP ips.DynamicIPPort, + myIP *utils.Atomic[netip.AddrPort], network network.Network, benchlist benchlist.Manager, ) (http.Handler, error) { @@ -144,7 +145,7 @@ type GetNetworkIDReply struct { // GetNodeIPReply are the results from calling GetNodeIP type GetNodeIPReply struct { - IP string `json:"ip"` + IP netip.AddrPort `json:"ip"` } // GetNodeIP returns the IP of this node @@ -154,7 +155,7 @@ func (i *Info) GetNodeIP(_ *http.Request, _ *struct{}, reply *GetNodeIPReply) er zap.String("method", "getNodeIP"), ) - reply.IP = i.myIP.IPPort().String() + reply.IP = i.myIP.Get() return nil } diff --git a/config/config.go b/config/config.go index 44147b19dbd1..422eceedb6e9 100644 --- a/config/config.go +++ b/config/config.go @@ -450,7 +450,7 @@ func getStateSyncConfig(v *viper.Viper) (node.StateSyncConfig, error) { if ip 
== "" { continue } - addr, err := ips.ToIPPort(ip) + addr, err := ips.ParseAddrPort(ip) if err != nil { return node.StateSyncConfig{}, fmt.Errorf("couldn't parse state sync ip %s: %w", ip, err) } @@ -507,14 +507,13 @@ func getBootstrapConfig(v *viper.Viper, networkID uint32) (node.BootstrapConfig, if ip == "" { continue } - - addr, err := ips.ToIPPort(ip) + addr, err := ips.ParseAddrPort(ip) if err != nil { return node.BootstrapConfig{}, fmt.Errorf("couldn't parse bootstrap ip %s: %w", ip, err) } config.Bootstrappers = append(config.Bootstrappers, genesis.Bootstrapper{ // ID is populated below - IP: ips.IPDesc(addr), + IP: addr, }) } @@ -525,7 +524,6 @@ func getBootstrapConfig(v *viper.Viper, networkID uint32) (node.BootstrapConfig, if id == "" { continue } - nodeID, err := ids.NodeIDFromString(id) if err != nil { return node.BootstrapConfig{}, fmt.Errorf("couldn't parse bootstrap peer id %s: %w", id, err) diff --git a/genesis/bootstrappers.go b/genesis/bootstrappers.go index 4f39279ebfdd..e8bf95bc2c05 100644 --- a/genesis/bootstrappers.go +++ b/genesis/bootstrappers.go @@ -6,12 +6,12 @@ package genesis import ( "encoding/json" "fmt" + "net/netip" _ "embed" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/sampler" ) @@ -31,8 +31,8 @@ func init() { // Represents the relationship between the nodeID and the nodeIP. // The bootstrapper is sometimes called "anchor" or "beacon" node. type Bootstrapper struct { - ID ids.NodeID `json:"id"` - IP ips.IPDesc `json:"ip"` + ID ids.NodeID `json:"id"` + IP netip.AddrPort `json:"ip"` } // GetBootstrappers returns all default bootstrappers for the provided network. 
diff --git a/message/mock_outbound_message_builder.go b/message/mock_outbound_message_builder.go index cff8ed554caf..917d764028fe 100644 --- a/message/mock_outbound_message_builder.go +++ b/message/mock_outbound_message_builder.go @@ -10,6 +10,7 @@ package message import ( + netip "net/netip" reflect "reflect" time "time" @@ -283,7 +284,7 @@ func (mr *MockOutboundMsgBuilderMockRecorder) GetStateSummaryFrontier(arg0, arg1 } // Handshake mocks base method. -func (m *MockOutboundMsgBuilder) Handshake(arg0 uint32, arg1 uint64, arg2 ips.IPPort, arg3 string, arg4, arg5, arg6 uint32, arg7 uint64, arg8, arg9 []byte, arg10 []ids.ID, arg11, arg12 []uint32, arg13, arg14 []byte) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) Handshake(arg0 uint32, arg1 uint64, arg2 netip.AddrPort, arg3 string, arg4, arg5, arg6 uint32, arg7 uint64, arg8, arg9 []byte, arg10 []ids.ID, arg11, arg12 []uint32, arg13, arg14 []byte) (OutboundMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Handshake", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14) ret0, _ := ret[0].(OutboundMessage) diff --git a/message/outbound_msg_builder.go b/message/outbound_msg_builder.go index 1b02d8fa74f5..78aacdce3e08 100644 --- a/message/outbound_msg_builder.go +++ b/message/outbound_msg_builder.go @@ -4,6 +4,7 @@ package message import ( + "net/netip" "time" "github.com/ava-labs/avalanchego/ids" @@ -21,7 +22,7 @@ type OutboundMsgBuilder interface { Handshake( networkID uint32, myTime uint64, - ip ips.IPPort, + ip netip.AddrPort, client string, major uint32, minor uint32, @@ -228,7 +229,7 @@ func (b *outMsgBuilder) Pong() (OutboundMessage, error) { func (b *outMsgBuilder) Handshake( networkID uint32, myTime uint64, - ip ips.IPPort, + ip netip.AddrPort, client string, major uint32, minor uint32, @@ -244,14 +245,16 @@ func (b *outMsgBuilder) Handshake( ) (OutboundMessage, error) { subnetIDBytes := make([][]byte, len(trackedSubnets)) 
encodeIDs(trackedSubnets, subnetIDBytes) + // TODO: Use .AsSlice() after v1.12.x activates. + addr := ip.Addr().As16() return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_Handshake{ Handshake: &p2p.Handshake{ NetworkId: networkID, MyTime: myTime, - IpAddr: ip.IP.To16(), - IpPort: uint32(ip.Port), + IpAddr: addr[:], + IpPort: uint32(ip.Port()), IpSigningTime: ipSigningTime, IpNodeIdSig: ipNodeIDSig, TrackedSubnets: subnetIDBytes, @@ -299,10 +302,12 @@ func (b *outMsgBuilder) GetPeerList( func (b *outMsgBuilder) PeerList(peers []*ips.ClaimedIPPort, bypassThrottling bool) (OutboundMessage, error) { claimIPPorts := make([]*p2p.ClaimedIpPort, len(peers)) for i, p := range peers { + // TODO: Use .AsSlice() after v1.12.x activates. + ip := p.AddrPort.Addr().As16() claimIPPorts[i] = &p2p.ClaimedIpPort{ X509Certificate: p.Cert.Raw, - IpAddr: p.IPPort.IP.To16(), - IpPort: uint32(p.IPPort.Port), + IpAddr: ip[:], + IpPort: uint32(p.AddrPort.Port()), Timestamp: p.Timestamp, Signature: p.Signature, TxId: ids.Empty[:], diff --git a/nat/nat.go b/nat/nat.go index a6e37078e7a6..28cdb1083eac 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -4,13 +4,13 @@ package nat import ( - "net" + "net/netip" "sync" "time" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -29,7 +29,7 @@ type Router interface { // Undo a port mapping UnmapPort(intPort, extPort uint16) error // Return our external IP - ExternalIP() (net.IP, error) + ExternalIP() (netip.Addr, error) } // GetRouter returns a router on the current network. @@ -63,7 +63,13 @@ func NewPortMapper(log logging.Logger, r Router) *Mapper { // Map external port [extPort] (exposed to the internet) to internal port [intPort] (where our process is listening) // and set [ip]. Does this every [updateTime]. [ip] may be nil. 
-func (m *Mapper) Map(intPort, extPort uint16, desc string, ip ips.DynamicIPPort, updateTime time.Duration) { +func (m *Mapper) Map( + intPort uint16, + extPort uint16, + desc string, + ip *utils.Atomic[netip.AddrPort], + updateTime time.Duration, +) { if !m.r.SupportsNAT() { return } @@ -110,7 +116,13 @@ func (m *Mapper) retryMapPort(intPort, extPort uint16, desc string, timeout time // keepPortMapping runs in the background to keep a port mapped. It renews the mapping from [extPort] // to [intPort]] every [updateTime]. Updates [ip] every [updateTime]. -func (m *Mapper) keepPortMapping(intPort, extPort uint16, desc string, ip ips.DynamicIPPort, updateTime time.Duration) { +func (m *Mapper) keepPortMapping( + intPort uint16, + extPort uint16, + desc string, + ip *utils.Atomic[netip.AddrPort], + updateTime time.Duration, +) { updateTimer := time.NewTimer(updateTime) defer func(extPort uint16) { @@ -150,22 +162,25 @@ func (m *Mapper) keepPortMapping(intPort, extPort uint16, desc string, ip ips.Dy } } -func (m *Mapper) updateIP(ip ips.DynamicIPPort) { +func (m *Mapper) updateIP(ip *utils.Atomic[netip.AddrPort]) { if ip == nil { return } - newIP, err := m.r.ExternalIP() + newAddr, err := m.r.ExternalIP() if err != nil { m.log.Error("failed to get external IP", zap.Error(err), ) return } - oldIP := ip.IPPort().IP - ip.SetIP(newIP) - if !oldIP.Equal(newIP) { + oldAddrPort := ip.Get() + oldAddr := oldAddrPort.Addr() + if newAddr != oldAddr { + port := oldAddrPort.Port() + ip.Set(netip.AddrPortFrom(newAddr, port)) m.log.Info("external IP updated", - zap.Stringer("newIP", newIP), + zap.Stringer("oldIP", oldAddr), + zap.Stringer("newIP", newAddr), ) } } diff --git a/nat/no_router.go b/nat/no_router.go index 19c68dac5538..ebdf6015020c 100644 --- a/nat/no_router.go +++ b/nat/no_router.go @@ -6,6 +6,7 @@ package nat import ( "errors" "net" + "net/netip" "time" ) @@ -19,7 +20,7 @@ var ( const googleDNSServer = "8.8.8.8:80" type noRouter struct { - ip net.IP + ip netip.Addr ipErr 
error } @@ -35,26 +36,30 @@ func (noRouter) UnmapPort(uint16, uint16) error { return nil } -func (r noRouter) ExternalIP() (net.IP, error) { +func (r noRouter) ExternalIP() (netip.Addr, error) { return r.ip, r.ipErr } -func getOutboundIP() (net.IP, error) { +func getOutboundIP() (netip.Addr, error) { conn, err := net.Dial("udp", googleDNSServer) if err != nil { - return nil, err + return netip.Addr{}, err } - addr := conn.LocalAddr() + localAddr := conn.LocalAddr() if err := conn.Close(); err != nil { - return nil, err + return netip.Addr{}, err } - udpAddr, ok := addr.(*net.UDPAddr) + udpAddr, ok := localAddr.(*net.UDPAddr) if !ok { - return nil, errFetchingIP + return netip.Addr{}, errFetchingIP } - return udpAddr.IP, nil + addr := udpAddr.AddrPort().Addr() + if addr.Is4In6() { + addr = addr.Unmap() + } + return addr, nil } // NewNoRouter returns a router that assumes the network is public diff --git a/nat/pmp.go b/nat/pmp.go index ecee9793f934..c10bdbdbc4fa 100644 --- a/nat/pmp.go +++ b/nat/pmp.go @@ -6,7 +6,7 @@ package nat import ( "errors" "math" - "net" + "net/netip" "time" "github.com/jackpal/gateway" @@ -66,12 +66,12 @@ func (r *pmpRouter) UnmapPort(internalPort uint16, _ uint16) error { return err } -func (r *pmpRouter) ExternalIP() (net.IP, error) { +func (r *pmpRouter) ExternalIP() (netip.Addr, error) { response, err := r.client.GetExternalAddress() if err != nil { - return nil, err + return netip.Addr{}, err } - return response.ExternalIPAddress[:], nil + return netip.AddrFrom4(response.ExternalIPAddress), nil } func getPMPRouter() *pmpRouter { diff --git a/nat/upnp.go b/nat/upnp.go index d1aab02398b3..943017dc7560 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -7,11 +7,14 @@ import ( "fmt" "math" "net" + "net/netip" "time" "github.com/huin/goupnp" "github.com/huin/goupnp/dcps/internetgateway1" "github.com/huin/goupnp/dcps/internetgateway2" + + "github.com/ava-labs/avalanchego/utils/ips" ) const ( @@ -111,17 
+114,12 @@ func (r *upnpRouter) localIP() (net.IP, error) { return nil, fmt.Errorf("couldn't find the local address in the same network as %s", deviceIP) } -func (r *upnpRouter) ExternalIP() (net.IP, error) { +func (r *upnpRouter) ExternalIP() (netip.Addr, error) { str, err := r.client.GetExternalIPAddress() if err != nil { - return nil, err - } - - ip := net.ParseIP(str) - if ip == nil { - return nil, fmt.Errorf("invalid IP %s", str) + return netip.Addr{}, err } - return ip, nil + return ips.ParseAddr(str) } func (r *upnpRouter) MapPort( diff --git a/network/config.go b/network/config.go index 3004a12bdc5b..de8eb44e14a0 100644 --- a/network/config.go +++ b/network/config.go @@ -6,6 +6,7 @@ package network import ( "crypto" "crypto/tls" + "net/netip" "time" "github.com/ava-labs/avalanchego/ids" @@ -14,9 +15,9 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/set" ) @@ -110,12 +111,12 @@ type Config struct { TLSKeyLogFile string `json:"tlsKeyLogFile"` - MyNodeID ids.NodeID `json:"myNodeID"` - MyIPPort ips.DynamicIPPort `json:"myIP"` - NetworkID uint32 `json:"networkID"` - MaxClockDifference time.Duration `json:"maxClockDifference"` - PingFrequency time.Duration `json:"pingFrequency"` - AllowPrivateIPs bool `json:"allowPrivateIPs"` + MyNodeID ids.NodeID `json:"myNodeID"` + MyIPPort *utils.Atomic[netip.AddrPort] `json:"myIP"` + NetworkID uint32 `json:"networkID"` + MaxClockDifference time.Duration `json:"maxClockDifference"` + PingFrequency time.Duration `json:"pingFrequency"` + AllowPrivateIPs bool `json:"allowPrivateIPs"` SupportedACPs set.Set[uint32] 
`json:"supportedACPs"` ObjectedACPs set.Set[uint32] `json:"objectedACPs"` diff --git a/network/dialer/dialer.go b/network/dialer/dialer.go index 109b63cc2002..2517184fedcc 100644 --- a/network/dialer/dialer.go +++ b/network/dialer/dialer.go @@ -7,12 +7,12 @@ import ( "context" "fmt" "net" + "net/netip" "time" "go.uber.org/zap" "github.com/ava-labs/avalanchego/network/throttling" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -22,7 +22,7 @@ var _ Dialer = (*dialer)(nil) type Dialer interface { // If [ctx] is canceled, gives up trying to connect to [ip] // and returns an error. - Dial(ctx context.Context, ip ips.IPPort) (net.Conn, error) + Dial(ctx context.Context, ip netip.AddrPort) (net.Conn, error) } type dialer struct { @@ -62,7 +62,7 @@ func NewDialer(network string, dialerConfig Config, log logging.Logger) Dialer { } } -func (d *dialer) Dial(ctx context.Context, ip ips.IPPort) (net.Conn, error) { +func (d *dialer) Dial(ctx context.Context, ip netip.AddrPort) (net.Conn, error) { if err := d.throttler.Acquire(ctx); err != nil { return nil, err } diff --git a/network/dialer/dialer_test.go b/network/dialer/dialer_test.go index a824b8b03e08..01b3f640667b 100644 --- a/network/dialer/dialer_test.go +++ b/network/dialer/dialer_test.go @@ -6,14 +6,12 @@ package dialer import ( "context" "net" - "strconv" - "strings" + "net/netip" "testing" "time" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -22,7 +20,11 @@ import ( func TestDialerCancelDial(t *testing.T) { require := require.New(t) - l, err := net.Listen("tcp", "127.0.0.1:") + listenAddrPort := netip.AddrPortFrom( + netip.AddrFrom4([4]byte{127, 0, 0, 1}), + 0, + ) + l, err := net.Listen("tcp", listenAddrPort.String()) require.NoError(err) done := make(chan struct{}) @@ -43,12 +45,8 @@ func TestDialerCancelDial(t *testing.T) { } }() 
- port, err := strconv.Atoi(strings.Split(l.Addr().String(), ":")[1]) + listenedAddrPort, err := netip.ParseAddrPort(l.Addr().String()) require.NoError(err) - myIP := ips.IPPort{ - IP: net.ParseIP("127.0.0.1"), - Port: uint16(port), - } // Create a dialer dialer := NewDialer( @@ -63,11 +61,11 @@ func TestDialerCancelDial(t *testing.T) { // Make an outgoing connection with a cancelled context ctx, cancel := context.WithCancel(context.Background()) cancel() - _, err = dialer.Dial(ctx, myIP) + _, err = dialer.Dial(ctx, listenedAddrPort) require.ErrorIs(err, context.Canceled) // Make an outgoing connection with a non-cancelled context - conn, err := dialer.Dial(context.Background(), myIP) + conn, err := dialer.Dial(context.Background(), listenedAddrPort) require.NoError(err) _ = conn.Close() diff --git a/network/dialer_test.go b/network/dialer_test.go index 7a60d056d66d..d1567d20ad1b 100644 --- a/network/dialer_test.go +++ b/network/dialer_test.go @@ -7,9 +7,9 @@ import ( "context" "errors" "net" + "net/netip" "github.com/ava-labs/avalanchego/network/dialer" - "github.com/ava-labs/avalanchego/utils/ips" ) var ( @@ -20,33 +20,32 @@ var ( type testDialer struct { // maps [ip.String] to a listener - listeners map[string]*testListener + listeners map[netip.AddrPort]*testListener } func newTestDialer() *testDialer { return &testDialer{ - listeners: make(map[string]*testListener), + listeners: make(map[netip.AddrPort]*testListener), } } -func (d *testDialer) NewListener() (ips.DynamicIPPort, *testListener) { +func (d *testDialer) NewListener() (netip.AddrPort, *testListener) { // Uses a private IP to easily enable testing AllowPrivateIPs - ip := ips.NewDynamicIPPort( - net.IPv4(10, 0, 0, 0), + addrPort := netip.AddrPortFrom( + netip.AddrFrom4([4]byte{10, 0, 0, 0}), uint16(len(d.listeners)+1), ) - staticIP := ip.IPPort() - listener := newTestListener(staticIP) - d.AddListener(staticIP, listener) - return ip, listener + listener := newTestListener(addrPort) + 
d.AddListener(addrPort, listener) + return addrPort, listener } -func (d *testDialer) AddListener(ip ips.IPPort, listener *testListener) { - d.listeners[ip.String()] = listener +func (d *testDialer) AddListener(ip netip.AddrPort, listener *testListener) { + d.listeners[ip] = listener } -func (d *testDialer) Dial(ctx context.Context, ip ips.IPPort) (net.Conn, error) { - listener, ok := d.listeners[ip.String()] +func (d *testDialer) Dial(ctx context.Context, ip netip.AddrPort) (net.Conn, error) { + listener, ok := d.listeners[ip] if !ok { return nil, errRefused } diff --git a/network/example_test.go b/network/example_test.go index bfac03fba44f..0fef075f8101 100644 --- a/network/example_test.go +++ b/network/example_test.go @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -110,7 +109,7 @@ func ExampleNewTestNetwork() { // gossip will enable connecting to all the remaining nodes in the network. 
bootstrappers := genesis.SampleBootstrappers(constants.FujiID, 5) for _, bootstrapper := range bootstrappers { - network.ManuallyTrack(bootstrapper.ID, ips.IPPort(bootstrapper.IP)) + network.ManuallyTrack(bootstrapper.ID, bootstrapper.IP) } // Typically network.StartClose() should be called based on receiving a diff --git a/network/ip_tracker_test.go b/network/ip_tracker_test.go index edae70de5b98..bbfbdb958773 100644 --- a/network/ip_tracker_test.go +++ b/network/ip_tracker_test.go @@ -25,7 +25,7 @@ func newTestIPTracker(t *testing.T) *ipTracker { func newerTestIP(ip *ips.ClaimedIPPort) *ips.ClaimedIPPort { return ips.NewClaimedIPPort( ip.Cert, - ip.IPPort, + ip.AddrPort, ip.Timestamp+1, ip.Signature, ) diff --git a/network/listener_test.go b/network/listener_test.go index 5d6073c6b383..a0167e817baa 100644 --- a/network/listener_test.go +++ b/network/listener_test.go @@ -5,19 +5,18 @@ package network import ( "net" - - "github.com/ava-labs/avalanchego/utils/ips" + "net/netip" ) var _ net.Listener = (*testListener)(nil) type testListener struct { - ip ips.IPPort + ip netip.AddrPort inbound chan net.Conn closed chan struct{} } -func newTestListener(ip ips.IPPort) *testListener { +func newTestListener(ip netip.AddrPort) *testListener { return &testListener{ ip: ip, inbound: make(chan net.Conn), @@ -41,7 +40,7 @@ func (l *testListener) Close() error { func (l *testListener) Addr() net.Addr { return &net.TCPAddr{ - IP: l.ip.IP, - Port: int(l.ip.Port), + IP: l.ip.Addr().AsSlice(), + Port: int(l.ip.Port()), } } diff --git a/network/network.go b/network/network.go index 51ded9b8cf91..2aee13a910d9 100644 --- a/network/network.go +++ b/network/network.go @@ -9,6 +9,7 @@ import ( "fmt" "math" "net" + "net/netip" "strings" "sync" "sync/atomic" @@ -77,7 +78,7 @@ type Network interface { // Attempt to connect to this IP. The network will never stop attempting to // connect to this ID. 
- ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) + ManuallyTrack(nodeID ids.NodeID, ip netip.AddrPort) // PeerInfo returns information about peers. If [nodeIDs] is empty, returns // info about all peers that have finished the handshake. Otherwise, returns @@ -448,7 +449,7 @@ func (n *network) Connected(nodeID ids.NodeID) { peerIP := peer.IP() newIP := ips.NewClaimedIPPort( peer.Cert(), - peerIP.IPPort, + peerIP.AddrPort, peerIP.Timestamp, peerIP.TLSSignature, ) @@ -548,7 +549,7 @@ func (n *network) Dispatch() error { // call this function inside the go-routine, rather than the main // accept loop. remoteAddr := conn.RemoteAddr().String() - ip, err := ips.ToIPPort(remoteAddr) + ip, err := ips.ParseAddrPort(remoteAddr) if err != nil { n.peerConfig.Log.Error("failed to parse remote address", zap.String("peerIP", remoteAddr), @@ -597,7 +598,7 @@ func (n *network) Dispatch() error { return errs.Err } -func (n *network) ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) { +func (n *network) ManuallyTrack(nodeID ids.NodeID, ip netip.AddrPort) { n.ipTracker.ManuallyTrack(nodeID) n.peersLock.Lock() @@ -637,7 +638,7 @@ func (n *network) track(ip *ips.ClaimedIPPort) error { // lock. signedIP := peer.SignedIP{ UnsignedIP: peer.UnsignedIP{ - IPPort: ip.IPPort, + AddrPort: ip.AddrPort, Timestamp: ip.Timestamp, }, TLSSignature: ip.Signature, @@ -663,9 +664,9 @@ func (n *network) track(ip *ips.ClaimedIPPort) error { tracked, isTracked := n.trackedIPs[ip.NodeID] if isTracked { // Stop tracking the old IP and start tracking the new one. 
- tracked = tracked.trackNewIP(ip.IPPort) + tracked = tracked.trackNewIP(ip.AddrPort) } else { - tracked = newTrackedIP(ip.IPPort) + tracked = newTrackedIP(ip.AddrPort) } n.trackedIPs[ip.NodeID] = tracked n.dial(ip.NodeID, tracked) @@ -798,7 +799,7 @@ func (n *network) disconnectedFromConnected(peer peer.Peer, nodeID ids.NodeID) { // The peer that is disconnecting from us finished the handshake if ip, wantsConnection := n.ipTracker.GetIP(nodeID); wantsConnection { - tracked := newTrackedIP(ip.IPPort) + tracked := newTrackedIP(ip.AddrPort) n.trackedIPs[nodeID] = tracked n.dial(nodeID, tracked) } @@ -898,7 +899,7 @@ func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { // nodeID leaves the validator set. This is why we continue the loop // rather than returning even though we will never initiate an // outbound connection with this IP. - if !n.config.AllowPrivateIPs && ip.ip.IP.IsPrivate() { + if !n.config.AllowPrivateIPs && !ips.IsPublic(ip.ip.Addr()) { n.peerConfig.Log.Verbo("skipping connection dial", zap.String("reason", "outbound connections to private IPs are prohibited"), zap.Stringer("nodeID", nodeID), diff --git a/network/network_test.go b/network/network_test.go index 5ae2cef5af3e..85390da90dff 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -6,7 +6,7 @@ package network import ( "context" "crypto" - "net" + "net/netip" "sync" "testing" "time" @@ -26,6 +26,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" @@ -178,7 +179,7 @@ func newTestNetwork(t *testing.T, count int) (*testDialer, []*testListener, []id config := defaultConfig config.TLSConfig = peer.TLSConfig(*tlsCert, nil) config.MyNodeID = 
nodeID - config.MyIPPort = ip + config.MyIPPort = utils.NewAtomic(ip) config.TLSKey = tlsCert.PrivateKey.(crypto.Signer) config.BLSKey = blsKey @@ -279,7 +280,7 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler for i, net := range networks { if i != 0 { config := configs[0] - net.ManuallyTrack(config.MyNodeID, config.MyIPPort.IPPort()) + net.ManuallyTrack(config.MyNodeID, config.MyIPPort.Get()) } go func(net Network) { @@ -418,10 +419,10 @@ func TestTrackVerifiesSignatures(t *testing.T) { err = network.Track([]*ips.ClaimedIPPort{ ips.NewClaimedIPPort( stakingCert, - ips.IPPort{ - IP: net.IPv4(123, 132, 123, 123), - Port: 10000, - }, + netip.AddrPortFrom( + netip.AddrFrom4([4]byte{123, 132, 123, 123}), + 10000, + ), 1000, // timestamp nil, // signature ), @@ -487,7 +488,7 @@ func TestTrackDoesNotDialPrivateIPs(t *testing.T) { for i, net := range networks { if i != 0 { config := configs[0] - net.ManuallyTrack(config.MyNodeID, config.MyIPPort.IPPort()) + net.ManuallyTrack(config.MyNodeID, config.MyIPPort.Get()) } go func(net Network) { @@ -576,7 +577,7 @@ func TestDialDeletesNonValidators(t *testing.T) { require.NoError(net.Track([]*ips.ClaimedIPPort{ ips.NewClaimedIPPort( stakingCert, - ip.IPPort, + ip.AddrPort, ip.Timestamp, ip.TLSSignature, ), @@ -628,23 +629,23 @@ func TestDialContext(t *testing.T) { neverDialedNodeID = ids.GenerateTestNodeID() dialedNodeID = ids.GenerateTestNodeID() - dynamicNeverDialedIP, neverDialedListener = dialer.NewListener() - dynamicDialedIP, dialedListener = dialer.NewListener() + neverDialedIP, neverDialedListener = dialer.NewListener() + dialedIP, dialedListener = dialer.NewListener() - neverDialedIP = &trackedIP{ - ip: dynamicNeverDialedIP.IPPort(), + neverDialedTrackedIP = &trackedIP{ + ip: neverDialedIP, } - dialedIP = &trackedIP{ - ip: dynamicDialedIP.IPPort(), + dialedTrackedIP = &trackedIP{ + ip: dialedIP, } ) - network.ManuallyTrack(neverDialedNodeID, neverDialedIP.ip) - 
network.ManuallyTrack(dialedNodeID, dialedIP.ip) + network.ManuallyTrack(neverDialedNodeID, neverDialedIP) + network.ManuallyTrack(dialedNodeID, dialedIP) // Sanity check that when a non-cancelled context is given, // we actually dial the peer. - network.dial(dialedNodeID, dialedIP) + network.dial(dialedNodeID, dialedTrackedIP) gotDialedIPConn := make(chan struct{}) go func() { @@ -656,7 +657,7 @@ func TestDialContext(t *testing.T) { // Asset that when [n.onCloseCtx] is cancelled, dial returns immediately. // That is, [neverDialedListener] doesn't accept a connection. network.onCloseCtxCancel() - network.dial(neverDialedNodeID, neverDialedIP) + network.dial(neverDialedNodeID, neverDialedTrackedIP) gotNeverDialedIPConn := make(chan struct{}) go func() { @@ -718,7 +719,7 @@ func TestAllowConnectionAsAValidator(t *testing.T) { for i, net := range networks { if i != 0 { config := configs[0] - net.ManuallyTrack(config.MyNodeID, config.MyIPPort.IPPort()) + net.ManuallyTrack(config.MyNodeID, config.MyIPPort.Get()) } go func(net Network) { diff --git a/network/peer/example_test.go b/network/peer/example_test.go index d6c8ba20c913..59e5268fb623 100644 --- a/network/peer/example_test.go +++ b/network/peer/example_test.go @@ -6,13 +6,12 @@ package peer import ( "context" "fmt" - "net" + "net/netip" "time" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" ) func ExampleStartTestPeer() { @@ -20,10 +19,10 @@ func ExampleStartTestPeer() { ctx, cancel := context.WithTimeout(ctx, 15*time.Second) defer cancel() - peerIP := ips.IPPort{ - IP: net.IPv6loopback, - Port: 9651, - } + peerIP := netip.AddrPortFrom( + netip.IPv6Loopback(), + 9651, + ) peer, err := StartTestPeer( ctx, peerIP, diff --git a/network/peer/info.go b/network/peer/info.go index 00ccaec7953b..928c47ff26ee 100644 --- a/network/peer/info.go +++ 
b/network/peer/info.go @@ -4,6 +4,7 @@ package peer import ( + "net/netip" "time" "github.com/ava-labs/avalanchego/ids" @@ -12,8 +13,8 @@ import ( ) type Info struct { - IP string `json:"ip"` - PublicIP string `json:"publicIP,omitempty"` + IP netip.AddrPort `json:"ip"` + PublicIP netip.AddrPort `json:"publicIP,omitempty"` ID ids.NodeID `json:"nodeID"` Version string `json:"version"` LastSent time.Time `json:"lastSent"` diff --git a/network/peer/ip.go b/network/peer/ip.go index a873f1668d6a..443396d344d2 100644 --- a/network/peer/ip.go +++ b/network/peer/ip.go @@ -8,12 +8,13 @@ import ( "crypto/rand" "errors" "fmt" + "net" + "net/netip" "time" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -26,7 +27,7 @@ var ( // ensure that the most updated IP claim is tracked by peers for a given // validator. 
type UnsignedIP struct { - ips.IPPort + AddrPort netip.AddrPort Timestamp uint64 } @@ -49,9 +50,11 @@ func (ip *UnsignedIP) Sign(tlsSigner crypto.Signer, blsSigner *bls.SecretKey) (* func (ip *UnsignedIP) bytes() []byte { p := wrappers.Packer{ - Bytes: make([]byte, ips.IPPortLen+wrappers.LongLen), + Bytes: make([]byte, net.IPv6len+wrappers.ShortLen+wrappers.LongLen), } - ips.PackIP(&p, ip.IPPort) + addrBytes := ip.AddrPort.Addr().As16() + p.PackFixedBytes(addrBytes[:]) + p.PackShort(ip.AddrPort.Port()) p.PackLong(ip.Timestamp) return p.Bytes } diff --git a/network/peer/ip_signer.go b/network/peer/ip_signer.go index 1c38d4e67528..1053cfce3e62 100644 --- a/network/peer/ip_signer.go +++ b/network/peer/ip_signer.go @@ -5,16 +5,17 @@ package peer import ( "crypto" + "net/netip" "sync" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) // IPSigner will return a signedIP for the current value of our dynamic IP. type IPSigner struct { - ip ips.DynamicIPPort + ip *utils.Atomic[netip.AddrPort] clock mockable.Clock tlsSigner crypto.Signer blsSigner *bls.SecretKey @@ -27,7 +28,7 @@ type IPSigner struct { } func NewIPSigner( - ip ips.DynamicIPPort, + ip *utils.Atomic[netip.AddrPort], tlsSigner crypto.Signer, blsSigner *bls.SecretKey, ) *IPSigner { @@ -49,8 +50,8 @@ func (s *IPSigner) GetSignedIP() (*SignedIP, error) { s.signedIPLock.RLock() signedIP := s.signedIP s.signedIPLock.RUnlock() - ip := s.ip.IPPort() - if signedIP != nil && signedIP.IPPort.Equal(ip) { + ip := s.ip.Get() + if signedIP != nil && signedIP.AddrPort == ip { return signedIP, nil } @@ -62,13 +63,13 @@ func (s *IPSigner) GetSignedIP() (*SignedIP, error) { // same time, we should verify that we are the first thread to attempt to // update it. 
signedIP = s.signedIP - if signedIP != nil && signedIP.IPPort.Equal(ip) { + if signedIP != nil && signedIP.AddrPort == ip { return signedIP, nil } // We should now sign our new IP at the current timestamp. unsignedIP := UnsignedIP{ - IPPort: ip, + AddrPort: ip, Timestamp: s.clock.Unix(), } signedIP, err := unsignedIP.Sign(s.tlsSigner, s.blsSigner) diff --git a/network/peer/ip_signer_test.go b/network/peer/ip_signer_test.go index 315becd8f082..cff9b2cbbda2 100644 --- a/network/peer/ip_signer_test.go +++ b/network/peer/ip_signer_test.go @@ -5,24 +5,24 @@ package peer import ( "crypto" - "net" + "net/netip" "testing" "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" ) func TestIPSigner(t *testing.T) { require := require.New(t) - dynIP := ips.NewDynamicIPPort( - net.IPv6loopback, + dynIP := utils.NewAtomic(netip.AddrPortFrom( + netip.IPv6Loopback(), 0, - ) + )) tlsCert, err := staking.NewTLSCert() require.NoError(err) @@ -37,22 +37,25 @@ func TestIPSigner(t *testing.T) { signedIP1, err := s.GetSignedIP() require.NoError(err) - require.Equal(dynIP.IPPort(), signedIP1.IPPort) + require.Equal(dynIP.Get(), signedIP1.AddrPort) require.Equal(uint64(10), signedIP1.Timestamp) s.clock.Set(time.Unix(11, 0)) signedIP2, err := s.GetSignedIP() require.NoError(err) - require.Equal(dynIP.IPPort(), signedIP2.IPPort) + require.Equal(dynIP.Get(), signedIP2.AddrPort) require.Equal(uint64(10), signedIP2.Timestamp) require.Equal(signedIP1.TLSSignature, signedIP2.TLSSignature) - dynIP.SetIP(net.IPv4(1, 2, 3, 4)) + dynIP.Set(netip.AddrPortFrom( + netip.AddrFrom4([4]byte{1, 2, 3, 4}), + dynIP.Get().Port(), + )) signedIP3, err := s.GetSignedIP() require.NoError(err) - require.Equal(dynIP.IPPort(), signedIP3.IPPort) + require.Equal(dynIP.Get(), signedIP3.AddrPort) 
require.Equal(uint64(11), signedIP3.Timestamp) require.NotEqual(signedIP2.TLSSignature, signedIP3.TLSSignature) } diff --git a/network/peer/ip_test.go b/network/peer/ip_test.go index 142d675ecfaa..4c3f62d27694 100644 --- a/network/peer/ip_test.go +++ b/network/peer/ip_test.go @@ -5,7 +5,7 @@ package peer import ( "crypto" - "net" + "net/netip" "testing" "time" @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" ) func TestSignedIpVerify(t *testing.T) { @@ -31,6 +30,10 @@ func TestSignedIpVerify(t *testing.T) { require.NoError(t, err) now := time.Now() + addrPort := netip.AddrPortFrom( + netip.AddrFrom4([4]byte{1, 2, 3, 4}), + 1, + ) type test struct { name string @@ -49,10 +52,7 @@ func TestSignedIpVerify(t *testing.T) { blsSigner: blsKey1, expectedCert: cert1, ip: UnsignedIP{ - IPPort: ips.IPPort{ - IP: net.IPv4(1, 2, 3, 4), - Port: 1, - }, + AddrPort: addrPort, Timestamp: uint64(now.Unix()) - 1, }, maxTimestamp: now, @@ -64,10 +64,7 @@ func TestSignedIpVerify(t *testing.T) { blsSigner: blsKey1, expectedCert: cert1, ip: UnsignedIP{ - IPPort: ips.IPPort{ - IP: net.IPv4(1, 2, 3, 4), - Port: 1, - }, + AddrPort: addrPort, Timestamp: uint64(now.Unix()), }, maxTimestamp: now, @@ -79,10 +76,7 @@ func TestSignedIpVerify(t *testing.T) { blsSigner: blsKey1, expectedCert: cert1, ip: UnsignedIP{ - IPPort: ips.IPPort{ - IP: net.IPv4(1, 2, 3, 4), - Port: 1, - }, + AddrPort: addrPort, Timestamp: uint64(now.Unix()) + 1, }, maxTimestamp: now, @@ -94,10 +88,6 @@ func TestSignedIpVerify(t *testing.T) { blsSigner: blsKey1, expectedCert: cert2, // note this isn't cert1 ip: UnsignedIP{ - IPPort: ips.IPPort{ - IP: net.IPv4(1, 2, 3, 4), - Port: 1, - }, Timestamp: uint64(now.Unix()), }, maxTimestamp: now, diff --git a/network/peer/peer.go b/network/peer/peer.go index a87bca708544..a92791ff72ee 100644 --- a/network/peer/peer.go +++ b/network/peer/peer.go 
@@ -10,6 +10,7 @@ import ( "io" "math" "net" + "net/netip" "sync" "sync/atomic" "time" @@ -269,11 +270,6 @@ func (p *peer) AwaitReady(ctx context.Context) error { } func (p *peer) Info() Info { - publicIPStr := "" - if !p.ip.IsZero() { - publicIPStr = p.ip.IPPort.String() - } - uptimes := make(map[ids.ID]json.Uint32, p.MySubnets.Len()) for subnetID := range p.MySubnets { uptime, exist := p.ObservedUptime(subnetID) @@ -288,9 +284,10 @@ func (p *peer) Info() Info { primaryUptime = 0 } + ip, _ := ips.ParseAddrPort(p.conn.RemoteAddr().String()) return Info{ - IP: p.conn.RemoteAddr().String(), - PublicIP: publicIPStr, + IP: ip, + PublicIP: p.ip.AddrPort, ID: p.id, Version: p.version.String(), LastSent: p.LastSent(), @@ -526,10 +523,10 @@ func (p *peer) writeMessages() { ) return } - if mySignedIP.Port == 0 { + if port := mySignedIP.AddrPort.Port(); port == 0 { p.Log.Error("signed IP has invalid port", zap.Stringer("nodeID", p.id), - zap.Uint16("port", mySignedIP.Port), + zap.Uint16("port", port), ) return } @@ -540,7 +537,7 @@ func (p *peer) writeMessages() { msg, err := p.MessageCreator.Handshake( p.NetworkID, p.Clock.Unix(), - mySignedIP.IPPort, + mySignedIP.AddrPort, myVersion.Name, uint32(myVersion.Major), uint32(myVersion.Minor), @@ -731,7 +728,7 @@ func (p *peer) shouldDisconnect() bool { return true } - // Avoid unnecessary signature verifications by only verifing the signature + // Avoid unnecessary signature verifications by only verifying the signature // once per validation period. 
p.txIDOfVerifiedBLSKey = vdr.TxID return false @@ -1038,23 +1035,25 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { } } - // "net.IP" type in Golang is 16-byte - if ipLen := len(msg.IpAddr); ipLen != net.IPv6len { + addr, ok := ips.AddrFromSlice(msg.IpAddr) + if !ok { p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "ip"), - zap.Int("ipLen", ipLen), + zap.Int("ipLen", len(msg.IpAddr)), ) p.StartClose() return } + + port := uint16(msg.IpPort) if msg.IpPort == 0 { p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "port"), - zap.Uint32("port", msg.IpPort), + zap.Uint16("port", port), ) p.StartClose() return @@ -1062,10 +1061,10 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { p.ip = &SignedIP{ UnsignedIP: UnsignedIP{ - IPPort: ips.IPPort{ - IP: msg.IpAddr, - Port: uint16(msg.IpPort), - }, + AddrPort: netip.AddrPortFrom( + addr, + port, + ), Timestamp: msg.IpSigningTime, }, TLSSignature: msg.IpNodeIdSig, @@ -1224,23 +1223,25 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { return } - // "net.IP" type in Golang is 16-byte - if ipLen := len(claimedIPPort.IpAddr); ipLen != net.IPv6len { + addr, ok := ips.AddrFromSlice(claimedIPPort.IpAddr) + if !ok { p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), zap.String("field", "ip"), - zap.Int("ipLen", ipLen), + zap.Int("ipLen", len(claimedIPPort.IpAddr)), ) p.StartClose() return } - if claimedIPPort.IpPort == 0 { + + port := uint16(claimedIPPort.IpPort) + if port == 0 { p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), zap.String("field", "port"), - zap.Uint32("port", claimedIPPort.IpPort), + zap.Uint16("port", port), ) p.StartClose() return @@ -1248,10 +1249,10 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { discoveredIPs[i] 
= ips.NewClaimedIPPort( tlsCert, - ips.IPPort{ - IP: claimedIPPort.IpAddr, - Port: uint16(claimedIPPort.IpPort), - }, + netip.AddrPortFrom( + addr, + port, + ), claimedIPPort.Timestamp, claimedIPPort.Signature, ) diff --git a/network/peer/peer_test.go b/network/peer/peer_test.go index 4a0399bc3a1e..e29edbe17ba6 100644 --- a/network/peer/peer_test.go +++ b/network/peer/peer_test.go @@ -7,6 +7,7 @@ import ( "context" "crypto" "net" + "net/netip" "testing" "time" @@ -22,9 +23,9 @@ import ( "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" @@ -106,7 +107,10 @@ func newRawTestPeer(t *testing.T, config Config) *rawTestPeer { require.NoError(err) nodeID := ids.NodeIDFromCert(cert) - ip := ips.NewDynamicIPPort(net.IPv6loopback, 1) + ip := utils.NewAtomic(netip.AddrPortFrom( + netip.IPv6Loopback(), + 1, + )) tls := tlsCert.PrivateKey.(crypto.Signer) bls, err := bls.NewSecretKey() require.NoError(err) diff --git a/network/peer/test_peer.go b/network/peer/test_peer.go index a4df06b72ee0..ae03594f8e67 100644 --- a/network/peer/test_peer.go +++ b/network/peer/test_peer.go @@ -7,6 +7,7 @@ import ( "context" "crypto" "net" + "net/netip" "time" "github.com/prometheus/client_golang/prometheus" @@ -19,9 +20,9 @@ import ( "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils" 
"github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" @@ -47,7 +48,7 @@ const maxMessageToSend = 1024 // peer. func StartTestPeer( ctx context.Context, - ip ips.IPPort, + ip netip.AddrPort, networkID uint32, router router.InboundHandler, ) (Peer, error) { @@ -98,7 +99,6 @@ func StartTestPeer( return nil, err } - signerIP := ips.NewDynamicIPPort(net.IPv6zero, 1) tlsKey := tlsCert.PrivateKey.(crypto.Signer) blsKey, err := bls.NewSecretKey() if err != nil { @@ -123,7 +123,14 @@ func StartTestPeer( MaxClockDifference: time.Minute, ResourceTracker: resourceTracker, UptimeCalculator: uptime.NoOpCalculator, - IPSigner: NewIPSigner(signerIP, tlsKey, blsKey), + IPSigner: NewIPSigner( + utils.NewAtomic(netip.AddrPortFrom( + netip.IPv6Loopback(), + 1, + )), + tlsKey, + blsKey, + ), }, conn, cert, diff --git a/network/test_network.go b/network/test_network.go index 6a6bcdfcc08c..8644eb359ae1 100644 --- a/network/test_network.go +++ b/network/test_network.go @@ -8,6 +8,7 @@ import ( "errors" "math" "net" + "net/netip" "runtime" "sync" @@ -24,9 +25,9 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" @@ -89,114 +90,20 @@ func NewTestNetwork( return nil, err } - networkConfig := Config{ - 
ThrottlerConfig: ThrottlerConfig{ - InboundConnUpgradeThrottlerConfig: throttling.InboundConnUpgradeThrottlerConfig{ - UpgradeCooldown: constants.DefaultInboundConnUpgradeThrottlerCooldown, - MaxRecentConnsUpgraded: int(math.Ceil(constants.DefaultInboundThrottlerMaxConnsPerSec * constants.DefaultInboundConnUpgradeThrottlerCooldown.Seconds())), - }, - - InboundMsgThrottlerConfig: throttling.InboundMsgThrottlerConfig{ - MsgByteThrottlerConfig: throttling.MsgByteThrottlerConfig{ - VdrAllocSize: constants.DefaultInboundThrottlerVdrAllocSize, - AtLargeAllocSize: constants.DefaultInboundThrottlerAtLargeAllocSize, - NodeMaxAtLargeBytes: constants.DefaultInboundThrottlerNodeMaxAtLargeBytes, - }, - - BandwidthThrottlerConfig: throttling.BandwidthThrottlerConfig{ - RefillRate: constants.DefaultInboundThrottlerBandwidthRefillRate, - MaxBurstSize: constants.DefaultInboundThrottlerBandwidthMaxBurstSize, - }, - - CPUThrottlerConfig: throttling.SystemThrottlerConfig{ - MaxRecheckDelay: constants.DefaultInboundThrottlerCPUMaxRecheckDelay, - }, - - DiskThrottlerConfig: throttling.SystemThrottlerConfig{ - MaxRecheckDelay: constants.DefaultInboundThrottlerDiskMaxRecheckDelay, - }, - - MaxProcessingMsgsPerNode: constants.DefaultInboundThrottlerMaxProcessingMsgsPerNode, - }, - OutboundMsgThrottlerConfig: throttling.MsgByteThrottlerConfig{ - VdrAllocSize: constants.DefaultOutboundThrottlerVdrAllocSize, - AtLargeAllocSize: constants.DefaultOutboundThrottlerAtLargeAllocSize, - NodeMaxAtLargeBytes: constants.DefaultOutboundThrottlerNodeMaxAtLargeBytes, - }, - - MaxInboundConnsPerSec: constants.DefaultInboundThrottlerMaxConnsPerSec, - }, - - HealthConfig: HealthConfig{ - Enabled: true, - MinConnectedPeers: constants.DefaultNetworkHealthMinPeers, - MaxTimeSinceMsgReceived: constants.DefaultNetworkHealthMaxTimeSinceMsgReceived, - MaxTimeSinceMsgSent: constants.DefaultNetworkHealthMaxTimeSinceMsgSent, - MaxPortionSendQueueBytesFull: constants.DefaultNetworkHealthMaxPortionSendQueueFill, - 
MaxSendFailRate: constants.DefaultNetworkHealthMaxSendFailRate, - SendFailRateHalflife: constants.DefaultHealthCheckAveragerHalflife, - }, - - ProxyEnabled: constants.DefaultNetworkTCPProxyEnabled, - ProxyReadHeaderTimeout: constants.DefaultNetworkTCPProxyReadTimeout, - - DialerConfig: dialer.Config{ - ThrottleRps: constants.DefaultOutboundConnectionThrottlingRps, - ConnectionTimeout: constants.DefaultOutboundConnectionTimeout, - }, - - TimeoutConfig: TimeoutConfig{ - PingPongTimeout: constants.DefaultPingPongTimeout, - ReadHandshakeTimeout: constants.DefaultNetworkReadHandshakeTimeout, - }, - - PeerListGossipConfig: PeerListGossipConfig{ - PeerListNumValidatorIPs: constants.DefaultNetworkPeerListNumValidatorIPs, - PeerListPullGossipFreq: constants.DefaultNetworkPeerListPullGossipFreq, - PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, - }, - - DelayConfig: DelayConfig{ - InitialReconnectDelay: constants.DefaultNetworkInitialReconnectDelay, - MaxReconnectDelay: constants.DefaultNetworkMaxReconnectDelay, - }, - - MaxClockDifference: constants.DefaultNetworkMaxClockDifference, - CompressionType: constants.DefaultNetworkCompressionType, - PingFrequency: constants.DefaultPingFrequency, - AllowPrivateIPs: !constants.ProductionNetworkIDs.Contains(networkID), - UptimeMetricFreq: constants.DefaultUptimeMetricFreq, - MaximumInboundMessageTimeout: constants.DefaultNetworkMaximumInboundTimeout, - - RequireValidatorToConnect: constants.DefaultNetworkRequireValidatorToConnect, - PeerReadBufferSize: constants.DefaultNetworkPeerReadBufferSize, - PeerWriteBufferSize: constants.DefaultNetworkPeerWriteBufferSize, - } - - networkConfig.NetworkID = networkID - networkConfig.TrackedSubnets = trackedSubnets - tlsCert, err := staking.NewTLSCert() if err != nil { return nil, err } - tlsConfig := peer.TLSConfig(*tlsCert, nil) - networkConfig.TLSConfig = tlsConfig - networkConfig.TLSKey = tlsCert.PrivateKey.(crypto.Signer) - networkConfig.BLSKey, err = 
bls.NewSecretKey() + + blsKey, err := bls.NewSecretKey() if err != nil { return nil, err } - networkConfig.Validators = currentValidators - networkConfig.Beacons = validators.NewManager() - // This never actually does anything because we never initialize the P-chain - networkConfig.UptimeCalculator = uptime.NoOpCalculator - // TODO actually monitor usage // TestNetwork doesn't use disk so we don't need to track it, but we should // still have guardrails around cpu/memory usage. - networkConfig.ResourceTracker, err = tracker.NewResourceTracker( + resourceTracker, err := tracker.NewResourceTracker( metrics, resource.NoUsage, &meter.ContinuousFactory{}, @@ -205,31 +112,110 @@ func NewTestNetwork( if err != nil { return nil, err } - networkConfig.CPUTargeter = tracker.NewTargeter( - logging.NoLog{}, - &tracker.TargeterConfig{ - VdrAlloc: float64(runtime.NumCPU()), - MaxNonVdrUsage: .8 * float64(runtime.NumCPU()), - MaxNonVdrNodeUsage: float64(runtime.NumCPU()) / 8, - }, - currentValidators, - networkConfig.ResourceTracker.CPUTracker(), - ) - networkConfig.DiskTargeter = tracker.NewTargeter( - logging.NoLog{}, - &tracker.TargeterConfig{ - VdrAlloc: 1000 * units.GiB, - MaxNonVdrUsage: 1000 * units.GiB, - MaxNonVdrNodeUsage: 1000 * units.GiB, - }, - currentValidators, - networkConfig.ResourceTracker.DiskTracker(), - ) - - networkConfig.MyIPPort = ips.NewDynamicIPPort(net.IPv4zero, 1) return NewNetwork( - &networkConfig, + &Config{ + HealthConfig: HealthConfig{ + Enabled: true, + MinConnectedPeers: constants.DefaultNetworkHealthMinPeers, + MaxTimeSinceMsgReceived: constants.DefaultNetworkHealthMaxTimeSinceMsgReceived, + MaxTimeSinceMsgSent: constants.DefaultNetworkHealthMaxTimeSinceMsgSent, + MaxPortionSendQueueBytesFull: constants.DefaultNetworkHealthMaxPortionSendQueueFill, + MaxSendFailRate: constants.DefaultNetworkHealthMaxSendFailRate, + SendFailRateHalflife: constants.DefaultHealthCheckAveragerHalflife, + }, + PeerListGossipConfig: PeerListGossipConfig{ + 
PeerListNumValidatorIPs: constants.DefaultNetworkPeerListNumValidatorIPs, + PeerListPullGossipFreq: constants.DefaultNetworkPeerListPullGossipFreq, + PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, + }, + TimeoutConfig: TimeoutConfig{ + PingPongTimeout: constants.DefaultPingPongTimeout, + ReadHandshakeTimeout: constants.DefaultNetworkReadHandshakeTimeout, + }, + DelayConfig: DelayConfig{ + InitialReconnectDelay: constants.DefaultNetworkInitialReconnectDelay, + MaxReconnectDelay: constants.DefaultNetworkMaxReconnectDelay, + }, + ThrottlerConfig: ThrottlerConfig{ + InboundConnUpgradeThrottlerConfig: throttling.InboundConnUpgradeThrottlerConfig{ + UpgradeCooldown: constants.DefaultInboundConnUpgradeThrottlerCooldown, + MaxRecentConnsUpgraded: int(math.Ceil(constants.DefaultInboundThrottlerMaxConnsPerSec * constants.DefaultInboundConnUpgradeThrottlerCooldown.Seconds())), + }, + InboundMsgThrottlerConfig: throttling.InboundMsgThrottlerConfig{ + MsgByteThrottlerConfig: throttling.MsgByteThrottlerConfig{ + VdrAllocSize: constants.DefaultInboundThrottlerVdrAllocSize, + AtLargeAllocSize: constants.DefaultInboundThrottlerAtLargeAllocSize, + NodeMaxAtLargeBytes: constants.DefaultInboundThrottlerNodeMaxAtLargeBytes, + }, + BandwidthThrottlerConfig: throttling.BandwidthThrottlerConfig{ + RefillRate: constants.DefaultInboundThrottlerBandwidthRefillRate, + MaxBurstSize: constants.DefaultInboundThrottlerBandwidthMaxBurstSize, + }, + CPUThrottlerConfig: throttling.SystemThrottlerConfig{ + MaxRecheckDelay: constants.DefaultInboundThrottlerCPUMaxRecheckDelay, + }, + DiskThrottlerConfig: throttling.SystemThrottlerConfig{ + MaxRecheckDelay: constants.DefaultInboundThrottlerDiskMaxRecheckDelay, + }, + MaxProcessingMsgsPerNode: constants.DefaultInboundThrottlerMaxProcessingMsgsPerNode, + }, + OutboundMsgThrottlerConfig: throttling.MsgByteThrottlerConfig{ + VdrAllocSize: constants.DefaultOutboundThrottlerVdrAllocSize, + AtLargeAllocSize: 
constants.DefaultOutboundThrottlerAtLargeAllocSize, + NodeMaxAtLargeBytes: constants.DefaultOutboundThrottlerNodeMaxAtLargeBytes, + }, + MaxInboundConnsPerSec: constants.DefaultInboundThrottlerMaxConnsPerSec, + }, + ProxyEnabled: constants.DefaultNetworkTCPProxyEnabled, + ProxyReadHeaderTimeout: constants.DefaultNetworkTCPProxyReadTimeout, + DialerConfig: dialer.Config{ + ThrottleRps: constants.DefaultOutboundConnectionThrottlingRps, + ConnectionTimeout: constants.DefaultOutboundConnectionTimeout, + }, + TLSConfig: peer.TLSConfig(*tlsCert, nil), + MyIPPort: utils.NewAtomic(netip.AddrPortFrom( + netip.IPv4Unspecified(), + 1, + )), + NetworkID: networkID, + MaxClockDifference: constants.DefaultNetworkMaxClockDifference, + PingFrequency: constants.DefaultPingFrequency, + AllowPrivateIPs: !constants.ProductionNetworkIDs.Contains(networkID), + CompressionType: constants.DefaultNetworkCompressionType, + TLSKey: tlsCert.PrivateKey.(crypto.Signer), + BLSKey: blsKey, + TrackedSubnets: trackedSubnets, + Beacons: validators.NewManager(), + Validators: currentValidators, + UptimeCalculator: uptime.NoOpCalculator, + UptimeMetricFreq: constants.DefaultUptimeMetricFreq, + RequireValidatorToConnect: constants.DefaultNetworkRequireValidatorToConnect, + MaximumInboundMessageTimeout: constants.DefaultNetworkMaximumInboundTimeout, + PeerReadBufferSize: constants.DefaultNetworkPeerReadBufferSize, + PeerWriteBufferSize: constants.DefaultNetworkPeerWriteBufferSize, + ResourceTracker: resourceTracker, + CPUTargeter: tracker.NewTargeter( + logging.NoLog{}, + &tracker.TargeterConfig{ + VdrAlloc: float64(runtime.NumCPU()), + MaxNonVdrUsage: .8 * float64(runtime.NumCPU()), + MaxNonVdrNodeUsage: float64(runtime.NumCPU()) / 8, + }, + currentValidators, + resourceTracker.CPUTracker(), + ), + DiskTargeter: tracker.NewTargeter( + logging.NoLog{}, + &tracker.TargeterConfig{ + VdrAlloc: 1000 * units.GiB, + MaxNonVdrUsage: 1000 * units.GiB, + MaxNonVdrNodeUsage: 1000 * units.GiB, + }, + 
currentValidators, + resourceTracker.DiskTracker(), + ), + }, msgCreator, metrics, log, diff --git a/network/throttling/inbound_conn_upgrade_throttler.go b/network/throttling/inbound_conn_upgrade_throttler.go index 4df5ee39b776..4067d80b2b29 100644 --- a/network/throttling/inbound_conn_upgrade_throttler.go +++ b/network/throttling/inbound_conn_upgrade_throttler.go @@ -4,10 +4,10 @@ package throttling import ( + "net/netip" "sync" "time" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -36,7 +36,7 @@ type InboundConnUpgradeThrottler interface { // Must only be called after [Dispatch] has been called. // If [ip] is a local IP, this method always returns true. // Must not be called after [Stop] has been called. - ShouldUpgrade(ip ips.IPPort) bool + ShouldUpgrade(ip netip.AddrPort) bool } type InboundConnUpgradeThrottlerConfig struct { @@ -73,12 +73,12 @@ func (*noInboundConnUpgradeThrottler) Dispatch() {} func (*noInboundConnUpgradeThrottler) Stop() {} -func (*noInboundConnUpgradeThrottler) ShouldUpgrade(ips.IPPort) bool { +func (*noInboundConnUpgradeThrottler) ShouldUpgrade(netip.AddrPort) bool { return true } type ipAndTime struct { - ip string + ip netip.Addr cooldownElapsedAt time.Time } @@ -92,7 +92,7 @@ type inboundConnUpgradeThrottler struct { done chan struct{} // IP --> Present if ShouldUpgrade(ipStr) returned true // within the last [UpgradeCooldown]. - recentIPs set.Set[string] + recentIPs set.Set[netip.Addr] // Sorted in order of increasing time // of last call to ShouldUpgrade that returned true. // For each IP in this channel, ShouldUpgrade(ipStr) @@ -101,28 +101,29 @@ type inboundConnUpgradeThrottler struct { } // Returns whether we should upgrade an inbound connection from [ipStr]. 
-func (n *inboundConnUpgradeThrottler) ShouldUpgrade(ip ips.IPPort) bool { - if ip.IP.IsLoopback() { +func (n *inboundConnUpgradeThrottler) ShouldUpgrade(addrPort netip.AddrPort) bool { + // Only use addr (not port). This mitigates DoS attacks from many nodes on one + // host. + addr := addrPort.Addr() + if addr.IsLoopback() { // Don't rate-limit loopback IPs return true } - // Only use IP (not port). This mitigates DoS - // attacks from many nodes on one host. - ipStr := ip.IP.String() + n.lock.Lock() defer n.lock.Unlock() - if n.recentIPs.Contains(ipStr) { + if n.recentIPs.Contains(addr) { // We recently upgraded an inbound connection from this IP return false } select { case n.recentIPsAndTimes <- ipAndTime{ - ip: ipStr, + ip: addr, cooldownElapsedAt: n.clock.Time().Add(n.UpgradeCooldown), }: - n.recentIPs.Add(ipStr) + n.recentIPs.Add(addr) return true default: return false diff --git a/network/throttling/inbound_conn_upgrade_throttler_test.go b/network/throttling/inbound_conn_upgrade_throttler_test.go index 2f6cd926451e..802593db8b02 100644 --- a/network/throttling/inbound_conn_upgrade_throttler_test.go +++ b/network/throttling/inbound_conn_upgrade_throttler_test.go @@ -4,22 +4,21 @@ package throttling import ( - "net" + "net/netip" "testing" "time" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" ) var ( - host1 = ips.IPPort{IP: net.IPv4(1, 2, 3, 4), Port: 9651} - host2 = ips.IPPort{IP: net.IPv4(1, 2, 3, 5), Port: 9653} - host3 = ips.IPPort{IP: net.IPv4(1, 2, 3, 6), Port: 9655} - host4 = ips.IPPort{IP: net.IPv4(1, 2, 3, 7), Port: 9657} - loopbackIP = ips.IPPort{IP: net.IPv4(127, 0, 0, 1), Port: 9657} + host1 = netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 4}), 9651) + host2 = netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 5}), 9653) + host3 = netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 6}), 9655) + host4 = 
netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 7}), 9657) + loopbackIP = netip.AddrPortFrom(netip.AddrFrom4([4]byte{127, 0, 0, 1}), 9657) ) func TestNoInboundConnUpgradeThrottler(t *testing.T) { diff --git a/network/tracked_ip.go b/network/tracked_ip.go index 6a95bbee5a47..87377ec669e5 100644 --- a/network/tracked_ip.go +++ b/network/tracked_ip.go @@ -5,10 +5,9 @@ package network import ( "math/rand" + "net/netip" "sync" "time" - - "github.com/ava-labs/avalanchego/utils/ips" ) func init() { @@ -19,20 +18,20 @@ type trackedIP struct { delayLock sync.RWMutex delay time.Duration - ip ips.IPPort + ip netip.AddrPort stopTrackingOnce sync.Once onStopTracking chan struct{} } -func newTrackedIP(ip ips.IPPort) *trackedIP { +func newTrackedIP(ip netip.AddrPort) *trackedIP { return &trackedIP{ ip: ip, onStopTracking: make(chan struct{}), } } -func (ip *trackedIP) trackNewIP(newIP ips.IPPort) *trackedIP { +func (ip *trackedIP) trackNewIP(newIP netip.AddrPort) *trackedIP { ip.stopTracking() return &trackedIP{ delay: ip.getDelay(), diff --git a/network/tracked_ip_test.go b/network/tracked_ip_test.go index 90207e48a6e9..4e735668ecc9 100644 --- a/network/tracked_ip_test.go +++ b/network/tracked_ip_test.go @@ -4,7 +4,7 @@ package network import ( - "net" + "net/netip" "testing" "time" @@ -17,6 +17,11 @@ import ( var ( ip *ips.ClaimedIPPort otherIP *ips.ClaimedIPPort + + defaultLoopbackAddrPort = netip.AddrPortFrom( + netip.AddrFrom4([4]byte{127, 0, 0, 1}), + 9651, + ) ) func init() { @@ -31,10 +36,7 @@ func init() { } ip = ips.NewClaimedIPPort( stakingCert, - ips.IPPort{ - IP: net.IPv4(127, 0, 0, 1), - Port: 9651, - }, + defaultLoopbackAddrPort, 1, // timestamp nil, // signature ) @@ -51,10 +53,7 @@ func init() { } otherIP = ips.NewClaimedIPPort( stakingCert, - ips.IPPort{ - IP: net.IPv4(127, 0, 0, 1), - Port: 9651, - }, + defaultLoopbackAddrPort, 1, // timestamp nil, // signature ) diff --git a/node/config.go b/node/config.go index f5f8c1332530..6f8d3e1ec549 100644 
--- a/node/config.go +++ b/node/config.go @@ -5,6 +5,7 @@ package node import ( "crypto/tls" + "net/netip" "time" "github.com/ava-labs/avalanchego/api/server" @@ -18,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/set" @@ -85,8 +85,8 @@ type StakingConfig struct { } type StateSyncConfig struct { - StateSyncIDs []ids.NodeID `json:"stateSyncIDs"` - StateSyncIPs []ips.IPPort `json:"stateSyncIPs"` + StateSyncIDs []ids.NodeID `json:"stateSyncIDs"` + StateSyncIPs []netip.AddrPort `json:"stateSyncIPs"` } type BootstrapConfig struct { diff --git a/node/node.go b/node/node.go index 09fb05d06e86..63946140258d 100644 --- a/node/node.go +++ b/node/node.go @@ -13,6 +13,7 @@ import ( "io" "io/fs" "net" + "net/netip" "os" "path/filepath" "strconv" @@ -437,20 +438,26 @@ func (n *Node) initNetworking(reg prometheus.Registerer) error { // Record the bound address to enable inclusion in process context file. n.stakingAddress = listener.Addr().String() - ipPort, err := ips.ToIPPort(n.stakingAddress) + stakingAddrPort, err := ips.ParseAddrPort(n.stakingAddress) if err != nil { return err } - var dynamicIP ips.DynamicIPPort + var ( + publicAddr netip.Addr + atomicIP *utils.Atomic[netip.AddrPort] + ) switch { case n.Config.PublicIP != "": // Use the specified public IP. 
- ipPort.IP = net.ParseIP(n.Config.PublicIP) - if ipPort.IP == nil { - return fmt.Errorf("invalid IP Address: %s", n.Config.PublicIP) + publicAddr, err = ips.ParseAddr(n.Config.PublicIP) + if err != nil { + return fmt.Errorf("invalid public IP address %q: %w", n.Config.PublicIP, err) } - dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + atomicIP = utils.NewAtomic(netip.AddrPortFrom( + publicAddr, + stakingAddrPort.Port(), + )) n.ipUpdater = dynamicip.NewNoUpdater() case n.Config.PublicIPResolutionService != "": // Use dynamic IP resolution. @@ -461,40 +468,46 @@ func (n *Node) initNetworking(reg prometheus.Registerer) error { // Use that to resolve our public IP. ctx, cancel := context.WithTimeout(context.Background(), ipResolutionTimeout) - ipPort.IP, err = resolver.Resolve(ctx) + publicAddr, err = resolver.Resolve(ctx) cancel() if err != nil { return fmt.Errorf("couldn't resolve public IP: %w", err) } - dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) - n.ipUpdater = dynamicip.NewUpdater(dynamicIP, resolver, n.Config.PublicIPResolutionFreq) + atomicIP = utils.NewAtomic(netip.AddrPortFrom( + publicAddr, + stakingAddrPort.Port(), + )) + n.ipUpdater = dynamicip.NewUpdater(atomicIP, resolver, n.Config.PublicIPResolutionFreq) default: - ipPort.IP, err = n.router.ExternalIP() + publicAddr, err = n.router.ExternalIP() if err != nil { return fmt.Errorf("public IP / IP resolution service not given and failed to resolve IP with NAT: %w", err) } - dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + atomicIP = utils.NewAtomic(netip.AddrPortFrom( + publicAddr, + stakingAddrPort.Port(), + )) n.ipUpdater = dynamicip.NewNoUpdater() } - if ipPort.IP.IsLoopback() || ipPort.IP.IsPrivate() { + if !ips.IsPublic(publicAddr) { n.Log.Warn("P2P IP is private, you will not be publicly discoverable", - zap.Stringer("ip", ipPort), + zap.Stringer("ip", publicAddr), ) } // Regularly update our public IP and port mappings. 
n.portMapper.Map( - ipPort.Port, - ipPort.Port, + stakingAddrPort.Port(), + stakingAddrPort.Port(), stakingPortName, - dynamicIP, + atomicIP, n.Config.PublicIPResolutionFreq, ) go n.ipUpdater.Dispatch(n.Log) n.Log.Info("initializing networking", - zap.Stringer("ip", ipPort), + zap.Stringer("ip", atomicIP.Get()), ) tlsKey, ok := n.Config.StakingTLSCert.PrivateKey.(crypto.Signer) @@ -617,7 +630,7 @@ func (n *Node) initNetworking(reg prometheus.Registerer) error { // add node configs to network config n.Config.NetworkConfig.MyNodeID = n.ID - n.Config.NetworkConfig.MyIPPort = dynamicIP + n.Config.NetworkConfig.MyIPPort = atomicIP n.Config.NetworkConfig.NetworkID = n.Config.NetworkID n.Config.NetworkConfig.Validators = n.vdrs n.Config.NetworkConfig.Beacons = n.bootstrappers @@ -709,7 +722,7 @@ func (n *Node) Dispatch() error { // Add bootstrap nodes to the peer network for _, bootstrapper := range n.Config.Bootstrappers { - n.Net.ManuallyTrack(bootstrapper.ID, ips.IPPort(bootstrapper.IP)) + n.Net.ManuallyTrack(bootstrapper.ID, bootstrapper.IP) } // Start P2P connections @@ -962,7 +975,7 @@ func (n *Node) initAPIServer() error { ) return err } - hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() + hostIsPublic = ips.IsPublic(ip) n.Log.Debug("finished HTTP host lookup", zap.String("host", n.Config.HTTPHost), @@ -977,8 +990,8 @@ func (n *Node) initAPIServer() error { return err } - addr := listener.Addr().String() - ipPort, err := ips.ToIPPort(addr) + addrStr := listener.Addr().String() + addrPort, err := ips.ParseAddrPort(addrStr) if err != nil { return err } @@ -991,8 +1004,8 @@ func (n *Node) initAPIServer() error { ) n.portMapper.Map( - ipPort.Port, - ipPort.Port, + addrPort.Port(), + addrPort.Port(), httpPortName, nil, n.Config.PublicIPResolutionFreq, diff --git a/utils/atomic.go b/utils/atomic.go index 3bb125ee8af6..7236d9a50de8 100644 --- a/utils/atomic.go +++ b/utils/atomic.go @@ -3,13 +3,27 @@ package utils -import "sync" +import ( + "encoding/json" + "sync" +) + 
+var ( + _ json.Marshaler = (*Atomic[struct{}])(nil) + _ json.Unmarshaler = (*Atomic[struct{}])(nil) +) type Atomic[T any] struct { lock sync.RWMutex value T } +func NewAtomic[T any](value T) *Atomic[T] { + return &Atomic[T]{ + value: value, + } +} + func (a *Atomic[T]) Get() T { a.lock.RLock() defer a.lock.RUnlock() @@ -23,3 +37,17 @@ func (a *Atomic[T]) Set(value T) { a.value = value } + +func (a *Atomic[T]) MarshalJSON() ([]byte, error) { + a.lock.RLock() + defer a.lock.RUnlock() + + return json.Marshal(a.value) +} + +func (a *Atomic[T]) UnmarshalJSON(b []byte) error { + a.lock.Lock() + defer a.lock.Unlock() + + return json.Unmarshal(b, &a.value) +} diff --git a/utils/atomic_test.go b/utils/atomic_test.go index 3fa74063c18a..eee159d783f9 100644 --- a/utils/atomic_test.go +++ b/utils/atomic_test.go @@ -4,6 +4,8 @@ package utils import ( + "encoding/json" + "net/netip" "testing" "github.com/stretchr/testify/require" @@ -24,3 +26,46 @@ func TestAtomic(t *testing.T) { a.Set(false) require.False(a.Get()) } + +func TestAtomicJSON(t *testing.T) { + tests := []struct { + name string + value *Atomic[netip.AddrPort] + expected string + }{ + { + name: "zero value", + value: new(Atomic[netip.AddrPort]), + expected: `""`, + }, + { + name: "ipv4 value", + value: NewAtomic(netip.AddrPortFrom( + netip.AddrFrom4([4]byte{1, 2, 3, 4}), + 12345, + )), + expected: `"1.2.3.4:12345"`, + }, + { + name: "ipv6 loopback", + value: NewAtomic(netip.AddrPortFrom( + netip.IPv6Loopback(), + 12345, + )), + expected: `"[::1]:12345"`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + b, err := json.Marshal(test.value) + require.NoError(err) + require.Equal(test.expected, string(b)) + + var parsed Atomic[netip.AddrPort] + require.NoError(json.Unmarshal([]byte(test.expected), &parsed)) + require.Equal(test.value.Get(), parsed.Get()) + }) + } +} diff --git a/utils/beacon/beacon.go b/utils/beacon/beacon.go index 
38ac6df5b0f5..112c50f6db22 100644 --- a/utils/beacon/beacon.go +++ b/utils/beacon/beacon.go @@ -4,23 +4,24 @@ package beacon import ( + "net/netip" + "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/ips" ) var _ Beacon = (*beacon)(nil) type Beacon interface { ID() ids.NodeID - IP() ips.IPPort + IP() netip.AddrPort } type beacon struct { id ids.NodeID - ip ips.IPPort + ip netip.AddrPort } -func New(id ids.NodeID, ip ips.IPPort) Beacon { +func New(id ids.NodeID, ip netip.AddrPort) Beacon { return &beacon{ id: id, ip: ip, @@ -31,6 +32,6 @@ func (b *beacon) ID() ids.NodeID { return b.id } -func (b *beacon) IP() ips.IPPort { +func (b *beacon) IP() netip.AddrPort { return b.ip } diff --git a/utils/beacon/set.go b/utils/beacon/set.go index 8b6970b55421..56a292203ed5 100644 --- a/utils/beacon/set.go +++ b/utils/beacon/set.go @@ -5,10 +5,10 @@ package beacon import ( "errors" + "net/netip" "strings" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/ips" ) var ( @@ -25,7 +25,7 @@ type Set interface { Add(Beacon) error RemoveByID(ids.NodeID) error - RemoveByIP(ips.IPPort) error + RemoveByIP(netip.AddrPort) error Len() int @@ -35,14 +35,14 @@ type Set interface { type set struct { ids map[ids.NodeID]int - ips map[string]int + ips map[netip.AddrPort]int beacons []Beacon } func NewSet() Set { return &set{ ids: make(map[ids.NodeID]int), - ips: make(map[string]int), + ips: make(map[netip.AddrPort]int), } } @@ -53,14 +53,14 @@ func (s *set) Add(b Beacon) error { return errDuplicateID } - ipStr := b.IP().String() - _, duplicateIP := s.ips[ipStr] + ip := b.IP() + _, duplicateIP := s.ips[ip] if duplicateIP { return errDuplicateIP } s.ids[id] = len(s.beacons) - s.ips[ipStr] = len(s.beacons) + s.ips[ip] = len(s.beacons) s.beacons = append(s.beacons, b) return nil } @@ -71,12 +71,12 @@ func (s *set) RemoveByID(idToRemove ids.NodeID) error { return errUnknownID } toRemove := s.beacons[indexToRemove] 
- ipToRemove := toRemove.IP().String() + ipToRemove := toRemove.IP() indexToMove := len(s.beacons) - 1 toMove := s.beacons[indexToMove] idToMove := toMove.ID() - ipToMove := toMove.IP().String() + ipToMove := toMove.IP() s.ids[idToMove] = indexToRemove s.ips[ipToMove] = indexToRemove @@ -89,8 +89,8 @@ func (s *set) RemoveByID(idToRemove ids.NodeID) error { return nil } -func (s *set) RemoveByIP(ip ips.IPPort) error { - indexToRemove, exists := s.ips[ip.String()] +func (s *set) RemoveByIP(ip netip.AddrPort) error { + indexToRemove, exists := s.ips[ip] if !exists { return errUnknownIP } diff --git a/utils/beacon/set_test.go b/utils/beacon/set_test.go index 976d0582e3ff..04e250909fb5 100644 --- a/utils/beacon/set_test.go +++ b/utils/beacon/set_test.go @@ -4,13 +4,12 @@ package beacon import ( - "net" + "net/netip" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/ips" ) func TestSet(t *testing.T) { @@ -20,18 +19,18 @@ func TestSet(t *testing.T) { id1 := ids.BuildTestNodeID([]byte{1}) id2 := ids.BuildTestNodeID([]byte{2}) - ip0 := ips.IPPort{ - IP: net.IPv4zero, - Port: 0, - } - ip1 := ips.IPPort{ - IP: net.IPv4zero, - Port: 1, - } - ip2 := ips.IPPort{ - IP: net.IPv4zero, - Port: 2, - } + ip0 := netip.AddrPortFrom( + netip.IPv4Unspecified(), + 0, + ) + ip1 := netip.AddrPortFrom( + netip.IPv4Unspecified(), + 1, + ) + ip2 := netip.AddrPortFrom( + netip.IPv4Unspecified(), + 2, + ) b0 := New(id0, ip0) b1 := New(id1, ip1) diff --git a/utils/dynamicip/ifconfig_resolver.go b/utils/dynamicip/ifconfig_resolver.go index 36c8d5adf04c..754a17f86a31 100644 --- a/utils/dynamicip/ifconfig_resolver.go +++ b/utils/dynamicip/ifconfig_resolver.go @@ -7,9 +7,11 @@ import ( "context" "fmt" "io" - "net" "net/http" + "net/netip" "strings" + + "github.com/ava-labs/avalanchego/utils/ips" ) var _ Resolver = (*ifConfigResolver)(nil) @@ -19,29 +21,24 @@ type ifConfigResolver struct { url 
string } -func (r *ifConfigResolver) Resolve(ctx context.Context) (net.IP, error) { +func (r *ifConfigResolver) Resolve(ctx context.Context) (netip.Addr, error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, r.url, nil) if err != nil { - return nil, err + return netip.Addr{}, err } resp, err := http.DefaultClient.Do(req) if err != nil { - return nil, err + return netip.Addr{}, err } defer resp.Body.Close() ipBytes, err := io.ReadAll(resp.Body) if err != nil { // Drop any error to report the original error - return nil, fmt.Errorf("failed to read response from %q: %w", r.url, err) + return netip.Addr{}, fmt.Errorf("failed to read response from %q: %w", r.url, err) } - ipStr := string(ipBytes) - ipStr = strings.TrimSpace(ipStr) - ipResolved := net.ParseIP(ipStr) - if ipResolved == nil { - return nil, fmt.Errorf("couldn't parse IP from %q", ipStr) - } - return ipResolved, nil + ipStr := strings.TrimSpace(string(ipBytes)) + return ips.ParseAddr(ipStr) } diff --git a/utils/dynamicip/opendns_resolver.go b/utils/dynamicip/opendns_resolver.go index 5c39c95535fc..ccf75653c81e 100644 --- a/utils/dynamicip/opendns_resolver.go +++ b/utils/dynamicip/opendns_resolver.go @@ -7,6 +7,9 @@ import ( "context" "errors" "net" + "net/netip" + + "github.com/ava-labs/avalanchego/utils/ips" ) const openDNSUrl = "resolver1.opendns.com:53" @@ -34,13 +37,15 @@ func newOpenDNSResolver() Resolver { } } -func (r *openDNSResolver) Resolve(ctx context.Context) (net.IP, error) { - ips, err := r.resolver.LookupIP(ctx, "ip", "myip.opendns.com") +func (r *openDNSResolver) Resolve(ctx context.Context) (netip.Addr, error) { + resolvedIPs, err := r.resolver.LookupIP(ctx, "ip", "myip.opendns.com") if err != nil { - return nil, err + return netip.Addr{}, err } - if len(ips) == 0 { - return nil, errOpenDNSNoIP + for _, ip := range resolvedIPs { + if addr, ok := ips.AddrFromSlice(ip); ok { + return addr, nil + } } - return ips[0], nil + return netip.Addr{}, errOpenDNSNoIP } diff --git 
a/utils/dynamicip/resolver.go b/utils/dynamicip/resolver.go index 45ad3778bc01..05f8896c7b36 100644 --- a/utils/dynamicip/resolver.go +++ b/utils/dynamicip/resolver.go @@ -7,7 +7,7 @@ import ( "context" "errors" "fmt" - "net" + "net/netip" "strings" ) @@ -29,7 +29,7 @@ var errUnknownResolver = errors.New("unknown resolver") // Resolver resolves our public IP type Resolver interface { // Resolve and return our public IP. - Resolve(context.Context) (net.IP, error) + Resolve(context.Context) (netip.Addr, error) } // Returns a new Resolver that uses the given service diff --git a/utils/dynamicip/updater.go b/utils/dynamicip/updater.go index 9a59c9fd25e0..18c41e2fd742 100644 --- a/utils/dynamicip/updater.go +++ b/utils/dynamicip/updater.go @@ -5,11 +5,12 @@ package dynamicip import ( "context" + "net/netip" "time" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -30,7 +31,7 @@ type Updater interface { type updater struct { // The IP we periodically modify. - dynamicIP ips.DynamicIPPort + dynamicIP *utils.Atomic[netip.AddrPort] // Used to find out what our public IP is. resolver Resolver // The parent of all contexts passed into resolver.Resolve(). @@ -49,7 +50,7 @@ type updater struct { // every [updateFreq]. Uses [resolver] to find // out what our public IP is. 
func NewUpdater( - dynamicIP ips.DynamicIPPort, + dynamicIP *utils.Atomic[netip.AddrPort], resolver Resolver, updateFreq time.Duration, ) Updater { @@ -73,13 +74,16 @@ func (u *updater) Dispatch(log logging.Logger) { close(u.doneChan) }() + var ( + initialAddrPort = u.dynamicIP.Get() + oldAddr = initialAddrPort.Addr() + port = initialAddrPort.Port() + ) for { select { case <-ticker.C: - oldIP := u.dynamicIP.IPPort().IP - ctx, cancel := context.WithTimeout(u.rootCtx, ipResolutionTimeout) - newIP, err := u.resolver.Resolve(ctx) + newAddr, err := u.resolver.Resolve(ctx) cancel() if err != nil { log.Warn("couldn't resolve public IP. If this machine's IP recently changed, it may be sharing the wrong public IP with peers", @@ -88,11 +92,13 @@ func (u *updater) Dispatch(log logging.Logger) { continue } - if !newIP.Equal(oldIP) { - u.dynamicIP.SetIP(newIP) + if newAddr != oldAddr { + u.dynamicIP.Set(netip.AddrPortFrom(newAddr, port)) log.Info("updated public IP", - zap.Stringer("newIP", newIP), + zap.Stringer("oldIP", oldAddr), + zap.Stringer("newIP", newAddr), ) + oldAddr = newAddr } case <-u.rootCtx.Done(): return diff --git a/utils/dynamicip/updater_test.go b/utils/dynamicip/updater_test.go index 66c9a21c4c6a..3d0cbcaceca7 100644 --- a/utils/dynamicip/updater_test.go +++ b/utils/dynamicip/updater_test.go @@ -5,42 +5,51 @@ package dynamicip import ( "context" - "net" + "net/netip" "testing" "time" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" ) var _ Resolver = (*mockResolver)(nil) type mockResolver struct { - onResolve func(context.Context) (net.IP, error) + onResolve func(context.Context) (netip.Addr, error) } -func (r *mockResolver) Resolve(ctx context.Context) (net.IP, error) { +func (r *mockResolver) Resolve(ctx context.Context) (netip.Addr, error) { return r.onResolve(ctx) } func TestNewUpdater(t *testing.T) 
{ require := require.New(t) - originalIP := net.IPv4zero - originalPort := 9651 - dynamicIP := ips.NewDynamicIPPort(originalIP, uint16(originalPort)) - newIP := net.IPv4(1, 2, 3, 4) + + const ( + port = 9651 + updateFrequency = time.Millisecond + stopTimeout = 5 * time.Second + ) + + var ( + originalAddr = netip.IPv4Unspecified() + originalAddrPort = netip.AddrPortFrom(originalAddr, port) + newAddr = netip.AddrFrom4([4]byte{1, 2, 3, 4}) + expectedNewAddrPort = netip.AddrPortFrom(newAddr, port) + dynamicIP = utils.NewAtomic(originalAddrPort) + ) resolver := &mockResolver{ - onResolve: func(context.Context) (net.IP, error) { - return newIP, nil + onResolve: func(context.Context) (netip.Addr, error) { + return newAddr, nil }, } - updateFreq := time.Millisecond updaterIntf := NewUpdater( dynamicIP, resolver, - updateFreq, + updateFrequency, ) // Assert NewUpdater returns expected type @@ -53,28 +62,23 @@ func TestNewUpdater(t *testing.T) { require.NotNil(updater.rootCtx) require.NotNil(updater.rootCtxCancel) require.NotNil(updater.doneChan) - require.Equal(updateFreq, updater.updateFreq) + require.Equal(updateFrequency, updater.updateFreq) // Start updating the IP address - go updaterIntf.Dispatch(logging.NoLog{}) + go updater.Dispatch(logging.NoLog{}) // Assert that the IP is updated within 5s. 
- expectedIP := ips.IPPort{ - IP: newIP, - Port: uint16(originalPort), - } require.Eventually( func() bool { - return expectedIP.Equal(dynamicIP.IPPort()) + return dynamicIP.Get() == expectedNewAddrPort }, 5*time.Second, - updateFreq, + updateFrequency, ) // Make sure stopChan and doneChan are closed when stop is called - updaterIntf.Stop() + updater.Stop() - stopTimeout := 5 * time.Second ctx, cancel := context.WithTimeout(context.Background(), stopTimeout) defer cancel() select { diff --git a/utils/ips/claimed_ip_port.go b/utils/ips/claimed_ip_port.go index 2ef6c0a71087..3721de9d624c 100644 --- a/utils/ips/claimed_ip_port.go +++ b/utils/ips/claimed_ip_port.go @@ -4,6 +4,9 @@ package ips import ( + "net" + "net/netip" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/hashing" @@ -12,7 +15,7 @@ import ( const ( // Certificate length, signature length, IP, timestamp, tx ID - baseIPCertDescLen = 2*wrappers.IntLen + IPPortLen + wrappers.LongLen + ids.IDLen + baseIPCertDescLen = 2*wrappers.IntLen + net.IPv6len + wrappers.ShortLen + wrappers.LongLen + ids.IDLen preimageLen = ids.IDLen + wrappers.LongLen ) @@ -22,7 +25,7 @@ type ClaimedIPPort struct { // The peer's certificate. Cert *staking.Certificate // The peer's claimed IP and port. - IPPort IPPort + AddrPort netip.AddrPort // The time the peer claimed to own this IP and port. Timestamp uint64 // [Cert]'s signature over the IPPort and timestamp. 
@@ -38,13 +41,13 @@ type ClaimedIPPort struct { func NewClaimedIPPort( cert *staking.Certificate, - ipPort IPPort, + ipPort netip.AddrPort, timestamp uint64, signature []byte, ) *ClaimedIPPort { ip := &ClaimedIPPort{ Cert: cert, - IPPort: ipPort, + AddrPort: ipPort, Timestamp: timestamp, Signature: signature, NodeID: ids.NodeIDFromCert(cert), diff --git a/utils/ips/dynamic_ip_port.go b/utils/ips/dynamic_ip_port.go deleted file mode 100644 index 0b83ab5924f1..000000000000 --- a/utils/ips/dynamic_ip_port.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ips - -import ( - "encoding/json" - "net" - "sync" -) - -var _ DynamicIPPort = (*dynamicIPPort)(nil) - -// An IPPort that can change. -// Safe for use by multiple goroutines. -type DynamicIPPort interface { - // Returns the IP + port pair. - IPPort() IPPort - // Changes the IP. - SetIP(ip net.IP) -} - -type dynamicIPPort struct { - lock sync.RWMutex - ipPort IPPort -} - -func NewDynamicIPPort(ip net.IP, port uint16) DynamicIPPort { - return &dynamicIPPort{ - ipPort: IPPort{ - IP: ip, - Port: port, - }, - } -} - -func (i *dynamicIPPort) IPPort() IPPort { - i.lock.RLock() - defer i.lock.RUnlock() - - return i.ipPort -} - -func (i *dynamicIPPort) SetIP(ip net.IP) { - i.lock.Lock() - defer i.lock.Unlock() - - i.ipPort.IP = ip -} - -func (i *dynamicIPPort) MarshalJSON() ([]byte, error) { - i.lock.RLock() - defer i.lock.RUnlock() - - return json.Marshal(i.ipPort) -} diff --git a/utils/ips/ip.go b/utils/ips/ip.go new file mode 100644 index 000000000000..2194a0941426 --- /dev/null +++ b/utils/ips/ip.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ips + +import "net/netip" + +// IsPublic returns true if the provided address is considered to be a public +// IP. 
+func IsPublic(addr netip.Addr) bool { + return addr.IsGlobalUnicast() && !addr.IsPrivate() +} + +// ParseAddr returns the IP address from the provided string. If the string +// represents an IPv4 address in an IPv6 address, the IPv4 address is returned. +func ParseAddr(s string) (netip.Addr, error) { + addr, err := netip.ParseAddr(s) + if err != nil { + return netip.Addr{}, err + } + if addr.Is4In6() { + addr = addr.Unmap() + } + return addr, nil +} + +// ParseAddrPort returns the IP:port address from the provided string. If the +// string represents an IPv4 address in an IPv6 address, the IPv4 address is +// returned. +func ParseAddrPort(s string) (netip.AddrPort, error) { + addrPort, err := netip.ParseAddrPort(s) + if err != nil { + return netip.AddrPort{}, err + } + addr := addrPort.Addr() + if addr.Is4In6() { + addrPort = netip.AddrPortFrom( + addr.Unmap(), + addrPort.Port(), + ) + } + return addrPort, nil +} + +// AddrFromSlice returns the IP address from the provided byte slice. If the +// byte slice represents an IPv4 address in an IPv6 address, the IPv4 address is +// returned. +func AddrFromSlice(b []byte) (netip.Addr, bool) { + addr, ok := netip.AddrFromSlice(b) + if !ok { + return netip.Addr{}, false + } + if addr.Is4In6() { + addr = addr.Unmap() + } + return addr, true +} diff --git a/utils/ips/ip_port.go b/utils/ips/ip_port.go deleted file mode 100644 index eea203525a87..000000000000 --- a/utils/ips/ip_port.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package ips - -import ( - "errors" - "fmt" - "net" - "strconv" - - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -const ( - IPPortLen = net.IPv6len + wrappers.ShortLen - nullStr = "null" -) - -var ( - errMissingQuotes = errors.New("first and last characters should be quotes") - errBadIP = errors.New("bad ip format") -) - -type IPDesc IPPort - -func (ipDesc IPDesc) String() string { - return IPPort(ipDesc).String() -} - -func (ipDesc IPDesc) MarshalJSON() ([]byte, error) { - return []byte(`"` + ipDesc.String() + `"`), nil -} - -func (ipDesc *IPDesc) UnmarshalJSON(b []byte) error { - str := string(b) - if str == nullStr { // If "null", do nothing - return nil - } else if len(str) < 2 { - return errMissingQuotes - } - - lastIndex := len(str) - 1 - if str[0] != '"' || str[lastIndex] != '"' { - return errMissingQuotes - } - - ipPort, err := ToIPPort(str[1:lastIndex]) - if err != nil { - return fmt.Errorf("couldn't decode to IPPort: %w", err) - } - *ipDesc = IPDesc(ipPort) - - return nil -} - -// An IP and a port. -type IPPort struct { - IP net.IP `json:"ip"` - Port uint16 `json:"port"` -} - -func (ipPort IPPort) Equal(other IPPort) bool { - return ipPort.Port == other.Port && ipPort.IP.Equal(other.IP) -} - -func (ipPort IPPort) String() string { - return net.JoinHostPort(ipPort.IP.String(), strconv.FormatUint(uint64(ipPort.Port), 10)) -} - -// IsZero returns if the IP or port is zeroed out -func (ipPort IPPort) IsZero() bool { - ip := ipPort.IP - return ipPort.Port == 0 || - len(ip) == 0 || - ip.Equal(net.IPv4zero) || - ip.Equal(net.IPv6zero) -} - -func ToIPPort(str string) (IPPort, error) { - host, portStr, err := net.SplitHostPort(str) - if err != nil { - return IPPort{}, errBadIP - } - port, err := strconv.ParseUint(portStr, 10 /*=base*/, 16 /*=size*/) - if err != nil { - // TODO: Should this return a locally defined error? (e.g. 
errBadPort) - return IPPort{}, err - } - ip := net.ParseIP(host) - if ip == nil { - return IPPort{}, errBadIP - } - return IPPort{ - IP: ip, - Port: uint16(port), - }, nil -} - -// PackIP packs an ip port pair to the byte array -func PackIP(p *wrappers.Packer, ip IPPort) { - p.PackFixedBytes(ip.IP.To16()) - p.PackShort(ip.Port) -} diff --git a/utils/ips/ip_test.go b/utils/ips/ip_test.go deleted file mode 100644 index 903f26a2d070..000000000000 --- a/utils/ips/ip_test.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ips - -import ( - "encoding/json" - "net" - "strconv" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestIPPortEqual(t *testing.T) { - tests := []struct { - ipPort string - ipPort1 IPPort - ipPort2 IPPort - result bool - }{ - // Expected equal - { - `"127.0.0.1:0"`, - IPPort{net.ParseIP("127.0.0.1"), 0}, - IPPort{net.ParseIP("127.0.0.1"), 0}, - true, - }, - { - `"[::1]:0"`, - IPPort{net.ParseIP("::1"), 0}, - IPPort{net.ParseIP("::1"), 0}, - true, - }, - { - `"127.0.0.1:0"`, - IPPort{net.ParseIP("127.0.0.1"), 0}, - IPPort{net.ParseIP("::ffff:127.0.0.1"), 0}, - true, - }, - - // Expected unequal - { - `"127.0.0.1:0"`, - IPPort{net.ParseIP("127.0.0.1"), 0}, - IPPort{net.ParseIP("1.2.3.4"), 0}, - false, - }, - { - `"[::1]:0"`, - IPPort{net.ParseIP("::1"), 0}, - IPPort{net.ParseIP("2001::1"), 0}, - false, - }, - { - `"127.0.0.1:0"`, - IPPort{net.ParseIP("127.0.0.1"), 0}, - IPPort{net.ParseIP("127.0.0.1"), 1}, - false, - }, - } - for i, tt := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - require := require.New(t) - - ipPort := IPDesc{} - require.NoError(ipPort.UnmarshalJSON([]byte(tt.ipPort))) - require.Equal(tt.ipPort1, IPPort(ipPort)) - - ipPortJSON, err := json.Marshal(ipPort) - require.NoError(err) - require.Equal(tt.ipPort, string(ipPortJSON)) - - require.Equal(tt.result, tt.ipPort1.Equal(tt.ipPort2)) - }) - 
} -} - -func TestIPPortString(t *testing.T) { - tests := []struct { - ipPort IPPort - result string - }{ - {IPPort{net.ParseIP("127.0.0.1"), 0}, "127.0.0.1:0"}, - {IPPort{net.ParseIP("::1"), 42}, "[::1]:42"}, - {IPPort{net.ParseIP("::ffff:127.0.0.1"), 65535}, "127.0.0.1:65535"}, - {IPPort{net.IP{}, 1234}, ":1234"}, - } - for _, tt := range tests { - t.Run(tt.result, func(t *testing.T) { - require.Equal(t, tt.result, tt.ipPort.String()) - }) - } -} - -func TestToIPPortError(t *testing.T) { - tests := []struct { - in string - out IPPort - expectedErr error - }{ - { - in: "", - out: IPPort{}, - expectedErr: errBadIP, - }, - { - in: ":", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: "abc:", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: ":abc", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: "abc:abc", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: "127.0.0.1:", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: ":1", - out: IPPort{}, - expectedErr: errBadIP, - }, - { - in: "::1", - out: IPPort{}, - expectedErr: errBadIP, - }, - { - in: "::1:42", - out: IPPort{}, - expectedErr: errBadIP, - }, - } - for _, tt := range tests { - t.Run(tt.in, func(t *testing.T) { - require := require.New(t) - - result, err := ToIPPort(tt.in) - require.ErrorIs(err, tt.expectedErr) - require.Equal(tt.out, result) - }) - } -} - -func TestToIPPort(t *testing.T) { - tests := []struct { - in string - out IPPort - }{ - {"127.0.0.1:42", IPPort{net.ParseIP("127.0.0.1"), 42}}, - {"[::1]:42", IPPort{net.ParseIP("::1"), 42}}, - } - for _, tt := range tests { - t.Run(tt.in, func(t *testing.T) { - require := require.New(t) - - result, err := ToIPPort(tt.in) - require.NoError(err) - require.Equal(tt.out, result) - }) - } -} diff --git a/utils/ips/lookup.go b/utils/ips/lookup.go index cdf9176f9568..cf4158d233b3 100644 --- a/utils/ips/lookup.go +++ b/utils/ips/lookup.go @@ -6,6 +6,7 @@ package ips import ( "errors" 
"net" + "net/netip" ) var errNoIPsFound = errors.New("no IPs found") @@ -15,20 +16,22 @@ var errNoIPsFound = errors.New("no IPs found") // pick any of the IPs. // // Note: IPv4 is preferred because `net.Listen` prefers IPv4. -func Lookup(hostname string) (net.IP, error) { +func Lookup(hostname string) (netip.Addr, error) { ips, err := net.LookupIP(hostname) if err != nil { - return nil, err + return netip.Addr{}, err } if len(ips) == 0 { - return nil, errNoIPsFound + return netip.Addr{}, errNoIPsFound } for _, ip := range ips { ipv4 := ip.To4() if ipv4 != nil { - return ipv4, nil + addr, _ := AddrFromSlice(ipv4) + return addr, nil } } - return ips[0], nil + addr, _ := AddrFromSlice(ips[0]) + return addr, nil } diff --git a/utils/ips/lookup_test.go b/utils/ips/lookup_test.go index 9fecccc54593..4f5621dfce7b 100644 --- a/utils/ips/lookup_test.go +++ b/utils/ips/lookup_test.go @@ -4,7 +4,7 @@ package ips import ( - "net" + "net/netip" "testing" "github.com/stretchr/testify/require" @@ -13,23 +13,23 @@ import ( func TestLookup(t *testing.T) { tests := []struct { host string - ip net.IP + ip netip.Addr }{ { host: "127.0.0.1", - ip: net.ParseIP("127.0.0.1").To4(), + ip: netip.AddrFrom4([4]byte{127, 0, 0, 1}), }, { host: "localhost", - ip: net.ParseIP("127.0.0.1").To4(), + ip: netip.AddrFrom4([4]byte{127, 0, 0, 1}), }, { host: "::", - ip: net.IPv6zero, + ip: netip.IPv6Unspecified(), }, { host: "0.0.0.0", - ip: net.ParseIP("0.0.0.0").To4(), + ip: netip.IPv4Unspecified(), }, } for _, tt := range tests { From 41e46d1178116eb7421fe2adbf2eaa7ef97b0f8b Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 12 Jun 2024 13:05:20 -0400 Subject: [PATCH 056/102] Update versions for v1.11.8 (#3103) --- RELEASES.md | 27 +++++++++++++++++++++++++++ version/compatibility.json | 3 ++- version/constants.go | 2 +- 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 01521a2b4498..e616850e816c 100644 --- a/RELEASES.md +++ b/RELEASES.md 
@@ -1,5 +1,32 @@ # Release Notes +## [v1.11.8](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.8) + +This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is unchanged at `35` and is compatible with versions `v1.11.3-v1.11.7`. + +### APIs + +- Redesigned metrics to use labels rather than custom namespaces. + +### What's Changed + +- Remove avalanche metrics registerer from consensus context by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3087 +- Remove rejection from `consensus.Add` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3084 +- [vms/platformvm] Rename `txstest.Builder` to `txstest.WalletFactory` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2890 +- Small metrics cleanup by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3088 +- Fix race in test by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3089 +- Implement error driven snowflake hardcoded to support a single beta by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2978 +- Replace all chain namespaces with labels by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3053 +- add a metrics gauge for built block slot by @tsachiherman in https://github.com/ava-labs/avalanchego/pull/3048 +- [ci] Switch to gh workers for arm64 by @marun in https://github.com/ava-labs/avalanchego/pull/3090 +- [ci] Ensure focal arm64 builds all have their required dependencies by @marun in https://github.com/ava-labs/avalanchego/pull/3091 +- X-chain - consolidate tx creation in unit tests by @abi87 in https://github.com/ava-labs/avalanchego/pull/2736 +- Use netip.AddrPort rather than ips.IPPort by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3094 + +**Full 
Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.7...v1.11.8 + ## [v1.11.7](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.7) This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. diff --git a/version/compatibility.json b/version/compatibility.json index c2d3525a0393..a1596351f373 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -4,7 +4,8 @@ "v1.11.4", "v1.11.5", "v1.11.6", - "v1.11.7" + "v1.11.7", + "v1.11.8" ], "34": [ "v1.11.2" diff --git a/version/constants.go b/version/constants.go index 1e2b809d5a7a..2899b37fac6b 100644 --- a/version/constants.go +++ b/version/constants.go @@ -26,7 +26,7 @@ var ( Current = &Semantic{ Major: 1, Minor: 11, - Patch: 7, + Patch: 8, } CurrentApp = &Application{ Name: Client, From 5002b8244de7fb6e325a6e322ece5c6d2a1f8149 Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Wed, 12 Jun 2024 15:27:13 -0400 Subject: [PATCH 057/102] Error driven snowflake multi counter (#3092) --- snow/consensus/snowball/binary_snowball.go | 4 +- .../snowball/binary_snowball_test.go | 23 +- snow/consensus/snowball/binary_snowflake.go | 19 +- .../snowball/binary_snowflake_test.go | 44 ++- snow/consensus/snowball/factory.go | 8 +- snow/consensus/snowball/flat_test.go | 2 +- snow/consensus/snowball/nnary_snowball.go | 4 +- .../consensus/snowball/nnary_snowball_test.go | 14 +- snow/consensus/snowball/nnary_snowflake.go | 19 +- .../snowball/nnary_snowflake_test.go | 52 ++- snow/consensus/snowball/parameters.go | 9 + snow/consensus/snowball/test_snowflake.go | 145 ++++++++ snow/consensus/snowball/tree_test.go | 326 +++++++++--------- snow/consensus/snowball/unary_snowball.go | 4 +- .../consensus/snowball/unary_snowball_test.go | 7 +- snow/consensus/snowball/unary_snowflake.go | 17 +- .../snowball/unary_snowflake_test.go | 39 ++- 17 files changed, 503 insertions(+), 233 deletions(-) create 
mode 100644 snow/consensus/snowball/test_snowflake.go diff --git a/snow/consensus/snowball/binary_snowball.go b/snow/consensus/snowball/binary_snowball.go index e8a424378a89..16649c3252a9 100644 --- a/snow/consensus/snowball/binary_snowball.go +++ b/snow/consensus/snowball/binary_snowball.go @@ -7,9 +7,9 @@ import "fmt" var _ Binary = (*binarySnowball)(nil) -func newBinarySnowball(alphaPreference int, alphaConfidence int, beta int, choice int) binarySnowball { +func newBinarySnowball(alphaPreference int, terminationConditions []terminationCondition, choice int) binarySnowball { return binarySnowball{ - binarySnowflake: newBinarySnowflake(alphaPreference, alphaConfidence, beta, choice), + binarySnowflake: newBinarySnowflake(alphaPreference, terminationConditions, choice), preference: choice, } } diff --git a/snow/consensus/snowball/binary_snowball_test.go b/snow/consensus/snowball/binary_snowball_test.go index 118b3c7913a7..968743ef36a8 100644 --- a/snow/consensus/snowball/binary_snowball_test.go +++ b/snow/consensus/snowball/binary_snowball_test.go @@ -17,8 +17,9 @@ func TestBinarySnowball(t *testing.T) { alphaPreference, alphaConfidence := 2, 3 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(alphaPreference, alphaConfidence, beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) @@ -47,8 +48,9 @@ func TestBinarySnowballRecordPollPreference(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(alphaPreference, alphaConfidence, beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) @@ -72,7 +74,7 @@ func TestBinarySnowballRecordPollPreference(t *testing.T) { require.Equal(red, sb.Preference()) 
require.True(sb.Finalized()) - expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 1, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" + expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 1, SF(Confidence = [2], Finalized = true, SL(Preference = 0)))" require.Equal(expected, sb.String()) } @@ -84,8 +86,9 @@ func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(alphaPreference, alphaConfidence, beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) @@ -103,7 +106,7 @@ func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { require.Equal(blue, sb.Preference()) require.True(sb.Finalized()) - expected := "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 3, SF(Confidence = 2, Finalized = true, SL(Preference = 1)))" + expected := "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 3, SF(Confidence = [2], Finalized = true, SL(Preference = 1)))" require.Equal(expected, sb.String()) } @@ -115,8 +118,9 @@ func TestBinarySnowballAcceptWeirdColor(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(alphaPreference, alphaConfidence, beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) @@ -144,7 +148,7 @@ func TestBinarySnowballAcceptWeirdColor(t *testing.T) { require.Equal(blue, sb.Preference()) require.True(sb.Finalized()) - expected := "SB(Preference = 1, PreferenceStrength[0] = 2, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" + expected := "SB(Preference 
= 1, PreferenceStrength[0] = 2, PreferenceStrength[1] = 2, SF(Confidence = [2], Finalized = true, SL(Preference = 0)))" require.Equal(expected, sb.String()) } @@ -156,8 +160,9 @@ func TestBinarySnowballLockColor(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 1 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(alphaPreference, alphaConfidence, beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) sb.RecordPoll(alphaConfidence, red) @@ -175,6 +180,6 @@ func TestBinarySnowballLockColor(t *testing.T) { require.Equal(red, sb.Preference()) require.True(sb.Finalized()) - expected := "SB(Preference = 1, PreferenceStrength[0] = 1, PreferenceStrength[1] = 3, SF(Confidence = 1, Finalized = true, SL(Preference = 0)))" + expected := "SB(Preference = 1, PreferenceStrength[0] = 1, PreferenceStrength[1] = 3, SF(Confidence = [1], Finalized = true, SL(Preference = 0)))" require.Equal(expected, sb.String()) } diff --git a/snow/consensus/snowball/binary_snowflake.go b/snow/consensus/snowball/binary_snowflake.go index 81cca1ce8501..6dc856a7bc7c 100644 --- a/snow/consensus/snowball/binary_snowflake.go +++ b/snow/consensus/snowball/binary_snowflake.go @@ -7,17 +7,12 @@ import "fmt" var _ Binary = (*binarySnowflake)(nil) -func newBinarySnowflake(alphaPreference, alphaConfidence, beta, choice int) binarySnowflake { +func newBinarySnowflake(alphaPreference int, terminationConditions []terminationCondition, choice int) binarySnowflake { return binarySnowflake{ - binarySlush: newBinarySlush(choice), - alphaPreference: alphaPreference, - terminationConditions: []terminationCondition{ - { - alphaConfidence: alphaConfidence, - beta: beta, - }, - }, - confidence: make([]int, 1), + binarySlush: newBinarySlush(choice), + alphaPreference: alphaPreference, + terminationConditions: terminationConditions, + confidence: make([]int, len(terminationConditions)), } } @@ -94,8 +89,8 @@ func (sf 
*binarySnowflake) Finalized() bool { } func (sf *binarySnowflake) String() string { - return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", - sf.confidence[0], + return fmt.Sprintf("SF(Confidence = %v, Finalized = %v, %s)", + sf.confidence, sf.finalized, &sf.binarySlush) } diff --git a/snow/consensus/snowball/binary_snowflake_test.go b/snow/consensus/snowball/binary_snowflake_test.go index 16944b5b2082..ca2347aa086d 100644 --- a/snow/consensus/snowball/binary_snowflake_test.go +++ b/snow/consensus/snowball/binary_snowflake_test.go @@ -17,8 +17,9 @@ func TestBinarySnowflake(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sf := newBinarySnowflake(alphaPreference, alphaConfidence, beta, red) + sf := newBinarySnowflake(alphaPreference, terminationConditions, red) require.Equal(red, sf.Preference()) require.False(sf.Finalized()) @@ -50,3 +51,44 @@ func TestBinarySnowflake(t *testing.T) { require.Equal(blue, sf.Preference()) require.True(sf.Finalized()) } + +type binarySnowflakeTest struct { + require *require.Assertions + + binarySnowflake +} + +func newBinarySnowflakeTest(t *testing.T, alphaPreference int, terminationConditions []terminationCondition) snowflakeTest[int] { + require := require.New(t) + + return &binarySnowflakeTest{ + require: require, + binarySnowflake: newBinarySnowflake(alphaPreference, terminationConditions, 0), + } +} + +func (sf *binarySnowflakeTest) RecordPoll(count int, choice int) { + sf.binarySnowflake.RecordPoll(count, choice) +} + +func (sf *binarySnowflakeTest) AssertEqual(expectedConfidences []int, expectedFinalized bool, expectedPreference int) { + sf.require.Equal(expectedPreference, sf.Preference()) + sf.require.Equal(expectedConfidences, sf.binarySnowflake.confidence) + sf.require.Equal(expectedFinalized, sf.Finalized()) +} + +func TestBinarySnowflakeErrorDrivenSingleChoice(t *testing.T) { + for _, test := range 
getErrorDrivenSnowflakeSingleChoiceSuite[int]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newBinarySnowflakeTest, 0) + }) + } +} + +func TestBinarySnowflakeErrorDrivenMultiChoice(t *testing.T) { + for _, test := range getErrorDrivenSnowflakeMultiChoiceSuite[int]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newBinarySnowflakeTest, 0, 1) + }) + } +} diff --git a/snow/consensus/snowball/factory.go b/snow/consensus/snowball/factory.go index eea827202bc3..e9ae98180e6e 100644 --- a/snow/consensus/snowball/factory.go +++ b/snow/consensus/snowball/factory.go @@ -13,23 +13,23 @@ var ( type snowballFactory struct{} func (snowballFactory) NewNnary(params Parameters, choice ids.ID) Nnary { - sb := newNnarySnowball(params.AlphaPreference, params.AlphaConfidence, params.Beta, choice) + sb := newNnarySnowball(params.AlphaPreference, newSingleTerminationCondition(params.AlphaConfidence, params.Beta), choice) return &sb } func (snowballFactory) NewUnary(params Parameters) Unary { - sb := newUnarySnowball(params.AlphaPreference, params.AlphaConfidence, params.Beta) + sb := newUnarySnowball(params.AlphaPreference, newSingleTerminationCondition(params.AlphaConfidence, params.Beta)) return &sb } type snowflakeFactory struct{} func (snowflakeFactory) NewNnary(params Parameters, choice ids.ID) Nnary { - sf := newNnarySnowflake(params.AlphaPreference, params.AlphaConfidence, params.Beta, choice) + sf := newNnarySnowflake(params.AlphaPreference, newSingleTerminationCondition(params.AlphaConfidence, params.Beta), choice) return &sf } func (snowflakeFactory) NewUnary(params Parameters) Unary { - sf := newUnarySnowflake(params.AlphaPreference, params.AlphaConfidence, params.Beta) + sf := newUnarySnowflake(params.AlphaPreference, newSingleTerminationCondition(params.AlphaConfidence, params.Beta)) return &sf } diff --git a/snow/consensus/snowball/flat_test.go b/snow/consensus/snowball/flat_test.go index b4b8d1c6f308..51b7390bb88d 100644 --- 
a/snow/consensus/snowball/flat_test.go +++ b/snow/consensus/snowball/flat_test.go @@ -56,6 +56,6 @@ func TestFlat(t *testing.T) { require.Equal(Green, f.Preference()) require.True(f.Finalized()) - expected := "SB(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w, PreferenceStrength = 4, SF(Confidence = 2, Finalized = true, SL(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w)))" + expected := "SB(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w, PreferenceStrength = 4, SF(Confidence = [2], Finalized = true, SL(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w)))" require.Equal(expected, f.String()) } diff --git a/snow/consensus/snowball/nnary_snowball.go b/snow/consensus/snowball/nnary_snowball.go index 98b63cd9dbcb..dfecaf3450fe 100644 --- a/snow/consensus/snowball/nnary_snowball.go +++ b/snow/consensus/snowball/nnary_snowball.go @@ -11,9 +11,9 @@ import ( var _ Nnary = (*nnarySnowball)(nil) -func newNnarySnowball(alphaPreference, alphaConfidence, beta int, choice ids.ID) nnarySnowball { +func newNnarySnowball(alphaPreference int, terminationConditions []terminationCondition, choice ids.ID) nnarySnowball { return nnarySnowball{ - nnarySnowflake: newNnarySnowflake(alphaPreference, alphaConfidence, beta, choice), + nnarySnowflake: newNnarySnowflake(alphaPreference, terminationConditions, choice), preference: choice, preferenceStrength: make(map[ids.ID]int), } diff --git a/snow/consensus/snowball/nnary_snowball_test.go b/snow/consensus/snowball/nnary_snowball_test.go index 466337bc00d5..8a5e66143db2 100644 --- a/snow/consensus/snowball/nnary_snowball_test.go +++ b/snow/consensus/snowball/nnary_snowball_test.go @@ -14,8 +14,9 @@ func TestNnarySnowball(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowball(alphaPreference, alphaConfidence, beta, Red) + sb := newNnarySnowball(alphaPreference, 
terminationConditions, Red) sb.Add(Blue) sb.Add(Green) @@ -56,8 +57,9 @@ func TestVirtuousNnarySnowball(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 1 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowball(alphaPreference, alphaConfidence, beta, Red) + sb := newNnarySnowball(alphaPreference, terminationConditions, Red) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) @@ -72,8 +74,9 @@ func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowball(alphaPreference, alphaConfidence, beta, Red) + sb := newNnarySnowball(alphaPreference, terminationConditions, Red) sb.Add(Blue) require.Equal(Red, sb.Preference()) @@ -95,7 +98,7 @@ func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { require.Equal(Blue, sb.Preference()) require.True(sb.Finalized()) - expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, PreferenceStrength = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" + expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, PreferenceStrength = 3, SF(Confidence = [2], Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" require.Equal(expected, sb.String()) for i := 0; i < 4; i++ { @@ -111,8 +114,9 @@ func TestNarySnowballDifferentSnowflakeColor(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowball(alphaPreference, alphaConfidence, beta, Red) + sb := newNnarySnowball(alphaPreference, terminationConditions, Red) sb.Add(Blue) require.Equal(Red, sb.Preference()) diff --git a/snow/consensus/snowball/nnary_snowflake.go b/snow/consensus/snowball/nnary_snowflake.go index 
ab3c4f462c29..3fe9f517c0e7 100644 --- a/snow/consensus/snowball/nnary_snowflake.go +++ b/snow/consensus/snowball/nnary_snowflake.go @@ -11,17 +11,12 @@ import ( var _ Nnary = (*nnarySnowflake)(nil) -func newNnarySnowflake(alphaPreference, alphaConfidence, beta int, choice ids.ID) nnarySnowflake { +func newNnarySnowflake(alphaPreference int, terminationConditions []terminationCondition, choice ids.ID) nnarySnowflake { return nnarySnowflake{ - nnarySlush: newNnarySlush(choice), - alphaPreference: alphaPreference, - terminationConditions: []terminationCondition{ - { - alphaConfidence: alphaConfidence, - beta: beta, - }, - }, - confidence: make([]int, 1), + nnarySlush: newNnarySlush(choice), + alphaPreference: alphaPreference, + terminationConditions: terminationConditions, + confidence: make([]int, len(terminationConditions)), } } @@ -101,8 +96,8 @@ func (sf *nnarySnowflake) Finalized() bool { } func (sf *nnarySnowflake) String() string { - return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", - sf.confidence[0], + return fmt.Sprintf("SF(Confidence = %v, Finalized = %v, %s)", + sf.confidence, sf.finalized, &sf.nnarySlush) } diff --git a/snow/consensus/snowball/nnary_snowflake_test.go b/snow/consensus/snowball/nnary_snowflake_test.go index 714a6bae9b07..ad090ee0f0df 100644 --- a/snow/consensus/snowball/nnary_snowflake_test.go +++ b/snow/consensus/snowball/nnary_snowflake_test.go @@ -7,6 +7,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" ) func TestNnarySnowflake(t *testing.T) { @@ -14,8 +16,9 @@ func TestNnarySnowflake(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sf := newNnarySnowflake(alphaPreference, alphaConfidence, beta, Red) + sf := newNnarySnowflake(alphaPreference, terminationConditions, Red) sf.Add(Blue) sf.Add(Green) @@ -52,8 +55,9 @@ func TestNnarySnowflakeConfidenceReset(t 
*testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 4 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sf := newNnarySnowflake(alphaPreference, alphaConfidence, beta, Red) + sf := newNnarySnowflake(alphaPreference, terminationConditions, Red) sf.Add(Blue) sf.Add(Green) @@ -85,8 +89,9 @@ func TestVirtuousNnarySnowflake(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowflake(alphaPreference, alphaConfidence, beta, Red) + sb := newNnarySnowflake(alphaPreference, terminationConditions, Red) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) @@ -98,3 +103,44 @@ func TestVirtuousNnarySnowflake(t *testing.T) { require.Equal(Red, sb.Preference()) require.True(sb.Finalized()) } + +type nnarySnowflakeTest struct { + require *require.Assertions + + nnarySnowflake +} + +func newNnarySnowflakeTest(t *testing.T, alphaPreference int, terminationConditions []terminationCondition) snowflakeTest[ids.ID] { + require := require.New(t) + + return &nnarySnowflakeTest{ + require: require, + nnarySnowflake: newNnarySnowflake(alphaPreference, terminationConditions, Red), + } +} + +func (sf *nnarySnowflakeTest) RecordPoll(count int, choice ids.ID) { + sf.nnarySnowflake.RecordPoll(count, choice) +} + +func (sf *nnarySnowflakeTest) AssertEqual(expectedConfidences []int, expectedFinalized bool, expectedPreference ids.ID) { + sf.require.Equal(expectedPreference, sf.Preference()) + sf.require.Equal(expectedConfidences, sf.nnarySnowflake.confidence) + sf.require.Equal(expectedFinalized, sf.Finalized()) +} + +func TestNnarySnowflakeErrorDrivenSingleChoice(t *testing.T) { + for _, test := range getErrorDrivenSnowflakeSingleChoiceSuite[ids.ID]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newNnarySnowflakeTest, Red) + }) + } +} + +func TestNnarySnowflakeErrorDrivenMultiChoice(t *testing.T) { + for _, test := range 
getErrorDrivenSnowflakeMultiChoiceSuite[ids.ID]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newNnarySnowflakeTest, Red, Green) + }) + } +} diff --git a/snow/consensus/snowball/parameters.go b/snow/consensus/snowball/parameters.go index a13d99c27565..3e2ba03f2081 100644 --- a/snow/consensus/snowball/parameters.go +++ b/snow/consensus/snowball/parameters.go @@ -127,3 +127,12 @@ type terminationCondition struct { alphaConfidence int beta int } + +func newSingleTerminationCondition(alphaConfidence int, beta int) []terminationCondition { + return []terminationCondition{ + { + alphaConfidence: alphaConfidence, + beta: beta, + }, + } +} diff --git a/snow/consensus/snowball/test_snowflake.go b/snow/consensus/snowball/test_snowflake.go new file mode 100644 index 000000000000..78ce95b27e3d --- /dev/null +++ b/snow/consensus/snowball/test_snowflake.go @@ -0,0 +1,145 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import "testing" + +const alphaPreference = 3 + +var terminationConditions = []terminationCondition{ + { + alphaConfidence: 3, + beta: 4, + }, + { + alphaConfidence: 4, + beta: 3, + }, + { + alphaConfidence: 5, + beta: 2, + }, +} + +type snowflakeTestConstructor[T comparable] func(t *testing.T, alphaPreference int, terminationConditions []terminationCondition) snowflakeTest[T] + +type snowflakeTest[T comparable] interface { + RecordPoll(count int, optionalMode T) + RecordUnsuccessfulPoll() + AssertEqual(expectedConfidences []int, expectedFinalized bool, expectedPreference T) +} + +func executeErrorDrivenTerminatesInBetaPolls[T comparable](t *testing.T, newSnowflakeTest snowflakeTestConstructor[T], choice T) { + for i, terminationCondition := range terminationConditions { + sfTest := newSnowflakeTest(t, alphaPreference, terminationConditions) + + for poll := 0; poll < terminationCondition.beta; poll++ { + sfTest.RecordPoll(terminationCondition.alphaConfidence, choice) 
+ + expectedConfidences := make([]int, len(terminationConditions)) + for j := 0; j < i+1; j++ { + expectedConfidences[j] = poll + 1 + } + sfTest.AssertEqual(expectedConfidences, poll+1 >= terminationCondition.beta, choice) + } + } +} + +func executeErrorDrivenReset[T comparable](t *testing.T, newSnowflakeTest snowflakeTestConstructor[T], choice T) { + for i, terminationCondition := range terminationConditions { + sfTest := newSnowflakeTest(t, alphaPreference, terminationConditions) + + // Accumulate confidence up to 1 less than beta, reset, and confirm + // expected behavior from fresh state. + for poll := 0; poll < terminationCondition.beta-1; poll++ { + sfTest.RecordPoll(terminationCondition.alphaConfidence, choice) + } + sfTest.RecordUnsuccessfulPoll() + zeroConfidence := make([]int, len(terminationConditions)) + sfTest.AssertEqual(zeroConfidence, false, choice) + + for poll := 0; poll < terminationCondition.beta; poll++ { + sfTest.RecordPoll(terminationCondition.alphaConfidence, choice) + + expectedConfidences := make([]int, len(terminationConditions)) + for j := 0; j < i+1; j++ { + expectedConfidences[j] = poll + 1 + } + sfTest.AssertEqual(expectedConfidences, poll+1 >= terminationCondition.beta, choice) + } + } +} + +func executeErrorDrivenResetHighestAlphaConfidence[T comparable](t *testing.T, newSnowflakeTest snowflakeTestConstructor[T], choice T) { + sfTest := newSnowflakeTest(t, alphaPreference, terminationConditions) + + sfTest.RecordPoll(5, choice) + sfTest.AssertEqual([]int{1, 1, 1}, false, choice) + sfTest.RecordPoll(4, choice) + sfTest.AssertEqual([]int{2, 2, 0}, false, choice) + sfTest.RecordPoll(3, choice) + sfTest.AssertEqual([]int{3, 0, 0}, false, choice) + sfTest.RecordPoll(5, choice) + sfTest.AssertEqual([]int{4, 0, 0}, true, choice) +} + +type snowflakeTestSingleChoice[T comparable] struct { + name string + f func(*testing.T, snowflakeTestConstructor[T], T) +} + +func getErrorDrivenSnowflakeSingleChoiceSuite[T comparable]() 
[]snowflakeTestSingleChoice[T] { + return []snowflakeTestSingleChoice[T]{ + { + name: "TerminateInBetaPolls", + f: executeErrorDrivenTerminatesInBetaPolls[T], + }, + { + name: "Reset", + f: executeErrorDrivenReset[T], + }, + { + name: "ResetHighestAlphaConfidence", + f: executeErrorDrivenResetHighestAlphaConfidence[T], + }, + } +} + +func executeErrorDrivenSwitchChoices[T comparable](t *testing.T, newSnowflakeTest snowflakeTestConstructor[T], choice0, choice1 T) { + sfTest := newSnowflakeTest(t, alphaPreference, terminationConditions) + + sfTest.RecordPoll(3, choice0) + sfTest.AssertEqual([]int{1, 0, 0}, false, choice0) + + sfTest.RecordPoll(2, choice1) + sfTest.AssertEqual([]int{0, 0, 0}, false, choice0) + + sfTest.RecordPoll(3, choice0) + sfTest.AssertEqual([]int{1, 0, 0}, false, choice0) + + sfTest.RecordPoll(0, choice0) + sfTest.AssertEqual([]int{0, 0, 0}, false, choice0) + + sfTest.RecordPoll(3, choice1) + sfTest.AssertEqual([]int{1, 0, 0}, false, choice1) + + sfTest.RecordPoll(5, choice1) + sfTest.AssertEqual([]int{2, 1, 1}, false, choice1) + sfTest.RecordPoll(5, choice1) + sfTest.AssertEqual([]int{3, 2, 2}, true, choice1) +} + +type snowflakeTestMultiChoice[T comparable] struct { + name string + f func(*testing.T, snowflakeTestConstructor[T], T, T) +} + +func getErrorDrivenSnowflakeMultiChoiceSuite[T comparable]() []snowflakeTestMultiChoice[T] { + return []snowflakeTestMultiChoice[T]{ + { + name: "SwitchChoices", + f: executeErrorDrivenSwitchChoices[T], + }, + } +} diff --git a/snow/consensus/snowball/tree_test.go b/snow/consensus/snowball/tree_test.go index 687f948fa783..fc9c8944b768 100644 --- a/snow/consensus/snowball/tree_test.go +++ b/snow/consensus/snowball/tree_test.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/utils/bag" ) -const initialUnaryDescription = "SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" +const initialUnaryDescription = "SB(PreferenceStrength = 0, SF(Confidence = [0], 
Finalized = false)) Bits = [0, 256)" func TestSnowballSingleton(t *testing.T) { require := require.New(t) @@ -143,8 +143,8 @@ func TestSnowballLastBinary(t *testing.T) { // Should do nothing tree.Add(one) - expected := `SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 255) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 255` + expected := `SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [0, 255) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 255` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -154,15 +154,15 @@ func TestSnowballLastBinary(t *testing.T) { require.Equal(one, tree.Preference()) require.False(tree.Finalized()) - expected = `SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 255) - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 255` + expected = `SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [0, 255) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 255` require.Equal(expected, tree.String()) require.True(tree.RecordPoll(oneBag)) require.Equal(one, tree.Preference()) require.True(tree.Finalized()) - expected = "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 1))) Bit = 255" + expected = "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = [2], Finalized = true, SL(Preference = 1))) Bit = 255" require.Equal(expected, tree.String()) } @@ -181,9 +181,9 @@ func TestSnowballFirstBinary(t *testing.T) { tree := 
NewTree(SnowballFactory, params, zero) tree.Add(one) - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -193,16 +193,16 @@ func TestSnowballFirstBinary(t *testing.T) { require.Equal(one, tree.Preference()) require.False(tree.Finalized()) - expected = `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)` + expected = `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.True(tree.RecordPoll(oneBag)) require.Equal(one, tree.Preference()) require.True(tree.Finalized()) - expected = `SB(PreferenceStrength = 2, SF(Confidence = 2, Finalized = true)) Bits = [1, 256)` + expected = `SB(PreferenceStrength = 2, SF(Confidence = [2], Finalized = true)) Bits = [1, 256)` require.Equal(expected, tree.String()) } @@ -224,11 
+224,11 @@ func TestSnowballAddDecidedFirstBit(t *testing.T) { tree.Add(c1000) tree.Add(c1100) - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -238,11 +238,11 @@ func TestSnowballAddDecidedFirstBit(t *testing.T) { require.Equal(c1000, tree.Preference()) require.False(tree.Finalized()) - expected = `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = 
[2, 256)` + expected = `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) threeBag := bag.Of(c1100) @@ -250,9 +250,9 @@ func TestSnowballAddDecidedFirstBit(t *testing.T) { require.Equal(c1000, tree.Preference()) require.False(tree.Finalized()) - expected = `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256)` + expected = `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) // Adding six should have no effect because the first bit is already decided @@ -277,10 +277,10 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { tree.Add(two) { - expected := `SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, 
SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -290,10 +290,10 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := `SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -303,9 +303,9 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { require.True(tree.RecordPoll(twoBag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - 
SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -314,9 +314,9 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { tree.Add(one) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -339,9 +339,9 @@ func TestSnowballNewUnary(t *testing.T) { tree.Add(one) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = 
false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -351,9 +351,9 @@ func TestSnowballNewUnary(t *testing.T) { require.True(tree.RecordPoll(oneBag)) { - expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(one, tree.Preference()) require.False(tree.Finalized()) @@ -362,9 +362,9 @@ func TestSnowballNewUnary(t *testing.T) { require.True(tree.RecordPoll(oneBag)) { - expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = false, SL(Preference = 1))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 2, SF(Confidence = 2, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = [2], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 2, SF(Confidence = [2], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(one, tree.Preference()) require.False(tree.Finalized()) @@ -389,13 +389,13 @@ func 
TestSnowballTransitiveReset(t *testing.T) { tree.Add(eight) { - expected := `SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 3) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 3 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -405,13 +405,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := `SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, 
SF(Confidence = 1, Finalized = false)) Bits = [2, 3) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -421,13 +421,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.False(tree.RecordPoll(emptyBag)) { - expected := `SB(PreferenceStrength = 1, SF(Confidence = 0, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, 
Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 1, SF(Confidence = [0], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -436,13 +436,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := `SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) - SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 - SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, 
SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -451,7 +451,7 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := "SB(PreferenceStrength = 3, SF(Confidence = 2, Finalized = true)) Bits = [4, 256)" + expected := "SB(PreferenceStrength = 3, SF(Confidence = [2], Finalized = true)) Bits = [4, 256)" require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.True(tree.Finalized()) @@ -578,11 +578,11 @@ func TestSnowballResetChild(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 
+ SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -592,11 +592,11 @@ func TestSnowballResetChild(t *testing.T) { require.False(tree.RecordPoll(emptyBag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -605,11 +605,11 @@ func TestSnowballResetChild(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, 
PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -640,11 +640,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) 
Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -654,11 +654,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c1000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -667,11 +667,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 1, 
PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -736,9 +736,9 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1100) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -747,11 +747,11 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1000) { - expected := 
`SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -760,14 +760,14 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c0010) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 - 
SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -777,14 +777,14 @@ func TestSnowballFineGrained(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 2) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, 
Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -794,9 +794,9 @@ func TestSnowballFineGrained(t *testing.T) { require.True(tree.RecordPoll(c0010Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [3, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -804,7 +804,7 @@ 
func TestSnowballFineGrained(t *testing.T) { require.True(tree.RecordPoll(c0010Bag)) { - expected := "SB(PreferenceStrength = 2, SF(Confidence = 2, Finalized = true)) Bits = [3, 256)" + expected := "SB(PreferenceStrength = 2, SF(Confidence = [2], Finalized = true)) Bits = [3, 256)" require.Equal(expected, tree.String()) require.Equal(c0010, tree.Preference()) require.True(tree.Finalized()) @@ -881,9 +881,9 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c1000) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -892,12 +892,12 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c0010) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := 
`SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -907,12 +907,12 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 2) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, 
SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -921,13 +921,13 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c0100) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) 
require.False(tree.Finalized()) @@ -937,11 +937,11 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) diff --git a/snow/consensus/snowball/unary_snowball.go b/snow/consensus/snowball/unary_snowball.go index 24ed78cee43b..c67180f4c384 100644 --- a/snow/consensus/snowball/unary_snowball.go +++ b/snow/consensus/snowball/unary_snowball.go @@ -10,9 +10,9 @@ import ( var _ Unary = (*unarySnowball)(nil) -func newUnarySnowball(alphaPreference, alphaConfidence, beta int) unarySnowball { +func newUnarySnowball(alphaPreference int, terminationConditions []terminationCondition) unarySnowball { return unarySnowball{ - unarySnowflake: newUnarySnowflake(alphaPreference, alphaConfidence, beta), + unarySnowflake: 
newUnarySnowflake(alphaPreference, terminationConditions), } } diff --git a/snow/consensus/snowball/unary_snowball_test.go b/snow/consensus/snowball/unary_snowball_test.go index 4bea0458d95f..007d2ab53090 100644 --- a/snow/consensus/snowball/unary_snowball_test.go +++ b/snow/consensus/snowball/unary_snowball_test.go @@ -22,8 +22,9 @@ func TestUnarySnowball(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newUnarySnowball(alphaPreference, alphaConfidence, beta) + sb := newUnarySnowball(alphaPreference, terminationConditions) sb.RecordPoll(alphaConfidence) UnarySnowballStateTest(t, &sb, 1, []int{1}, false) @@ -48,7 +49,7 @@ func TestUnarySnowball(t *testing.T) { binarySnowball := sbClone.Extend(0) - expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0)))" + expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0)))" require.Equal(expected, binarySnowball.String()) binarySnowball.RecordUnsuccessfulPoll() @@ -70,6 +71,6 @@ func TestUnarySnowball(t *testing.T) { require.Equal(1, binarySnowball.Preference()) require.True(binarySnowball.Finalized()) - expected = "SB(PreferenceStrength = 4, SF(Confidence = 1, Finalized = false))" + expected = "SB(PreferenceStrength = 4, SF(Confidence = [1], Finalized = false))" require.Equal(expected, sb.String()) } diff --git a/snow/consensus/snowball/unary_snowflake.go b/snow/consensus/snowball/unary_snowflake.go index 3e21316dc370..a49152966d9a 100644 --- a/snow/consensus/snowball/unary_snowflake.go +++ b/snow/consensus/snowball/unary_snowflake.go @@ -10,16 +10,11 @@ import ( var _ Unary = (*unarySnowflake)(nil) -func newUnarySnowflake(alphaPreference, alphaConfidence, beta int) unarySnowflake { +func newUnarySnowflake(alphaPreference int, 
terminationConditions []terminationCondition) unarySnowflake { return unarySnowflake{ - alphaPreference: alphaPreference, - terminationConditions: []terminationCondition{ - { - alphaConfidence: alphaConfidence, - beta: beta, - }, - }, - confidence: make([]int, 1), + alphaPreference: alphaPreference, + terminationConditions: terminationConditions, + confidence: make([]int, len(terminationConditions)), } } @@ -93,7 +88,7 @@ func (sf *unarySnowflake) Clone() Unary { } func (sf *unarySnowflake) String() string { - return fmt.Sprintf("SF(Confidence = %d, Finalized = %v)", - sf.confidence[0], + return fmt.Sprintf("SF(Confidence = %v, Finalized = %v)", + sf.confidence, sf.finalized) } diff --git a/snow/consensus/snowball/unary_snowflake_test.go b/snow/consensus/snowball/unary_snowflake_test.go index 0c6282060b42..ee099460e52b 100644 --- a/snow/consensus/snowball/unary_snowflake_test.go +++ b/snow/consensus/snowball/unary_snowflake_test.go @@ -9,10 +9,10 @@ import ( "github.com/stretchr/testify/require" ) -func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidence []int, expectedFinalized bool) { +func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidences []int, expectedFinalized bool) { require := require.New(t) - require.Equal(expectedConfidence, sf.confidence) + require.Equal(expectedConfidences, sf.confidence) require.Equal(expectedFinalized, sf.Finalized()) } @@ -21,8 +21,9 @@ func TestUnarySnowflake(t *testing.T) { alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sf := newUnarySnowflake(alphaPreference, alphaConfidence, beta) + sf := newUnarySnowflake(alphaPreference, terminationConditions) sf.RecordPoll(alphaConfidence) UnarySnowflakeStateTest(t, &sf, []int{1}, false) @@ -61,3 +62,35 @@ func TestUnarySnowflake(t *testing.T) { sf.RecordPoll(alphaConfidence) UnarySnowflakeStateTest(t, &sf, []int{1}, true) } + +type 
unarySnowflakeTest struct { + require *require.Assertions + + unarySnowflake +} + +func newUnarySnowflakeTest(t *testing.T, alphaPreference int, terminationConditions []terminationCondition) snowflakeTest[struct{}] { + require := require.New(t) + + return &unarySnowflakeTest{ + require: require, + unarySnowflake: newUnarySnowflake(alphaPreference, terminationConditions), + } +} + +func (sf *unarySnowflakeTest) RecordPoll(count int, _ struct{}) { + sf.unarySnowflake.RecordPoll(count) +} + +func (sf *unarySnowflakeTest) AssertEqual(expectedConfidences []int, expectedFinalized bool, _ struct{}) { + sf.require.Equal(expectedConfidences, sf.unarySnowflake.confidence) + sf.require.Equal(expectedFinalized, sf.Finalized()) +} + +func TestUnarySnowflakeErrorDriven(t *testing.T) { + for _, test := range getErrorDrivenSnowflakeSingleChoiceSuite[struct{}]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newUnarySnowflakeTest, struct{}{}) + }) + } +} From d3a37392cbc0c498f5c484af7db8ea8dad201c93 Mon Sep 17 00:00:00 2001 From: marun Date: Wed, 12 Jun 2024 23:23:52 +0200 Subject: [PATCH 058/102] [antithesis] Add ci jobs to trigger test runs (#3076) --- .../workflows/publish_antithesis_images.yml | 10 +++- .github/workflows/trigger-antithesis-runs.yml | 53 +++++++++++++++++++ tests/antithesis/README.md | 28 ++++++++++ 3 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/trigger-antithesis-runs.yml diff --git a/.github/workflows/publish_antithesis_images.yml b/.github/workflows/publish_antithesis_images.yml index 35e77218fdb6..32ecc3ae4c36 100644 --- a/.github/workflows/publish_antithesis_images.yml +++ b/.github/workflows/publish_antithesis_images.yml @@ -2,6 +2,12 @@ name: Publish Antithesis Images on: workflow_dispatch: + inputs: + image_tag: + description: 'The tag to apply to published images' + default: latest + required: true + type: string push: branches: - master @@ -29,12 +35,12 @@ jobs: run: bash -x 
./scripts/build_antithesis_images.sh env: IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} - TAG: latest + TAG: ${{ github.events.inputs.image_tag || latest }} TEST_SETUP: avalanchego - name: Build and push images for xsvm test setup run: bash -x ./scripts/build_antithesis_images.sh env: IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} - TAG: latest + TAG: ${{ github.events.inputs.image_tag || latest }} TEST_SETUP: xsvm diff --git a/.github/workflows/trigger-antithesis-runs.yml b/.github/workflows/trigger-antithesis-runs.yml new file mode 100644 index 000000000000..0521b0770d79 --- /dev/null +++ b/.github/workflows/trigger-antithesis-runs.yml @@ -0,0 +1,53 @@ +name: Trigger Antithesis Test Runs + +on: + # TODO(marun) Add a schedule to execute regularly + workflow_dispatch: + inputs: + duration: + description: 'The duration to run the test for' + default: '0.5' + required: true + type: string + recipients: + description: 'Email recipients to send the test report to' + default: ${{ secrets.ANTITHESIS_RECIPIENTS }} + required: true + type: string + image_tag: + description: 'The image tag to target' + default: latest + required: true + type: string + +jobs: + Run Antithesis Avalanchego Test Setup: + runs-on: ubuntu-latest + steps: + - uses: antithesishq/antithesis-trigger-action@v0.5 + with: + notebook_name: avalanche + tenant: avalanche + username: ${{ secrets.ANTITHESIS_USERNAME }} + password: ${{ secrets.ANTITHESIS_PASSWORD }} + github_token: ${{ secrets.ANTITHESIS_GH_PAT }} + config_image: antithesis-avalanchego-config@${{ github.events.inputs.image_tag }} + images: antithesis-avalanchego-workload@${{ github.events.inputs.image_tag }};antithesis-avalanchego-node@${{ github.events.inputs.image_tag }} + email_recipients: ${{ github.events.inputs.recipients }} + additional_parameters: |- + custom.duration=${{ github.events.inputs.duration }} + Run Antithesis XSVM Test Setup: + runs-on: ubuntu-latest + steps: + - uses: 
antithesishq/antithesis-trigger-action@v0.5 + with: + notebook_name: avalanche + tenant: avalanche + username: ${{ secrets.ANTITHESIS_USERNAME }} + password: ${{ secrets.ANTITHESIS_PASSWORD }} + github_token: ${{ secrets.ANTITHESIS_GH_PAT }} + config_image: antithesis-xsvm-config@${{ github.events.inputs.image_tag }} + images: antithesis-xsvm-workload@${{ github.events.inputs.image_tag }};antithesis-xsvm-node@${{ github.events.inputs.image_tag }} + email_recipients: ${{ github.events.inputs.recipients }} + additional_parameters: |- + custom.duration=${{ github.events.inputs.duration }} diff --git a/tests/antithesis/README.md b/tests/antithesis/README.md index 3acb7746104a..1fd22370e655 100644 --- a/tests/antithesis/README.md +++ b/tests/antithesis/README.md @@ -115,3 +115,31 @@ $ docker-compose up # Cleanup the compose project $ docker-compose down --volumes ``` + +## Manually triggering an Antithesis test run + +When making changes to a test setup, it may be useful to manually +trigger an Antithesis test run outside of the normal schedule. This +can be performed against master or an arbitrary branch: + + - Navigate to the ['Actions' tab of the avalanchego + repo](https://github.com/ava-labs/avalanchego/actions). + - Select the [Publish Antithesis + Images](https://github.com/ava-labs/avalanchego/actions/workflows/publish_antithesis_images.yml) + workflow on the left. + - Find the 'Run workflow' drop-down on the right and trigger the + workflow against the desired branch. The default value for + `image_tag` (`latest`) is used by scheduled test runs, so consider + supplying a different value to avoid interferring with the results + of the scheduled runs. + - Wait for the publication job to complete successfully so that the + images are available to be tested against. + - Select the [Trigger Antithesis Test Runs](https://github.com/ava-labs/avalanchego/actions/workflows/trigger-antithesis-runs.yml) + workflow on the left. 
+ - Find the 'Run workflow' drop-down on the right and trigger the + workflow against the desired branch. The branch only determines the + CI configuration (the images have already been built), so master is + probably fine. Make sure to supply the same `image_tag` that was + provided to the publishing workflow and consider setting + `recipients` to your own email rather than sending the test report + to everyone on the regular distribution list. From fa05d628b2700498ce96641bb754a9551b5f0a30 Mon Sep 17 00:00:00 2001 From: felipemadero Date: Thu, 13 Jun 2024 11:22:00 -0300 Subject: [PATCH 059/102] bump ledger-avalanche dependency to current main branch (#3115) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dd81b9438eae..34d9c6cfacde 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/NYTimes/gziphandler v1.1.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 github.com/ava-labs/coreth v0.13.5-rc.0 - github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 + github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/compose-spec/compose-go v1.20.2 diff --git a/go.sum b/go.sum index a3a0887b60b7..79f7b0d77e36 100644 --- a/go.sum +++ b/go.sum @@ -64,8 +64,8 @@ github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/coreth v0.13.5-rc.0 h1:PJQbR9o2RrW3j9ba4r1glXnmM2PNAP3xR569+gMcBd0= github.com/ava-labs/coreth v0.13.5-rc.0/go.mod h1:cm5c12xo5NiTgtbmeduv8i2nYdzgkczz9Wm3yiwwTRU= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 
h1:mg9Uw6oZFJKytJxgxnl3uxZOs/SB8CVHg6Io4Tf99Zc= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95 h1:dOVbtdnZL++pENdTCNZ1nu41eYDQkTML4sWebDnnq8c= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From c72c21e749f57c36ae611a80993d3400e2dea85d Mon Sep 17 00:00:00 2001 From: marun Date: Thu, 13 Jun 2024 16:58:21 +0200 Subject: [PATCH 060/102] [antithesis] Fix image publication job by quoting default tag value (#3112) --- .github/workflows/publish_antithesis_images.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish_antithesis_images.yml b/.github/workflows/publish_antithesis_images.yml index 32ecc3ae4c36..8363ad73e975 100644 --- a/.github/workflows/publish_antithesis_images.yml +++ b/.github/workflows/publish_antithesis_images.yml @@ -35,12 +35,12 @@ jobs: run: bash -x ./scripts/build_antithesis_images.sh env: IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} - TAG: ${{ github.events.inputs.image_tag || latest }} + TAG: ${{ github.events.inputs.image_tag || 'latest' }} TEST_SETUP: avalanchego - name: Build and push images for xsvm test setup run: bash -x ./scripts/build_antithesis_images.sh env: IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} - TAG: ${{ github.events.inputs.image_tag || latest }} + TAG: ${{ github.events.inputs.image_tag || 'latest' }} TEST_SETUP: xsvm From 2b14a72e7a8bc58e95906d767c2357a4032c29ba Mon Sep 17 00:00:00 
2001 From: marun Date: Thu, 13 Jun 2024 20:28:48 +0200 Subject: [PATCH 061/102] [e2e] Fix excessively verbose output from virtuous test (#3116) --- tests/e2e/x/transfer/virtuous.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index 10a2359e7f9e..b30bbd30316c 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -114,7 +114,9 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { ) require.NoError(err) for _, uri := range rpcEps { - tests.Outf("{{green}}metrics at %q:{{/}} %v\n", uri, metricsBeforeTx[uri]) + for _, metric := range []string{blksProcessingMetric, blksAcceptedMetric} { + tests.Outf("{{green}}%s at %q:{{/}} %v\n", metric, uri, metricsBeforeTx[uri][metric]) + } } testBalances := make([]uint64, 0) From fa37f5ab530e04948fcd3d2b2ceed4caae2a9f53 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 13 Jun 2024 15:09:51 -0400 Subject: [PATCH 062/102] Remove .Status() from .IsPreferred() (#3111) --- snow/consensus/snowman/consensus.go | 6 ++--- snow/consensus/snowman/consensus_test.go | 34 ++++++++++++------------ snow/consensus/snowman/topological.go | 8 ++---- snow/engine/snowman/transitive.go | 4 +-- 4 files changed, 24 insertions(+), 28 deletions(-) diff --git a/snow/consensus/snowman/consensus.go b/snow/consensus/snowman/consensus.go index 1eaff4b0e2f4..9d5aa00c4e82 100644 --- a/snow/consensus/snowman/consensus.go +++ b/snow/consensus/snowman/consensus.go @@ -45,9 +45,9 @@ type Consensus interface { // Processing returns true if the block ID is currently processing. Processing(ids.ID) bool - // IsPreferred returns true if the block is currently on the preferred - // chain. - IsPreferred(Block) bool + // IsPreferred returns true if the block ID is preferred. Only the last + // accepted block and processing blocks are considered preferred. 
+ IsPreferred(ids.ID) bool // Returns the ID and height of the last accepted decision. LastAccepted() (ids.ID, uint64) diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index bb51790b76f7..4d5d9e1e6b68 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -177,7 +177,7 @@ func AddToTailTest(t *testing.T, factory Factory) { // Adding to the previous preference will update the preference require.NoError(sm.Add(block)) require.Equal(block.ID(), sm.Preference()) - require.True(sm.IsPreferred(block)) + require.True(sm.IsPreferred(block.ID())) pref, ok := sm.PreferenceAtHeight(block.Height()) require.True(ok) @@ -292,7 +292,7 @@ func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { require.Equal(choices.Accepted, snowmantest.Genesis.Status()) require.False(sm.Processing(snowmantest.Genesis.ID())) require.True(sm.Decided(snowmantest.Genesis)) - require.True(sm.IsPreferred(snowmantest.Genesis)) + require.True(sm.IsPreferred(snowmantest.Genesis.ID())) pref, ok := sm.PreferenceAtHeight(snowmantest.Genesis.Height()) require.True(ok) @@ -330,7 +330,7 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { require.Equal(choices.Rejected, block.Status()) require.False(sm.Processing(block.ID())) require.True(sm.Decided(block)) - require.False(sm.IsPreferred(block)) + require.False(sm.IsPreferred(block.ID())) _, ok := sm.PreferenceAtHeight(block.Height()) require.False(ok) @@ -366,7 +366,7 @@ func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { require.Equal(choices.Processing, block.Status()) require.False(sm.Processing(block.ID())) require.False(sm.Decided(block)) - require.False(sm.IsPreferred(block)) + require.False(sm.IsPreferred(block.ID())) _, ok := sm.PreferenceAtHeight(block.Height()) require.False(ok) @@ -403,7 +403,7 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { 
require.Equal(choices.Processing, block.Status()) require.True(sm.Processing(block.ID())) require.False(sm.Decided(block)) - require.True(sm.IsPreferred(block)) + require.True(sm.IsPreferred(block.ID())) pref, ok := sm.PreferenceAtHeight(block.Height()) require.True(ok) @@ -968,10 +968,10 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { require.Equal(a2Block.ID(), sm.Preference()) - require.True(sm.IsPreferred(a1Block)) - require.True(sm.IsPreferred(a2Block)) - require.False(sm.IsPreferred(b1Block)) - require.False(sm.IsPreferred(b2Block)) + require.True(sm.IsPreferred(a1Block.ID())) + require.True(sm.IsPreferred(a2Block.ID())) + require.False(sm.IsPreferred(b1Block.ID())) + require.False(sm.IsPreferred(b2Block.ID())) pref, ok := sm.PreferenceAtHeight(a1Block.Height()) require.True(ok) @@ -985,10 +985,10 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { require.NoError(sm.RecordPoll(context.Background(), b2Votes)) require.Equal(b2Block.ID(), sm.Preference()) - require.False(sm.IsPreferred(a1Block)) - require.False(sm.IsPreferred(a2Block)) - require.True(sm.IsPreferred(b1Block)) - require.True(sm.IsPreferred(b2Block)) + require.False(sm.IsPreferred(a1Block.ID())) + require.False(sm.IsPreferred(a2Block.ID())) + require.True(sm.IsPreferred(b1Block.ID())) + require.True(sm.IsPreferred(b2Block.ID())) pref, ok = sm.PreferenceAtHeight(b1Block.Height()) require.True(ok) @@ -1003,10 +1003,10 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { require.NoError(sm.RecordPoll(context.Background(), a1Votes)) require.Equal(a2Block.ID(), sm.Preference()) - require.True(sm.IsPreferred(a1Block)) - require.True(sm.IsPreferred(a2Block)) - require.False(sm.IsPreferred(b1Block)) - require.False(sm.IsPreferred(b2Block)) + require.True(sm.IsPreferred(a1Block.ID())) + require.True(sm.IsPreferred(a2Block.ID())) + require.False(sm.IsPreferred(b1Block.ID())) + require.False(sm.IsPreferred(b2Block.ID())) pref, ok = 
sm.PreferenceAtHeight(a1Block.Height()) require.True(ok) diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index 6956d707a4e9..96f717f32ba4 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -204,12 +204,8 @@ func (ts *Topological) Processing(blkID ids.ID) bool { return ok } -func (ts *Topological) IsPreferred(blk Block) bool { - // If the block is accepted, then it must be transitively preferred. - if blk.Status() == choices.Accepted { - return true - } - return ts.preferredIDs.Contains(blk.ID()) +func (ts *Topological) IsPreferred(blkID ids.ID) bool { + return blkID == ts.lastAcceptedID || ts.preferredIDs.Contains(blkID) } func (ts *Topological) LastAccepted() (ids.ID, uint64) { diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 6bc93d1e8611..3a2870ebd85b 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -1018,14 +1018,14 @@ func (t *Transitive) deliver( // If the block is now preferred, query the network for its preferences // with this new block. 
- if t.Consensus.IsPreferred(blk) { + if t.Consensus.IsPreferred(blkID) { t.sendQuery(ctx, blkID, blk.Bytes(), push) } t.blocked.Fulfill(ctx, blkID) for _, blk := range added { blkID := blk.ID() - if t.Consensus.IsPreferred(blk) { + if t.Consensus.IsPreferred(blkID) { t.sendQuery(ctx, blkID, blk.Bytes(), push) } From 347a3f89b2d590c936c85b65b5e30ba92a2ec4f9 Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Thu, 13 Jun 2024 15:31:16 -0400 Subject: [PATCH 063/102] Add early termination metrics case by case (#3093) --- .../snowman/poll/early_term_no_traversal.go | 133 +++++++++++++++++- .../poll/early_term_no_traversal_test.go | 35 +++-- snow/consensus/snowman/poll/set_test.go | 18 +-- snow/engine/snowman/transitive.go | 6 +- 4 files changed, 168 insertions(+), 24 deletions(-) diff --git a/snow/consensus/snowman/poll/early_term_no_traversal.go b/snow/consensus/snowman/poll/early_term_no_traversal.go index 460805ab7820..df09157b04df 100644 --- a/snow/consensus/snowman/poll/early_term_no_traversal.go +++ b/snow/consensus/snowman/poll/early_term_no_traversal.go @@ -4,24 +4,124 @@ package poll import ( + "errors" "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" ) +var ( + errPollDurationVectorMetrics = errors.New("failed to register poll_duration vector metrics") + errPollCountVectorMetrics = errors.New("failed to register poll_count vector metrics") + + terminationReason = "reason" + exhaustedReason = "exhausted" + earlyFailReason = "early_fail" + earlyAlphaPrefReason = "early_alpha_pref" + earlyAlphaConfReason = "early_alpha_conf" + + exhaustedLabel = prometheus.Labels{ + terminationReason: exhaustedReason, + } + earlyFailLabel = prometheus.Labels{ + terminationReason: earlyFailReason, + } + earlyAlphaPrefLabel = prometheus.Labels{ + terminationReason: earlyAlphaPrefReason, + } + earlyAlphaConfLabel = prometheus.Labels{ + terminationReason: 
earlyAlphaConfReason, + } +) + +type earlyTermNoTraversalMetrics struct { + durExhaustedPolls prometheus.Gauge + durEarlyFailPolls prometheus.Gauge + durEarlyAlphaPrefPolls prometheus.Gauge + durEarlyAlphaConfPolls prometheus.Gauge + + countExhaustedPolls prometheus.Counter + countEarlyFailPolls prometheus.Counter + countEarlyAlphaPrefPolls prometheus.Counter + countEarlyAlphaConfPolls prometheus.Counter +} + +func newEarlyTermNoTraversalMetrics(reg prometheus.Registerer) (*earlyTermNoTraversalMetrics, error) { + pollCountVec := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "poll_count", + Help: "Total # of terminated polls by reason", + }, []string{terminationReason}) + if err := reg.Register(pollCountVec); err != nil { + return nil, fmt.Errorf("%w: %w", errPollCountVectorMetrics, err) + } + durPollsVec := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "poll_duration", + Help: "time (in ns) polls took to complete by reason", + }, []string{terminationReason}) + if err := reg.Register(durPollsVec); err != nil { + return nil, fmt.Errorf("%w: %w", errPollDurationVectorMetrics, err) + } + + return &earlyTermNoTraversalMetrics{ + durExhaustedPolls: durPollsVec.With(exhaustedLabel), + durEarlyFailPolls: durPollsVec.With(earlyFailLabel), + durEarlyAlphaPrefPolls: durPollsVec.With(earlyAlphaPrefLabel), + durEarlyAlphaConfPolls: durPollsVec.With(earlyAlphaConfLabel), + countExhaustedPolls: pollCountVec.With(exhaustedLabel), + countEarlyFailPolls: pollCountVec.With(earlyFailLabel), + countEarlyAlphaPrefPolls: pollCountVec.With(earlyAlphaPrefLabel), + countEarlyAlphaConfPolls: pollCountVec.With(earlyAlphaConfLabel), + }, nil +} + +func (m *earlyTermNoTraversalMetrics) observeExhausted(duration time.Duration) { + m.durExhaustedPolls.Add(float64(duration.Nanoseconds())) + m.countExhaustedPolls.Inc() +} + +func (m *earlyTermNoTraversalMetrics) observeEarlyFail(duration time.Duration) { + m.durEarlyFailPolls.Add(float64(duration.Nanoseconds())) + 
m.countEarlyFailPolls.Inc() +} + +func (m *earlyTermNoTraversalMetrics) observeEarlyAlphaPref(duration time.Duration) { + m.durEarlyAlphaPrefPolls.Add(float64(duration.Nanoseconds())) + m.countEarlyAlphaPrefPolls.Inc() +} + +func (m *earlyTermNoTraversalMetrics) observeEarlyAlphaConf(duration time.Duration) { + m.durEarlyAlphaConfPolls.Add(float64(duration.Nanoseconds())) + m.countEarlyAlphaConfPolls.Inc() +} + type earlyTermNoTraversalFactory struct { alphaPreference int alphaConfidence int + + metrics *earlyTermNoTraversalMetrics } // NewEarlyTermNoTraversalFactory returns a factory that returns polls with // early termination, without doing DAG traversals -func NewEarlyTermNoTraversalFactory(alphaPreference int, alphaConfidence int) Factory { +func NewEarlyTermNoTraversalFactory( + alphaPreference int, + alphaConfidence int, + reg prometheus.Registerer, +) (Factory, error) { + metrics, err := newEarlyTermNoTraversalMetrics(reg) + if err != nil { + return nil, err + } + return &earlyTermNoTraversalFactory{ alphaPreference: alphaPreference, alphaConfidence: alphaConfidence, - } + metrics: metrics, + }, nil } func (f *earlyTermNoTraversalFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { @@ -29,6 +129,8 @@ func (f *earlyTermNoTraversalFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { polled: vdrs, alphaPreference: f.alphaPreference, alphaConfidence: f.alphaConfidence, + metrics: f.metrics, + start: time.Now(), } } @@ -40,6 +142,10 @@ type earlyTermNoTraversalPoll struct { polled bag.Bag[ids.NodeID] alphaPreference int alphaConfidence int + + metrics *earlyTermNoTraversalMetrics + start time.Time + finished bool } // Vote registers a response for this poll @@ -67,20 +173,39 @@ func (p *earlyTermNoTraversalPoll) Drop(vdr ids.NodeID) { // transitive voting. // 4. A single element has achieved an alphaConfidence majority. 
func (p *earlyTermNoTraversalPoll) Finished() bool { + if p.finished { + return true + } + remaining := p.polled.Len() if remaining == 0 { + p.finished = true + p.metrics.observeExhausted(time.Since(p.start)) return true // Case 1 } received := p.votes.Len() maxPossibleVotes := received + remaining if maxPossibleVotes < p.alphaPreference { + p.finished = true + p.metrics.observeEarlyFail(time.Since(p.start)) return true // Case 2 } _, freq := p.votes.Mode() - return freq >= p.alphaPreference && maxPossibleVotes < p.alphaConfidence || // Case 3 - freq >= p.alphaConfidence // Case 4 + if freq >= p.alphaPreference && maxPossibleVotes < p.alphaConfidence { + p.finished = true + p.metrics.observeEarlyAlphaPref(time.Since(p.start)) + return true // Case 3 + } + + if freq >= p.alphaConfidence { + p.finished = true + p.metrics.observeEarlyAlphaConf(time.Since(p.start)) + return true // Case 4 + } + + return false } // Result returns the result of this poll diff --git a/snow/consensus/snowman/poll/early_term_no_traversal_test.go b/snow/consensus/snowman/poll/early_term_no_traversal_test.go index 9d215c246eec..232169c01d41 100644 --- a/snow/consensus/snowman/poll/early_term_no_traversal_test.go +++ b/snow/consensus/snowman/poll/early_term_no_traversal_test.go @@ -6,18 +6,25 @@ package poll import ( "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/utils/bag" ) +func newEarlyTermNoTraversalTestFactory(require *require.Assertions, alpha int) Factory { + factory, err := NewEarlyTermNoTraversalFactory(alpha, alpha, prometheus.NewRegistry()) + require.NoError(err) + return factory +} + func TestEarlyTermNoTraversalResults(t *testing.T) { require := require.New(t) vdrs := bag.Of(vdr1) // k = 1 alpha := 1 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ 
-31,10 +38,12 @@ func TestEarlyTermNoTraversalResults(t *testing.T) { } func TestEarlyTermNoTraversalString(t *testing.T) { + require := require.New(t) + vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ -43,7 +52,7 @@ func TestEarlyTermNoTraversalString(t *testing.T) { NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received Bag[ids.ID]: (Size = 1) SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` - require.Equal(t, expected, poll.String()) + require.Equal(expected, poll.String()) } func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { @@ -52,7 +61,7 @@ func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ -72,7 +81,7 @@ func TestEarlyTermNoTraversalTerminatesEarlyWithoutAlphaPreference(t *testing.T) vdrs := bag.Of(vdr1, vdr2, vdr3) // k = 3 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Drop(vdr1) @@ -90,7 +99,8 @@ func TestEarlyTermNoTraversalTerminatesEarlyWithAlphaPreference(t *testing.T) { alphaPreference := 3 alphaConfidence := 5 - factory := NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence) + factory, err := NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence, prometheus.NewRegistry()) + require.NoError(err) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ -114,7 +124,8 @@ func TestEarlyTermNoTraversalTerminatesEarlyWithAlphaConfidence(t *testing.T) { alphaPreference := 3 alphaConfidence := 3 - factory := NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence) + factory, err := 
NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence, prometheus.NewRegistry()) + require.NoError(err) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ -138,7 +149,7 @@ func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { vdrs := bag.Of(vdr1, vdr2, vdr3, vdr4) // k = 4 alpha := 4 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr1, blkID2) @@ -160,7 +171,7 @@ func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { vdrs := bag.Of(vdr1, vdr2, vdr2) // k = 3 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr2, blkID1) @@ -174,12 +185,14 @@ func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { } func TestEarlyTermNoTraversalDropWithWeightedResponses(t *testing.T) { + require := require.New(t) + vdrs := bag.Of(vdr1, vdr2, vdr2) // k = 3 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Drop(vdr2) - require.True(t, poll.Finished()) + require.True(poll.Finished()) } diff --git a/snow/consensus/snowman/poll/set_test.go b/snow/consensus/snowman/poll/set_test.go index 97166e0e9379..d01ab3bb8262 100644 --- a/snow/consensus/snowman/poll/set_test.go +++ b/snow/consensus/snowman/poll/set_test.go @@ -30,7 +30,8 @@ var ( func TestNewSetErrorOnPollsMetrics(t *testing.T) { require := require.New(t) - factory := NewEarlyTermNoTraversalFactory(1, 1) + alpha := 1 + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} registerer := prometheus.NewRegistry() @@ -45,7 +46,8 @@ func TestNewSetErrorOnPollsMetrics(t *testing.T) { func TestNewSetErrorOnPollDurationMetrics(t *testing.T) { require := require.New(t) - factory := NewEarlyTermNoTraversalFactory(1, 1) + alpha := 
1 + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} registerer := prometheus.NewRegistry() @@ -63,7 +65,7 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // k = 3 alpha := 3 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} registerer := prometheus.NewRegistry() s, err := NewSet(factory, log, registerer) @@ -99,7 +101,7 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // k = 3 alpha := 3 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} registerer := prometheus.NewRegistry() s, err := NewSet(factory, log, registerer) @@ -135,7 +137,7 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // k = 3 alpha := 3 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} registerer := prometheus.NewRegistry() s, err := NewSet(factory, log, registerer) @@ -179,7 +181,7 @@ func TestCreateAndFinishSuccessfulPoll(t *testing.T) { vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} registerer := prometheus.NewRegistry() s, err := NewSet(factory, log, registerer) @@ -211,7 +213,7 @@ func TestCreateAndFinishFailedPoll(t *testing.T) { vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 1 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} registerer := prometheus.NewRegistry() s, err := NewSet(factory, log, registerer) @@ -240,7 +242,7 @@ func 
TestSetString(t *testing.T) { vdrs := bag.Of(vdr1) // k = 1 alpha := 1 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} registerer := prometheus.NewRegistry() s, err := NewSet(factory, log, registerer) diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 3a2870ebd85b..5164957f9532 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -111,10 +111,14 @@ func New(config Config) (*Transitive, error) { acceptedFrontiers := tracker.NewAccepted() config.Validators.RegisterSetCallbackListener(config.Ctx.SubnetID, acceptedFrontiers) - factory := poll.NewEarlyTermNoTraversalFactory( + factory, err := poll.NewEarlyTermNoTraversalFactory( config.Params.AlphaPreference, config.Params.AlphaConfidence, + config.Ctx.Registerer, ) + if err != nil { + return nil, err + } polls, err := poll.NewSet( factory, config.Ctx.Log, From f99a64a4012ed07455e69e25d987d891e74505a6 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 14 Jun 2024 12:10:23 -0400 Subject: [PATCH 064/102] Update C-chain wallet context (#3118) --- tests/e2e/c/interchain_workflow.go | 6 +- tests/e2e/p/interchain_workflow.go | 4 +- tests/e2e/x/interchain_workflow.go | 4 +- wallet/chain/c/backend.go | 13 ++-- wallet/chain/c/builder.go | 37 +++++---- wallet/chain/c/context.go | 77 ++++++------------- wallet/chain/c/signer.go | 13 ++-- wallet/chain/c/wallet.go | 2 - wallet/subnet/primary/api.go | 4 +- .../primary/examples/c-chain-export/main.go | 2 +- .../primary/examples/c-chain-import/main.go | 5 +- wallet/subnet/primary/wallet.go | 6 +- 12 files changed, 75 insertions(+), 98 deletions(-) diff --git a/tests/e2e/c/interchain_workflow.go b/tests/e2e/c/interchain_workflow.go index f5afc4abe059..bfb342818a5f 100644 --- a/tests/e2e/c/interchain_workflow.go +++ b/tests/e2e/c/interchain_workflow.go @@ -86,6 +86,8 @@ var _ = e2e.DescribeCChain("[Interchain 
Workflow]", func() { ginkgo.By("defining common configuration") xBuilder := xWallet.Builder() xContext := xBuilder.Context() + cBuilder := cWallet.Builder() + cContext := cBuilder.Context() avaxAssetID := xContext.AVAXAssetID // Use the same owner for import funds to X-Chain and P-Chain recipientOwner := secp256k1fx.OutputOwners{ @@ -119,7 +121,7 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { ginkgo.By("importing AVAX from the C-Chain to the X-Chain", func() { _, err := xWallet.IssueImportTx( - cWallet.BlockchainID(), + cContext.BlockchainID, &recipientOwner, e2e.WithDefaultContext(), ) @@ -146,7 +148,7 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { ginkgo.By("importing AVAX from the C-Chain to the P-Chain", func() { _, err = pWallet.IssueImportTx( - cWallet.BlockchainID(), + cContext.BlockchainID, &recipientOwner, e2e.WithDefaultContext(), ) diff --git a/tests/e2e/p/interchain_workflow.go b/tests/e2e/p/interchain_workflow.go index d27d080d9c00..548c82ac1211 100644 --- a/tests/e2e/p/interchain_workflow.go +++ b/tests/e2e/p/interchain_workflow.go @@ -60,6 +60,8 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL xContext := xBuilder.Context() pBuilder := pWallet.Builder() pContext := pBuilder.Context() + cBuilder := cWallet.Builder() + cContext := cBuilder.Context() ginkgo.By("defining common configuration") recipientEthAddress := evm.GetEthAddress(recipientKey) @@ -186,7 +188,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL ginkgo.By("exporting AVAX from the P-Chain to the C-Chain", func() { _, err := pWallet.IssueExportTx( - cWallet.BlockchainID(), + cContext.BlockchainID, exportOutputs, e2e.WithDefaultContext(), ) diff --git a/tests/e2e/x/interchain_workflow.go b/tests/e2e/x/interchain_workflow.go index 838c6d11773d..ecc52f41f032 100644 --- a/tests/e2e/x/interchain_workflow.go +++ b/tests/e2e/x/interchain_workflow.go @@ -44,6 +44,8 @@ var _ = 
e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL recipientEthAddress := evm.GetEthAddress(recipientKey) xBuilder := xWallet.Builder() xContext := xBuilder.Context() + cBuilder := cWallet.Builder() + cContext := cBuilder.Context() avaxAssetID := xContext.AVAXAssetID // Use the same owner for sending to X-Chain and importing funds to P-Chain recipientOwner := secp256k1fx.OutputOwners{ @@ -96,7 +98,7 @@ var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL ginkgo.By("exporting AVAX from the X-Chain to the C-Chain", func() { _, err := xWallet.IssueExportTx( - cWallet.BlockchainID(), + cContext.BlockchainID, exportOutputs, e2e.WithDefaultContext(), ) diff --git a/wallet/chain/c/backend.go b/wallet/chain/c/backend.go index 3301015f0eeb..8d1ea6f34f07 100644 --- a/wallet/chain/c/backend.go +++ b/wallet/chain/c/backend.go @@ -4,6 +4,7 @@ package c import ( + "context" "errors" "fmt" "math/big" @@ -16,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - stdcontext "context" ethcommon "github.com/ethereum/go-ethereum/common" ) @@ -32,11 +32,10 @@ type Backend interface { BuilderBackend SignerBackend - AcceptAtomicTx(ctx stdcontext.Context, tx *evm.Tx) error + AcceptAtomicTx(ctx context.Context, tx *evm.Tx) error } type backend struct { - Context common.ChainUTXOs accountsLock sync.RWMutex @@ -49,18 +48,16 @@ type Account struct { } func NewBackend( - ctx Context, utxos common.ChainUTXOs, accounts map[ethcommon.Address]*Account, ) Backend { return &backend{ - Context: ctx, ChainUTXOs: utxos, accounts: accounts, } } -func (b *backend) AcceptAtomicTx(ctx stdcontext.Context, tx *evm.Tx) error { +func (b *backend) AcceptAtomicTx(ctx context.Context, tx *evm.Tx) error { switch tx := tx.UnsignedAtomicTx.(type) { case *evm.UnsignedImportTx: for _, input := range tx.ImportedInputs { @@ -131,7 +128,7 @@ func (b *backend) 
AcceptAtomicTx(ctx stdcontext.Context, tx *evm.Tx) error { return nil } -func (b *backend) Balance(_ stdcontext.Context, addr ethcommon.Address) (*big.Int, error) { +func (b *backend) Balance(_ context.Context, addr ethcommon.Address) (*big.Int, error) { b.accountsLock.RLock() defer b.accountsLock.RUnlock() @@ -142,7 +139,7 @@ func (b *backend) Balance(_ stdcontext.Context, addr ethcommon.Address) (*big.In return account.Balance, nil } -func (b *backend) Nonce(_ stdcontext.Context, addr ethcommon.Address) (uint64, error) { +func (b *backend) Nonce(_ context.Context, addr ethcommon.Address) (uint64, error) { b.accountsLock.RLock() defer b.accountsLock.RUnlock() diff --git a/wallet/chain/c/builder.go b/wallet/chain/c/builder.go index 28e1eccc2d54..0554cb39ba95 100644 --- a/wallet/chain/c/builder.go +++ b/wallet/chain/c/builder.go @@ -4,6 +4,7 @@ package c import ( + "context" "errors" "math/big" @@ -17,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - stdcontext "context" ethcommon "github.com/ethereum/go-ethereum/common" ) @@ -41,6 +41,10 @@ var ( // Builder provides a convenient interface for building unsigned C-chain // transactions. type Builder interface { + // Context returns the configuration of the chain that this builder uses to + // create transactions. + Context() *Context + // GetBalance calculates the amount of AVAX that this builder has control // over. GetBalance( @@ -86,16 +90,15 @@ type Builder interface { // BuilderBackend specifies the required information needed to build unsigned // C-chain transactions. 
type BuilderBackend interface { - Context - - UTXOs(ctx stdcontext.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) - Balance(ctx stdcontext.Context, addr ethcommon.Address) (*big.Int, error) - Nonce(ctx stdcontext.Context, addr ethcommon.Address) (uint64, error) + UTXOs(ctx context.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) + Balance(ctx context.Context, addr ethcommon.Address) (*big.Int, error) + Nonce(ctx context.Context, addr ethcommon.Address) (uint64, error) } type builder struct { avaxAddrs set.Set[ids.ShortID] ethAddrs set.Set[ethcommon.Address] + context *Context backend BuilderBackend } @@ -110,15 +113,21 @@ type builder struct { func NewBuilder( avaxAddrs set.Set[ids.ShortID], ethAddrs set.Set[ethcommon.Address], + context *Context, backend BuilderBackend, ) Builder { return &builder{ avaxAddrs: avaxAddrs, ethAddrs: ethAddrs, + context: context, backend: backend, } } +func (b *builder) Context() *Context { + return b.context +} + func (b *builder) GetBalance( options ...common.Option, ) (*big.Int, error) { @@ -152,7 +161,7 @@ func (b *builder) GetImportableBalance( var ( addrs = ops.Addresses(b.avaxAddrs) minIssuanceTime = ops.MinIssuanceTime() - avaxAssetID = b.backend.AVAXAssetID() + avaxAssetID = b.context.AVAXAssetID balance uint64 ) for _, utxo := range utxos { @@ -186,7 +195,7 @@ func (b *builder) NewImportTx( var ( addrs = ops.Addresses(b.avaxAddrs) minIssuanceTime = ops.MinIssuanceTime() - avaxAssetID = b.backend.AVAXAssetID() + avaxAssetID = b.context.AVAXAssetID importedInputs = make([]*avax.TransferableInput, 0, len(utxos)) importedAmount uint64 @@ -218,8 +227,8 @@ func (b *builder) NewImportTx( utils.Sort(importedInputs) tx := &evm.UnsignedImportTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: b.backend.BlockchainID(), + NetworkID: b.context.NetworkID, + BlockchainID: b.context.BlockchainID, SourceChain: chainID, ImportedInputs: importedInputs, } @@ -260,7 +269,7 @@ func (b *builder) NewExportTx( options ...common.Option, 
) (*evm.UnsignedExportTx, error) { var ( - avaxAssetID = b.backend.AVAXAssetID() + avaxAssetID = b.context.AVAXAssetID exportedOutputs = make([]*avax.TransferableOutput, len(outputs)) exportedAmount uint64 ) @@ -280,8 +289,8 @@ func (b *builder) NewExportTx( avax.SortTransferableOutputs(exportedOutputs, evm.Codec) tx := &evm.UnsignedExportTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: b.backend.BlockchainID(), + NetworkID: b.context.NetworkID, + BlockchainID: b.context.BlockchainID, DestinationChain: chainID, ExportedOutputs: exportedOutputs, } @@ -378,7 +387,7 @@ func (b *builder) NewExportTx( utils.Sort(inputs) tx.Ins = inputs - snowCtx, err := newSnowContext(b.backend) + snowCtx, err := newSnowContext(b.context) if err != nil { return nil, err } diff --git a/wallet/chain/c/context.go b/wallet/chain/c/context.go index dc0537e23069..d56a75a0070b 100644 --- a/wallet/chain/c/context.go +++ b/wallet/chain/c/context.go @@ -4,99 +4,66 @@ package c import ( + "context" + "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm" - - stdcontext "context" ) const Alias = "C" -var _ Context = (*context)(nil) - -type Context interface { - NetworkID() uint32 - BlockchainID() ids.ID - AVAXAssetID() ids.ID +type Context struct { + NetworkID uint32 + BlockchainID ids.ID + AVAXAssetID ids.ID } -type context struct { - networkID uint32 - blockchainID ids.ID - avaxAssetID ids.ID -} - -func NewContextFromURI(ctx stdcontext.Context, uri string) (Context, error) { +func NewContextFromURI(ctx context.Context, uri string) (*Context, error) { infoClient := info.NewClient(uri) xChainClient := avm.NewClient(uri, "X") return NewContextFromClients(ctx, infoClient, xChainClient) } func NewContextFromClients( - ctx stdcontext.Context, + 
ctx context.Context, infoClient info.Client, xChainClient avm.Client, -) (Context, error) { +) (*Context, error) { networkID, err := infoClient.GetNetworkID(ctx) if err != nil { return nil, err } - chainID, err := infoClient.GetBlockchainID(ctx, Alias) + blockchainID, err := infoClient.GetBlockchainID(ctx, Alias) if err != nil { return nil, err } - asset, err := xChainClient.GetAssetDescription(ctx, "AVAX") + avaxAsset, err := xChainClient.GetAssetDescription(ctx, "AVAX") if err != nil { return nil, err } - return NewContext( - networkID, - chainID, - asset.AssetID, - ), nil -} - -func NewContext( - networkID uint32, - blockchainID ids.ID, - avaxAssetID ids.ID, -) Context { - return &context{ - networkID: networkID, - blockchainID: blockchainID, - avaxAssetID: avaxAssetID, - } -} - -func (c *context) NetworkID() uint32 { - return c.networkID -} - -func (c *context) BlockchainID() ids.ID { - return c.blockchainID -} - -func (c *context) AVAXAssetID() ids.ID { - return c.avaxAssetID + return &Context{ + NetworkID: networkID, + BlockchainID: blockchainID, + AVAXAssetID: avaxAsset.AssetID, + }, nil } -func newSnowContext(c Context) (*snow.Context, error) { - chainID := c.BlockchainID() +func newSnowContext(c *Context) (*snow.Context, error) { lookup := ids.NewAliaser() return &snow.Context{ - NetworkID: c.NetworkID(), + NetworkID: c.NetworkID, SubnetID: constants.PrimaryNetworkID, - ChainID: chainID, - CChainID: chainID, - AVAXAssetID: c.AVAXAssetID(), + ChainID: c.BlockchainID, + CChainID: c.BlockchainID, + AVAXAssetID: c.AVAXAssetID, Log: logging.NoLog{}, BCLookup: lookup, - }, lookup.Alias(chainID, Alias) + }, lookup.Alias(c.BlockchainID, Alias) } diff --git a/wallet/chain/c/signer.go b/wallet/chain/c/signer.go index 24de72c13941..1e69db75be19 100644 --- a/wallet/chain/c/signer.go +++ b/wallet/chain/c/signer.go @@ -4,6 +4,7 @@ package c import ( + "context" "errors" "fmt" @@ -19,8 +20,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" 
"github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - stdcontext "context" ) const version = 0 @@ -45,7 +44,7 @@ type Signer interface { // // If the signer doesn't have the ability to provide a required signature, // the signature slot will be skipped without reporting an error. - SignAtomic(ctx stdcontext.Context, tx *evm.Tx) error + SignAtomic(ctx context.Context, tx *evm.Tx) error } type EthKeychain interface { @@ -57,7 +56,7 @@ type EthKeychain interface { } type SignerBackend interface { - GetUTXO(ctx stdcontext.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) + GetUTXO(ctx context.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) } type txSigner struct { @@ -74,7 +73,7 @@ func NewSigner(avaxKC keychain.Keychain, ethKC EthKeychain, backend SignerBacken } } -func (s *txSigner) SignAtomic(ctx stdcontext.Context, tx *evm.Tx) error { +func (s *txSigner) SignAtomic(ctx context.Context, tx *evm.Tx) error { switch utx := tx.UnsignedAtomicTx.(type) { case *evm.UnsignedImportTx: signers, err := s.getImportSigners(ctx, utx.SourceChain, utx.ImportedInputs) @@ -90,7 +89,7 @@ func (s *txSigner) SignAtomic(ctx stdcontext.Context, tx *evm.Tx) error { } } -func (s *txSigner) getImportSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([][]keychain.Signer, error) { +func (s *txSigner) getImportSigners(ctx context.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([][]keychain.Signer, error) { txSigners := make([][]keychain.Signer, len(ins)) for credIndex, transferInput := range ins { input, ok := transferInput.In.(*secp256k1fx.TransferInput) @@ -152,7 +151,7 @@ func (s *txSigner) getExportSigners(ins []evm.EVMInput) [][]keychain.Signer { return txSigners } -func SignUnsignedAtomic(ctx stdcontext.Context, signer Signer, utx evm.UnsignedAtomicTx) (*evm.Tx, error) { +func SignUnsignedAtomic(ctx context.Context, signer Signer, utx evm.UnsignedAtomicTx) 
(*evm.Tx, error) { tx := &evm.Tx{UnsignedAtomicTx: utx} return tx, signer.SignAtomic(ctx, tx) } diff --git a/wallet/chain/c/wallet.go b/wallet/chain/c/wallet.go index 1f8d6d251748..1eeb77e830cb 100644 --- a/wallet/chain/c/wallet.go +++ b/wallet/chain/c/wallet.go @@ -25,8 +25,6 @@ var ( ) type Wallet interface { - Context - // Builder returns the builder that will be used to create the transactions. Builder() Builder diff --git a/wallet/subnet/primary/api.go b/wallet/subnet/primary/api.go index a7c271b385bf..2aedc5c476c9 100644 --- a/wallet/subnet/primary/api.go +++ b/wallet/subnet/primary/api.go @@ -62,7 +62,7 @@ type AVAXState struct { XClient avm.Client XCTX *xbuilder.Context CClient evm.Client - CCTX c.Context + CCTX *c.Context UTXOs walletcommon.UTXOs } @@ -112,7 +112,7 @@ func FetchState( codec: xbuilder.Parser.Codec(), }, { - id: cCTX.BlockchainID(), + id: cCTX.BlockchainID, client: cClient, codec: evm.Codec, }, diff --git a/wallet/subnet/primary/examples/c-chain-export/main.go b/wallet/subnet/primary/examples/c-chain-export/main.go index 41ecb5ca814e..a9a4c61773b8 100644 --- a/wallet/subnet/primary/examples/c-chain-export/main.go +++ b/wallet/subnet/primary/examples/c-chain-export/main.go @@ -42,7 +42,7 @@ func main() { cWallet := wallet.C() // Pull out useful constants to use when issuing transactions. - cChainID := cWallet.BlockchainID() + cChainID := cWallet.Builder().Context().BlockchainID owner := secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ diff --git a/wallet/subnet/primary/examples/c-chain-import/main.go b/wallet/subnet/primary/examples/c-chain-import/main.go index 387d435db4df..2f257fe6fd11 100644 --- a/wallet/subnet/primary/examples/c-chain-import/main.go +++ b/wallet/subnet/primary/examples/c-chain-import/main.go @@ -46,8 +46,9 @@ func main() { cWallet := wallet.C() // Pull out useful constants to use when issuing transactions. 
- cChainID := cWallet.BlockchainID() - avaxAssetID := cWallet.AVAXAssetID() + cContext := cWallet.Builder().Context() + cChainID := cContext.BlockchainID + avaxAssetID := cContext.AVAXAssetID owner := secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ diff --git a/wallet/subnet/primary/wallet.go b/wallet/subnet/primary/wallet.go index 382a042a6bee..179e9351ccca 100644 --- a/wallet/subnet/primary/wallet.go +++ b/wallet/subnet/primary/wallet.go @@ -132,10 +132,10 @@ func MakeWallet(ctx context.Context, config *WalletConfig) (Wallet, error) { xBuilder := xbuilder.New(avaxAddrs, avaxState.XCTX, xBackend) xSigner := xsigner.New(config.AVAXKeychain, xBackend) - cChainID := avaxState.CCTX.BlockchainID() + cChainID := avaxState.CCTX.BlockchainID cUTXOs := common.NewChainUTXOs(cChainID, avaxState.UTXOs) - cBackend := c.NewBackend(avaxState.CCTX, cUTXOs, ethState.Accounts) - cBuilder := c.NewBuilder(avaxAddrs, ethAddrs, cBackend) + cBackend := c.NewBackend(cUTXOs, ethState.Accounts) + cBuilder := c.NewBuilder(avaxAddrs, ethAddrs, avaxState.CCTX, cBackend) cSigner := c.NewSigner(config.AVAXKeychain, config.EthKeychain, cBackend) return NewWallet( From e99d1ba7da3f8f9cf0f9ddd04d08f2ad801f17a9 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 14 Jun 2024 16:02:54 -0400 Subject: [PATCH 065/102] Standardize wallet tx acceptance polling (#3110) --- tests/antithesis/avalanchego/main.go | 28 +--------- tests/antithesis/xsvm/main.go | 7 ++- tests/e2e/vms/xsvm.go | 6 +- tests/e2e/x/transfer/virtuous.go | 9 +-- vms/avm/client.go | 64 +++++++++++++--------- vms/example/xsvm/api/client.go | 9 ++- vms/example/xsvm/cmd/issue/export/cmd.go | 2 +- vms/example/xsvm/cmd/issue/importtx/cmd.go | 2 +- vms/example/xsvm/cmd/issue/transfer/cmd.go | 2 +- vms/platformvm/client.go | 60 ++++++++++---------- wallet/chain/c/wallet.go | 60 +++++++++++--------- wallet/chain/p/wallet.go | 14 +---- wallet/chain/x/wallet.go | 21 +------ 13 files changed, 127 insertions(+), 157 deletions(-) 
diff --git a/tests/antithesis/avalanchego/main.go b/tests/antithesis/avalanchego/main.go index 26a29fab749b..3a0d29a0a3ed 100644 --- a/tests/antithesis/avalanchego/main.go +++ b/tests/antithesis/avalanchego/main.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests/antithesis" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -27,7 +26,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm" - "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" @@ -581,8 +579,7 @@ func (w *workload) confirmXChainTx(ctx context.Context, tx *xtxs.Tx) { txID := tx.ID() for _, uri := range w.uris { client := avm.NewClient(uri, "X") - status, err := client.ConfirmTx(ctx, txID, 100*time.Millisecond) - if err != nil { + if err := avm.AwaitTxAccepted(client, ctx, txID, 100*time.Millisecond); err != nil { log.Printf("failed to confirm X-chain transaction %s on %s: %s", txID, uri, err) assert.Unreachable("failed to determine the status of an X-chain transaction", map[string]any{ "worker": w.id, @@ -592,16 +589,6 @@ func (w *workload) confirmXChainTx(ctx context.Context, tx *xtxs.Tx) { }) return } - if status != choices.Accepted { - log.Printf("failed to confirm X-chain transaction %s on %s: status == %s", txID, uri, status) - assert.Unreachable("failed to confirm an X-chain transaction", map[string]any{ - "worker": w.id, - "txID": txID, - "uri": uri, - "status": status, 
- }) - return - } log.Printf("confirmed X-chain transaction %s on %s", txID, uri) } log.Printf("confirmed X-chain transaction %s on all nodes", txID) @@ -611,8 +598,7 @@ func (w *workload) confirmPChainTx(ctx context.Context, tx *ptxs.Tx) { txID := tx.ID() for _, uri := range w.uris { client := platformvm.NewClient(uri) - s, err := client.AwaitTxDecided(ctx, txID, 100*time.Millisecond) - if err != nil { + if err := platformvm.AwaitTxAccepted(client, ctx, txID, 100*time.Millisecond); err != nil { log.Printf("failed to determine the status of a P-chain transaction %s on %s: %s", txID, uri, err) assert.Unreachable("failed to determine the status of a P-chain transaction", map[string]any{ "worker": w.id, @@ -622,16 +608,6 @@ func (w *workload) confirmPChainTx(ctx context.Context, tx *ptxs.Tx) { }) return } - if s.Status != status.Committed { - log.Printf("failed to confirm P-chain transaction %s on %s: status == %s", txID, uri, s.Status) - assert.Unreachable("failed to confirm a P-chain transaction", map[string]any{ - "worker": w.id, - "txID": txID, - "uri": uri, - "status": s.Status, - }) - return - } log.Printf("confirmed P-chain transaction %s on %s", txID, uri) } log.Printf("confirmed P-chain transaction %s on all nodes", txID) diff --git a/tests/antithesis/xsvm/main.go b/tests/antithesis/xsvm/main.go index ecda411fc5ad..2c73a8287820 100644 --- a/tests/antithesis/xsvm/main.go +++ b/tests/antithesis/xsvm/main.go @@ -25,7 +25,10 @@ import ( "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/transfer" ) -const NumKeys = 5 +const ( + NumKeys = 5 + PollingInterval = 50 * time.Millisecond +) func main() { c, err := antithesis.NewConfig(os.Args) @@ -171,7 +174,7 @@ func (w *workload) run(ctx context.Context) { func (w *workload) confirmTransferTx(ctx context.Context, tx *status.TxIssuance) { for _, uri := range w.uris { client := api.NewClient(uri, w.chainID.String()) - if err := api.WaitForAcceptance(ctx, client, w.key.Address(), tx.Nonce); err != nil { 
+ if err := api.AwaitTxAccepted(ctx, client, w.key.Address(), tx.Nonce, PollingInterval); err != nil { log.Printf("worker %d failed to confirm transaction %s on %s: %s", w.id, tx.TxID, uri, err) assert.Unreachable("failed to confirm transaction", map[string]any{ "worker": w.id, diff --git a/tests/e2e/vms/xsvm.go b/tests/e2e/vms/xsvm.go index ebb00f882b16..71eeb7936c43 100644 --- a/tests/e2e/vms/xsvm.go +++ b/tests/e2e/vms/xsvm.go @@ -5,6 +5,7 @@ package vms import ( "fmt" + "time" "github.com/stretchr/testify/require" @@ -22,6 +23,8 @@ import ( ginkgo "github.com/onsi/ginkgo/v2" ) +const pollingInterval = 50 * time.Millisecond + var ( subnetAName = "xsvm-a" subnetBName = "xsvm-b" @@ -85,11 +88,12 @@ var _ = ginkgo.Describe("[XSVM]", func() { ginkgo.By("checking that the export transaction has been accepted on all nodes") for _, node := range network.Nodes[1:] { - require.NoError(api.WaitForAcceptance( + require.NoError(api.AwaitTxAccepted( e2e.DefaultContext(), api.NewClient(node.URI, sourceChain.ChainID.String()), sourceChain.PreFundedKey.Address(), exportTxStatus.Nonce, + pollingInterval, )) } diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index b30bbd30316c..58a0351ba123 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -14,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests" "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/set" @@ -238,16 +237,12 @@ RECEIVER NEW BALANCE (AFTER) : %21d AVAX txID := tx.ID() for _, u := range rpcEps { xc := avm.NewClient(u, "X") - status, err := xc.ConfirmTx(e2e.DefaultContext(), txID, 2*time.Second) - require.NoError(err) - require.Equal(choices.Accepted, status) + require.NoError(avm.AwaitTxAccepted(xc, 
e2e.DefaultContext(), txID, 2*time.Second)) } for _, u := range rpcEps { xc := avm.NewClient(u, "X") - status, err := xc.ConfirmTx(e2e.DefaultContext(), txID, 2*time.Second) - require.NoError(err) - require.Equal(choices.Accepted, status) + require.NoError(avm.AwaitTxAccepted(xc, e2e.DefaultContext(), txID, 2*time.Second)) mm, err := tests.GetNodeMetrics(e2e.DefaultContext(), u) require.NoError(err) diff --git a/vms/avm/client.go b/vms/avm/client.go index 63df6543446e..d53ed9388c7a 100644 --- a/vms/avm/client.go +++ b/vms/avm/client.go @@ -5,6 +5,7 @@ package avm import ( "context" + "errors" "fmt" "time" @@ -19,7 +20,11 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ Client = (*client)(nil) +var ( + _ Client = (*client)(nil) + + ErrRejected = errors.New("rejected") +) // Client for interacting with an AVM (X-Chain) instance type Client interface { @@ -35,12 +40,6 @@ type Client interface { // Deprecated: GetTxStatus only returns Accepted or Unknown, GetTx should be // used instead to determine if the tx was accepted. GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (choices.Status, error) - // ConfirmTx attempts to confirm [txID] by repeatedly checking its status. - // Note: ConfirmTx will block until either the context is done or the client - // returns a decided status. - // TODO: Move this function off of the Client interface into a utility - // function. 
- ConfirmTx(ctx context.Context, txID ids.ID, freq time.Duration, options ...rpc.Option) (choices.Status, error) // GetTx returns the byte representation of [txID] GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) // GetUTXOs returns the byte representation of the UTXOs controlled by [addrs] @@ -285,26 +284,6 @@ func (c *client) GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Op return res.Status, err } -func (c *client) ConfirmTx(ctx context.Context, txID ids.ID, freq time.Duration, options ...rpc.Option) (choices.Status, error) { - ticker := time.NewTicker(freq) - defer ticker.Stop() - - for { - status, err := c.GetTxStatus(ctx, txID, options...) - if err == nil { - if status.Decided() { - return status, nil - } - } - - select { - case <-ticker.C: - case <-ctx.Done(): - return status, ctx.Err() - } - } -} - func (c *client) GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) { res := &api.FormattedTx{} err := c.requester.SendRequest(ctx, "avm.getTx", &api.GetTxArgs{ @@ -766,3 +745,34 @@ func (c *client) Export( }, res, options...) return res.TxID, err } + +func AwaitTxAccepted( + c Client, + ctx context.Context, + txID ids.ID, + freq time.Duration, + options ...rpc.Option, +) error { + ticker := time.NewTicker(freq) + defer ticker.Stop() + + for { + status, err := c.GetTxStatus(ctx, txID, options...) 
+ if err != nil { + return err + } + + switch status { + case choices.Accepted: + return nil + case choices.Rejected: + return ErrRejected + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/vms/example/xsvm/api/client.go b/vms/example/xsvm/api/client.go index 4b1dba462dfe..6395e0aa4cf2 100644 --- a/vms/example/xsvm/api/client.go +++ b/vms/example/xsvm/api/client.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) -const defaultPollingInterval = 50 * time.Millisecond +const DefaultPollingInterval = 50 * time.Millisecond // Client defines the xsvm API client. type Client interface { @@ -245,20 +245,23 @@ func (c *client) Message( return resp.Message, resp.Signature, resp.Message.Initialize() } -func WaitForAcceptance( +func AwaitTxAccepted( ctx context.Context, c Client, address ids.ShortID, nonce uint64, + freq time.Duration, options ...rpc.Option, ) error { - ticker := time.NewTicker(defaultPollingInterval) + ticker := time.NewTicker(freq) defer ticker.Stop() + for { currentNonce, err := c.Nonce(ctx, address, options...) if err != nil { return err } + if currentNonce > nonce { // The nonce increasing indicates the acceptance of a transaction // issued with the specified nonce. 
diff --git a/vms/example/xsvm/cmd/issue/export/cmd.go b/vms/example/xsvm/cmd/issue/export/cmd.go index ff0b0c5f379f..b8fb7145e4ea 100644 --- a/vms/example/xsvm/cmd/issue/export/cmd.go +++ b/vms/example/xsvm/cmd/issue/export/cmd.go @@ -71,7 +71,7 @@ func Export(ctx context.Context, config *Config) (*status.TxIssuance, error) { return nil, err } - if err := api.WaitForAcceptance(ctx, client, address, nonce); err != nil { + if err := api.AwaitTxAccepted(ctx, client, address, nonce, api.DefaultPollingInterval); err != nil { return nil, err } diff --git a/vms/example/xsvm/cmd/issue/importtx/cmd.go b/vms/example/xsvm/cmd/issue/importtx/cmd.go index d097ea53e1c3..e0892e2fdaa3 100644 --- a/vms/example/xsvm/cmd/issue/importtx/cmd.go +++ b/vms/example/xsvm/cmd/issue/importtx/cmd.go @@ -139,7 +139,7 @@ func Import(ctx context.Context, config *Config) (*status.TxIssuance, error) { return nil, err } - if err := api.WaitForAcceptance(ctx, client, address, nonce); err != nil { + if err := api.AwaitTxAccepted(ctx, client, address, nonce, api.DefaultPollingInterval); err != nil { return nil, err } diff --git a/vms/example/xsvm/cmd/issue/transfer/cmd.go b/vms/example/xsvm/cmd/issue/transfer/cmd.go index 764ca978e348..cd0e9abe48a7 100644 --- a/vms/example/xsvm/cmd/issue/transfer/cmd.go +++ b/vms/example/xsvm/cmd/issue/transfer/cmd.go @@ -70,7 +70,7 @@ func Transfer(ctx context.Context, config *Config) (*status.TxIssuance, error) { return nil, err } - if err := api.WaitForAcceptance(ctx, client, address, nonce); err != nil { + if err := api.AwaitTxAccepted(ctx, client, address, nonce, api.DefaultPollingInterval); err != nil { return nil, err } diff --git a/vms/platformvm/client.go b/vms/platformvm/client.go index 34c5eecbe85a..11453efb5f6b 100644 --- a/vms/platformvm/client.go +++ b/vms/platformvm/client.go @@ -88,16 +88,6 @@ type Client interface { GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) // GetTxStatus returns the status of the transaction 
corresponding to [txID] GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (*GetTxStatusResponse, error) - // AwaitTxDecided polls [GetTxStatus] until a status is returned that - // implies the tx may be decided. - // TODO: Move this function off of the Client interface into a utility - // function. - AwaitTxDecided( - ctx context.Context, - txID ids.ID, - freq time.Duration, - options ...rpc.Option, - ) (*GetTxStatusResponse, error) // GetStake returns the amount of nAVAX that [addrs] have cumulatively // staked on the Primary Network. // @@ -409,27 +399,6 @@ func (c *client) GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Op return res, err } -func (c *client) AwaitTxDecided(ctx context.Context, txID ids.ID, freq time.Duration, options ...rpc.Option) (*GetTxStatusResponse, error) { - ticker := time.NewTicker(freq) - defer ticker.Stop() - - for { - res, err := c.GetTxStatus(ctx, txID, options...) - if err == nil { - switch res.Status { - case status.Committed, status.Aborted, status.Dropped: - return res, nil - } - } - - select { - case <-ticker.C: - case <-ctx.Done(): - return nil, ctx.Err() - } - } -} - func (c *client) GetStake( ctx context.Context, addrs []ids.ShortID, @@ -545,3 +514,32 @@ func (c *client) GetBlockByHeight(ctx context.Context, height uint64, options .. } return formatting.Decode(res.Encoding, res.Block) } + +func AwaitTxAccepted( + c Client, + ctx context.Context, + txID ids.ID, + freq time.Duration, + options ...rpc.Option, +) error { + ticker := time.NewTicker(freq) + defer ticker.Stop() + + for { + res, err := c.GetTxStatus(ctx, txID, options...) 
+ if err != nil { + return err + } + + switch res.Status { + case status.Committed, status.Aborted: + return nil + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/wallet/chain/c/wallet.go b/wallet/chain/c/wallet.go index 1eeb77e830cb..5685316fba4f 100644 --- a/wallet/chain/c/wallet.go +++ b/wallet/chain/c/wallet.go @@ -4,7 +4,7 @@ package c import ( - "errors" + "context" "math/big" "time" @@ -12,17 +12,14 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/rpc" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ethcommon "github.com/ethereum/go-ethereum/common" ) -var ( - _ Wallet = (*wallet)(nil) - - errNotCommitted = errors.New("not committed") -) +var _ Wallet = (*wallet)(nil) type Wallet interface { // Builder returns the builder that will be used to create the transactions. @@ -165,25 +162,45 @@ func (w *wallet) IssueAtomicTx( return w.Backend.AcceptAtomicTx(ctx, tx) } - pollFrequency := ops.PollFrequency() - ticker := time.NewTicker(pollFrequency) + if err := awaitTxAccepted(w.avaxClient, ctx, txID, ops.PollFrequency()); err != nil { + return err + } + + return w.Backend.AcceptAtomicTx(ctx, tx) +} + +func (w *wallet) baseFee(options []common.Option) (*big.Int, error) { + ops := common.NewOptions(options) + baseFee := ops.BaseFee(nil) + if baseFee != nil { + return baseFee, nil + } + + ctx := ops.Context() + return w.ethClient.EstimateBaseFee(ctx) +} + +// TODO: Upstream this function into coreth. +func awaitTxAccepted( + c evm.Client, + ctx context.Context, + txID ids.ID, + freq time.Duration, + options ...rpc.Option, +) error { + ticker := time.NewTicker(freq) defer ticker.Stop() for { - status, err := w.avaxClient.GetAtomicTxStatus(ctx, txID) + status, err := c.GetAtomicTxStatus(ctx, txID, options...) 
if err != nil { return err } - switch status { - case evm.Accepted: - return w.Backend.AcceptAtomicTx(ctx, tx) - case evm.Dropped, evm.Unknown: - return errNotCommitted + if status == evm.Accepted { + return nil } - // The tx is Processing. - select { case <-ticker.C: case <-ctx.Done(): @@ -191,14 +208,3 @@ func (w *wallet) IssueAtomicTx( } } } - -func (w *wallet) baseFee(options []common.Option) (*big.Int, error) { - ops := common.NewOptions(options) - baseFee := ops.BaseFee(nil) - if baseFee != nil { - return baseFee, nil - } - - ctx := ops.Context() - return w.ethClient.EstimateBaseFee(ctx) -} diff --git a/wallet/chain/p/wallet.go b/wallet/chain/p/wallet.go index b96e6f3723e9..2a23e8fdd131 100644 --- a/wallet/chain/p/wallet.go +++ b/wallet/chain/p/wallet.go @@ -5,13 +5,11 @@ package p import ( "errors" - "fmt" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm" - "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/chain/p/builder" @@ -521,17 +519,9 @@ func (w *wallet) IssueTx( return w.Backend.AcceptTx(ctx, tx) } - txStatus, err := w.client.AwaitTxDecided(ctx, txID, ops.PollFrequency()) - if err != nil { + if err := platformvm.AwaitTxAccepted(w.client, ctx, txID, ops.PollFrequency()); err != nil { return err } - if err := w.Backend.AcceptTx(ctx, tx); err != nil { - return err - } - - if txStatus.Status != status.Committed { - return fmt.Errorf("%w: %s", ErrNotCommitted, txStatus.Reason) - } - return nil + return w.Backend.AcceptTx(ctx, tx) } diff --git a/wallet/chain/x/wallet.go b/wallet/chain/x/wallet.go index bb6484b5526a..a5bac3e8b6ce 100644 --- a/wallet/chain/x/wallet.go +++ b/wallet/chain/x/wallet.go @@ -4,10 +4,7 @@ package x import ( - "errors" - 
"github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -18,11 +15,7 @@ import ( "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) -var ( - ErrNotAccepted = errors.New("not accepted") - - _ Wallet = (*wallet)(nil) -) +var _ Wallet = (*wallet)(nil) type Wallet interface { // Builder returns the builder that will be used to create the transactions. @@ -313,17 +306,9 @@ func (w *wallet) IssueTx( return w.backend.AcceptTx(ctx, tx) } - txStatus, err := w.client.ConfirmTx(ctx, txID, ops.PollFrequency()) - if err != nil { - return err - } - - if err := w.backend.AcceptTx(ctx, tx); err != nil { + if err := avm.AwaitTxAccepted(w.client, ctx, txID, ops.PollFrequency()); err != nil { return err } - if txStatus != choices.Accepted { - return ErrNotAccepted - } - return nil + return w.backend.AcceptTx(ctx, tx) } From 7455c9971e3dd06d3248daf716ce678cfb2abd16 Mon Sep 17 00:00:00 2001 From: marun Date: Fri, 14 Jun 2024 22:33:07 +0200 Subject: [PATCH 066/102] [antithesis] Remove assertions incompatible with fault injection (#3104) Co-authored-by: Stephen Buttolph --- tests/antithesis/avalanchego/main.go | 56 ---------------------------- tests/antithesis/xsvm/main.go | 6 --- 2 files changed, 62 deletions(-) diff --git a/tests/antithesis/avalanchego/main.go b/tests/antithesis/avalanchego/main.go index 3a0d29a0a3ed..a7101b6bc3cb 100644 --- a/tests/antithesis/avalanchego/main.go +++ b/tests/antithesis/avalanchego/main.go @@ -261,10 +261,6 @@ func (w *workload) issueXChainBaseTx(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain baseTx: %s", err) - assert.Unreachable("failed to issue X-chain baseTx", map[string]any{ - "worker": w.id, - "err": err, - }) return } log.Printf("issued new X-chain baseTx %s in %s", baseTx.ID(), 
time.Since(baseStartTime)) @@ -318,10 +314,6 @@ func (w *workload) issueXChainCreateAssetTx(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain create asset transaction: %s", err) - assert.Unreachable("failed to issue X-chain create asset transaction", map[string]any{ - "worker": w.id, - "err": err, - }) return } log.Printf("created new X-chain asset %s in %s", createAssetTx.ID(), time.Since(createAssetStartTime)) @@ -376,10 +368,6 @@ func (w *workload) issueXChainOperationTx(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain create asset transaction: %s", err) - assert.Unreachable("failed to issue X-chain create asset transaction", map[string]any{ - "worker": w.id, - "err": err, - }) return } log.Printf("created new X-chain asset %s in %s", createAssetTx.ID(), time.Since(createAssetStartTime)) @@ -391,10 +379,6 @@ func (w *workload) issueXChainOperationTx(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain operation transaction: %s", err) - assert.Unreachable("failed to issue X-chain operation transaction", map[string]any{ - "worker": w.id, - "err": err, - }) return } log.Printf("issued X-chain operation tx %s in %s", operationTx.ID(), time.Since(operationStartTime)) @@ -454,10 +438,6 @@ func (w *workload) issueXToPTransfer(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain export transaction: %s", err) - assert.Unreachable("failed to issue X-chain export transaction", map[string]any{ - "worker": w.id, - "err": err, - }) return } log.Printf("created X-chain export transaction %s in %s", exportTx.ID(), time.Since(exportStartTime)) @@ -472,10 +452,6 @@ func (w *workload) issueXToPTransfer(ctx context.Context) { ) if err != nil { log.Printf("failed to issue P-chain import transaction: %s", err) - assert.Unreachable("failed to issue P-chain import transaction", map[string]any{ - "worker": w.id, - "err": err, - }) return } log.Printf("created P-chain import transaction 
%s in %s", importTx.ID(), time.Since(importStartTime)) @@ -536,10 +512,6 @@ func (w *workload) issuePToXTransfer(ctx context.Context) { ) if err != nil { log.Printf("failed to issue P-chain export transaction: %s", err) - assert.Unreachable("failed to issue P-chain export transaction", map[string]any{ - "worker": w.id, - "err": err, - }) return } log.Printf("created P-chain export transaction %s in %s", exportTx.ID(), time.Since(exportStartTime)) @@ -551,10 +523,6 @@ func (w *workload) issuePToXTransfer(ctx context.Context) { ) if err != nil { log.Printf("failed to issue X-chain import transaction: %s", err) - assert.Unreachable("failed to issue X-chain import transaction", map[string]any{ - "worker": w.id, - "err": err, - }) return } log.Printf("created X-chain import transaction %s in %s", importTx.ID(), time.Since(importStartTime)) @@ -581,12 +549,6 @@ func (w *workload) confirmXChainTx(ctx context.Context, tx *xtxs.Tx) { client := avm.NewClient(uri, "X") if err := avm.AwaitTxAccepted(client, ctx, txID, 100*time.Millisecond); err != nil { log.Printf("failed to confirm X-chain transaction %s on %s: %s", txID, uri, err) - assert.Unreachable("failed to determine the status of an X-chain transaction", map[string]any{ - "worker": w.id, - "txID": txID, - "uri": uri, - "err": err, - }) return } log.Printf("confirmed X-chain transaction %s on %s", txID, uri) @@ -600,12 +562,6 @@ func (w *workload) confirmPChainTx(ctx context.Context, tx *ptxs.Tx) { client := platformvm.NewClient(uri) if err := platformvm.AwaitTxAccepted(client, ctx, txID, 100*time.Millisecond); err != nil { log.Printf("failed to determine the status of a P-chain transaction %s on %s: %s", txID, uri, err) - assert.Unreachable("failed to determine the status of a P-chain transaction", map[string]any{ - "worker": w.id, - "txID": txID, - "uri": uri, - "err": err, - }) return } log.Printf("confirmed P-chain transaction %s on %s", txID, uri) @@ -631,12 +587,6 @@ func (w *workload) 
verifyXChainTxConsumedUTXOs(ctx context.Context, tx *xtxs.Tx) ) if err != nil { log.Printf("failed to fetch X-chain UTXOs on %s: %s", uri, err) - assert.Unreachable("failed to fetch X-chain UTXOs", map[string]any{ - "worker": w.id, - "txID": txID, - "uri": uri, - "err": err, - }) return } @@ -677,12 +627,6 @@ func (w *workload) verifyPChainTxConsumedUTXOs(ctx context.Context, tx *ptxs.Tx) ) if err != nil { log.Printf("failed to fetch P-chain UTXOs on %s: %s", uri, err) - assert.Unreachable("failed to fetch P-chain UTXOs", map[string]any{ - "worker": w.id, - "uri": uri, - "txID": txID, - "err": err, - }) return } diff --git a/tests/antithesis/xsvm/main.go b/tests/antithesis/xsvm/main.go index 2c73a8287820..9e70ebff1fbf 100644 --- a/tests/antithesis/xsvm/main.go +++ b/tests/antithesis/xsvm/main.go @@ -176,12 +176,6 @@ func (w *workload) confirmTransferTx(ctx context.Context, tx *status.TxIssuance) client := api.NewClient(uri, w.chainID.String()) if err := api.AwaitTxAccepted(ctx, client, w.key.Address(), tx.Nonce, PollingInterval); err != nil { log.Printf("worker %d failed to confirm transaction %s on %s: %s", w.id, tx.TxID, uri, err) - assert.Unreachable("failed to confirm transaction", map[string]any{ - "worker": w.id, - "txID": tx.TxID, - "uri": uri, - "err": err, - }) return } } From 576b3927c54b0e7ad91602572f4aa8f9fe24d768 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 17 Jun 2024 13:46:05 -0400 Subject: [PATCH 067/102] Use health labels (#3122) --- api/health/health.go | 30 ++++++++++----------- api/health/metrics.go | 27 ------------------- api/health/worker.go | 61 +++++++++++++++++++++++++++---------------- 3 files changed, 53 insertions(+), 65 deletions(-) delete mode 100644 api/health/metrics.go diff --git a/api/health/health.go b/api/health/health.go index 9997d665e77f..01661b7e85ab 100644 --- a/api/health/health.go +++ b/api/health/health.go @@ -14,6 +14,10 @@ import ( ) const ( + // CheckLabel is the label used to differentiate between 
health checks. + CheckLabel = "check" + // TagLabel is the label used to differentiate between health check tags. + TagLabel = "tag" // AllTag is automatically added to every registered check. AllTag = "all" // ApplicationTag checks will act as if they specified every tag that has @@ -62,23 +66,19 @@ type health struct { } func New(log logging.Logger, registerer prometheus.Registerer) (Health, error) { - readinessWorker, err := newWorker(log, "readiness", registerer) - if err != nil { - return nil, err - } - - healthWorker, err := newWorker(log, "health", registerer) - if err != nil { - return nil, err - } - - livenessWorker, err := newWorker(log, "liveness", registerer) + failingChecks := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "checks_failing", + Help: "number of currently failing health checks", + }, + []string{CheckLabel, TagLabel}, + ) return &health{ log: log, - readiness: readinessWorker, - health: healthWorker, - liveness: livenessWorker, - }, err + readiness: newWorker(log, "readiness", failingChecks), + health: newWorker(log, "health", failingChecks), + liveness: newWorker(log, "liveness", failingChecks), + }, registerer.Register(failingChecks) } func (h *health) RegisterReadinessCheck(name string, checker Checker, tags ...string) error { diff --git a/api/health/metrics.go b/api/health/metrics.go deleted file mode 100644 index fdb7b2ed813b..000000000000 --- a/api/health/metrics.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package health - -import "github.com/prometheus/client_golang/prometheus" - -type metrics struct { - // failingChecks keeps track of the number of check failing - failingChecks *prometheus.GaugeVec -} - -func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { - metrics := &metrics{ - failingChecks: prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "checks_failing", - Help: "number of currently failing health checks", - }, - []string{"tag"}, - ), - } - metrics.failingChecks.WithLabelValues(AllTag).Set(0) - metrics.failingChecks.WithLabelValues(ApplicationTag).Set(0) - return metrics, registerer.Register(metrics.failingChecks) -} diff --git a/api/health/worker.go b/api/health/worker.go index 91fad853b94b..ee9b7bbe85d6 100644 --- a/api/health/worker.go +++ b/api/health/worker.go @@ -28,11 +28,11 @@ var ( ) type worker struct { - log logging.Logger - namespace string - metrics *metrics - checksLock sync.RWMutex - checks map[string]*taggedChecker + log logging.Logger + name string + failingChecks *prometheus.GaugeVec + checksLock sync.RWMutex + checks map[string]*taggedChecker resultsLock sync.RWMutex results map[string]Result @@ -53,19 +53,25 @@ type taggedChecker struct { func newWorker( log logging.Logger, - namespace string, - registerer prometheus.Registerer, -) (*worker, error) { - metrics, err := newMetrics(namespace, registerer) + name string, + failingChecks *prometheus.GaugeVec, +) *worker { + // Initialize the number of failing checks to 0 for all checks + for _, tag := range []string{AllTag, ApplicationTag} { + failingChecks.With(prometheus.Labels{ + CheckLabel: name, + TagLabel: tag, + }).Set(0) + } return &worker{ - log: log, - namespace: namespace, - metrics: metrics, - checks: make(map[string]*taggedChecker), - results: make(map[string]Result), - closer: make(chan struct{}), - tags: make(map[string]set.Set[string]), - }, err + log: log, + name: name, + failingChecks: failingChecks, 
+ checks: make(map[string]*taggedChecker), + results: make(map[string]Result), + closer: make(chan struct{}), + tags: make(map[string]set.Set[string]), + } } func (w *worker) RegisterCheck(name string, check Checker, tags ...string) error { @@ -107,7 +113,7 @@ func (w *worker) RegisterCheck(name string, check Checker, tags ...string) error // Whenever a new check is added - it is failing w.log.Info("registered new check and initialized its state to failing", - zap.String("namespace", w.namespace), + zap.String("name", w.name), zap.String("name", name), zap.Strings("tags", tags), ) @@ -244,7 +250,7 @@ func (w *worker) runCheck(ctx context.Context, wg *sync.WaitGroup, name string, if prevResult.Error == nil { w.log.Warn("check started failing", - zap.String("namespace", w.namespace), + zap.String("name", w.name), zap.String("name", name), zap.Strings("tags", check.tags), zap.Error(err), @@ -253,7 +259,7 @@ func (w *worker) runCheck(ctx context.Context, wg *sync.WaitGroup, name string, } } else if prevResult.Error != nil { w.log.Info("check started passing", - zap.String("namespace", w.namespace), + zap.String("name", w.name), zap.String("name", name), zap.Strings("tags", check.tags), ) @@ -271,7 +277,10 @@ func (w *worker) updateMetrics(tc *taggedChecker, healthy bool, register bool) { if tc.isApplicationCheck { // Note: [w.tags] will include AllTag. 
for tag := range w.tags { - gauge := w.metrics.failingChecks.WithLabelValues(tag) + gauge := w.failingChecks.With(prometheus.Labels{ + CheckLabel: w.name, + TagLabel: tag, + }) if healthy { gauge.Dec() } else { @@ -285,7 +294,10 @@ func (w *worker) updateMetrics(tc *taggedChecker, healthy bool, register bool) { } } else { for _, tag := range tc.tags { - gauge := w.metrics.failingChecks.WithLabelValues(tag) + gauge := w.failingChecks.With(prometheus.Labels{ + CheckLabel: w.name, + TagLabel: tag, + }) if healthy { gauge.Dec() } else { @@ -297,7 +309,10 @@ func (w *worker) updateMetrics(tc *taggedChecker, healthy bool, register bool) { } } } - gauge := w.metrics.failingChecks.WithLabelValues(AllTag) + gauge := w.failingChecks.With(prometheus.Labels{ + CheckLabel: w.name, + TagLabel: AllTag, + }) if healthy { gauge.Dec() } else { From 5d5b9cfc472dac10eb46e54af6ca18566d0b3ca8 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 18 Jun 2024 11:11:28 -0400 Subject: [PATCH 068/102] Remove `Decided` from the `Consensus` interface (#3123) --- snow/consensus/snowman/consensus.go | 3 --- snow/consensus/snowman/consensus_test.go | 4 ---- snow/consensus/snowman/topological.go | 11 ---------- snow/engine/snowman/transitive.go | 26 +++++++++++++++++------- 4 files changed, 19 insertions(+), 25 deletions(-) diff --git a/snow/consensus/snowman/consensus.go b/snow/consensus/snowman/consensus.go index 9d5aa00c4e82..19fc1600e4f0 100644 --- a/snow/consensus/snowman/consensus.go +++ b/snow/consensus/snowman/consensus.go @@ -39,9 +39,6 @@ type Consensus interface { // Returns if a critical error has occurred. Add(Block) error - // Decided returns true if the block has been decided. - Decided(Block) bool - // Processing returns true if the block ID is currently processing. 
Processing(ids.ID) bool diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index 4d5d9e1e6b68..1de19a0b2df4 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -291,7 +291,6 @@ func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { require.Equal(choices.Accepted, snowmantest.Genesis.Status()) require.False(sm.Processing(snowmantest.Genesis.ID())) - require.True(sm.Decided(snowmantest.Genesis)) require.True(sm.IsPreferred(snowmantest.Genesis.ID())) pref, ok := sm.PreferenceAtHeight(snowmantest.Genesis.Height()) @@ -329,7 +328,6 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { require.Equal(choices.Rejected, block.Status()) require.False(sm.Processing(block.ID())) - require.True(sm.Decided(block)) require.False(sm.IsPreferred(block.ID())) _, ok := sm.PreferenceAtHeight(block.Height()) @@ -365,7 +363,6 @@ func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { require.Equal(choices.Processing, block.Status()) require.False(sm.Processing(block.ID())) - require.False(sm.Decided(block)) require.False(sm.IsPreferred(block.ID())) _, ok := sm.PreferenceAtHeight(block.Height()) @@ -402,7 +399,6 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { require.NoError(sm.Add(block)) require.Equal(choices.Processing, block.Status()) require.True(sm.Processing(block.ID())) - require.False(sm.Decided(block)) require.True(sm.IsPreferred(block.ID())) pref, ok := sm.PreferenceAtHeight(block.Height()) diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index 96f717f32ba4..79703b8c3c70 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" 
"github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/set" @@ -182,16 +181,6 @@ func (ts *Topological) Add(blk Block) error { return nil } -func (ts *Topological) Decided(blk Block) bool { - // If the block is decided, then it must have been previously issued. - if blk.Status().Decided() { - return true - } - // If the block is marked as fetched, we can check if it has been - // transitively rejected. - return blk.Status() == choices.Processing && blk.Height() <= ts.lastAcceptedHeight -} - func (ts *Topological) Processing(blkID ids.ID) bool { // The last accepted block is in the blocks map, so we first must ensure the // requested block isn't the last accepted block. diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 5164957f9532..f424ff993e99 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -758,7 +758,7 @@ func (t *Transitive) issueFrom( delete(t.blkReqSourceMetric, req) } - issued := t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) + issued := t.isDecided(blk) || t.Consensus.Processing(blkID) if issued { // A dependency should never be waiting on a decided or processing // block. However, if the block was marked as rejected by the VM, the @@ -794,7 +794,7 @@ func (t *Transitive) issueWithAncestors( } // The block was issued into consensus. This is the happy path. - if status != choices.Unknown && (t.Consensus.Decided(blk) || t.Consensus.Processing(blkID)) { + if status != choices.Unknown && (t.isDecided(blk) || t.Consensus.Processing(blkID)) { return true, nil } @@ -815,7 +815,7 @@ func (t *Transitive) issueWithAncestors( // If the block is queued to be added to consensus, then it was issued. 
func (t *Transitive) wasIssued(blk snowman.Block) bool { blkID := blk.ID() - return t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) || t.pendingContains(blkID) + return t.isDecided(blk) || t.Consensus.Processing(blkID) || t.pendingContains(blkID) } // Issue [blk] to consensus once its ancestors have been issued. @@ -849,7 +849,7 @@ func (t *Transitive) issue( // block on the parent if needed parentID := blk.Parent() - if parent, err := t.getBlock(ctx, parentID); err != nil || !(t.Consensus.Decided(parent) || t.Consensus.Processing(parentID)) { + if parent, err := t.getBlock(ctx, parentID); err != nil || !(t.isDecided(parent) || t.Consensus.Processing(parentID)) { t.Ctx.Log.Verbo("block waiting for parent to be issued", zap.Stringer("blkID", blkID), zap.Stringer("parentID", parentID), @@ -958,7 +958,7 @@ func (t *Transitive) deliver( t.removeFromPending(blk) blkID := blk.ID() - if t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) { + if t.isDecided(blk) || t.Consensus.Processing(blkID) { // If [blk] is decided, then it shouldn't be added to consensus. // Similarly, if [blkID] is already in the processing set, it shouldn't // be added to consensus again. @@ -1073,7 +1073,7 @@ func (t *Transitive) removeFromPending(blk snowman.Block) { func (t *Transitive) addToNonVerifieds(blk snowman.Block) { // don't add this blk if it's decided or processing. blkID := blk.ID() - if t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) { + if t.isDecided(blk) || t.Consensus.Processing(blkID) { return } parentID := blk.Parent() @@ -1163,7 +1163,7 @@ func (t *Transitive) getProcessingAncestor(ctx context.Context, initialVote ids. return ids.Empty, false } - if t.Consensus.Decided(blk) { + if t.isDecided(blk) { t.Ctx.Log.Debug("dropping vote", zap.String("reason", "bubbled vote already decided"), zap.Stringer("initialVoteID", initialVote), @@ -1178,3 +1178,15 @@ func (t *Transitive) getProcessingAncestor(ctx context.Context, initialVote ids. 
bubbledVote = blk.Parent() } } + +// isDecided reports true if the provided block's status is Accepted, Rejected, +// or if the block's height implies that the block is either Accepted or +// Rejected. +func (t *Transitive) isDecided(blk snowman.Block) bool { + if blk.Status().Decided() { + return true + } + + _, lastAcceptedHeight := t.Consensus.LastAccepted() + return blk.Height() <= lastAcceptedHeight +} From 2e72c7c29c498e7ffa5bb31ff18495e14e2cdfb7 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 18 Jun 2024 11:42:55 -0400 Subject: [PATCH 069/102] Remove .Status() from .Accepted() (#3124) --- snow/consensus/snowman/snowman_block.go | 13 ++++--------- snow/consensus/snowman/topological.go | 19 ++++++++++++------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/snow/consensus/snowman/snowman_block.go b/snow/consensus/snowman/snowman_block.go index 7e8d339d201a..236c93645a02 100644 --- a/snow/consensus/snowman/snowman_block.go +++ b/snow/consensus/snowman/snowman_block.go @@ -5,14 +5,12 @@ package snowman import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" ) // Tracks the state of a snowman block type snowmanBlock struct { - // parameters to initialize the snowball instance with - params snowball.Parameters + t *Topological // block that this node contains. For the genesis, this value will be nil blk Block @@ -38,7 +36,7 @@ func (n *snowmanBlock) AddChild(child Block) { // if the snowball instance is nil, this is the first child. So the instance // should be initialized. 
if n.sb == nil { - n.sb = snowball.NewTree(snowball.SnowballFactory, n.params, childID) + n.sb = snowball.NewTree(snowball.SnowballFactory, n.t.params, childID) n.children = make(map[ids.ID]Block) } else { n.sb.Add(childID) @@ -47,11 +45,8 @@ func (n *snowmanBlock) AddChild(child Block) { n.children[childID] = child } -func (n *snowmanBlock) Accepted() bool { +func (n *snowmanBlock) Decided() bool { // if the block is nil, then this is the genesis which is defined as // accepted - if n.blk == nil { - return true - } - return n.blk.Status() == choices.Accepted + return n.blk == nil || n.blk.Height() <= n.t.lastAcceptedHeight } diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index 79703b8c3c70..8c09e2798fcc 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -126,7 +126,7 @@ func (ts *Topological) Initialize( ts.lastAcceptedID = lastAcceptedID ts.lastAcceptedHeight = lastAcceptedHeight ts.blocks = map[ids.ID]*snowmanBlock{ - lastAcceptedID: {params: ts.params}, + lastAcceptedID: {t: ts}, } ts.preferredHeights = make(map[uint64]ids.ID) ts.preference = lastAcceptedID @@ -162,8 +162,8 @@ func (ts *Topological) Add(blk Block) error { // add the block as a child of its parent, and add the block to the tree parentNode.AddChild(blk) ts.blocks[blkID] = &snowmanBlock{ - params: ts.params, - blk: blk, + t: ts, + blk: blk, } // If we are extending the preference, this is the new preference @@ -278,7 +278,12 @@ func (ts *Topological) RecordPoll(ctx context.Context, voteBag bag.Bag[ids.ID]) // Runtime = |live set| ; Space = Constant // Traverse from the preferred ID to the last accepted ancestor. - for block := startBlock; !block.Accepted(); { + // + // It is guaranteed that the first decided block we encounter is the last + // accepted block because the startBlock is the preferred block. 
The + // preferred block is guaranteed to either be the last accepted block or + // extend the accepted chain. + for block := startBlock; !block.Decided(); { blkID := block.blk.ID() ts.preferredIDs.Add(blkID) ts.preferredHeights[block.blk.Height()] = blkID @@ -349,7 +354,7 @@ func (ts *Topological) calculateInDegree(votes bag.Bag[ids.ID]) { } // If the vote is for the last accepted block, the vote is dropped - if votedBlock.Accepted() { + if votedBlock.Decided() { continue } @@ -373,7 +378,7 @@ func (ts *Topological) calculateInDegree(votes bag.Bag[ids.ID]) { // iterate through all the block's ancestors and set up the inDegrees of // the blocks - for n := ts.blocks[parentID]; !n.Accepted(); n = ts.blocks[parentID] { + for n := ts.blocks[parentID]; !n.Decided(); n = ts.blocks[parentID] { parentID = n.blk.Parent() // Increase the inDegree by one @@ -417,7 +422,7 @@ func (ts *Topological) pushVotes() []votes { // If the block is accepted, then we don't need to push votes to the // parent block - if block.Accepted() { + if block.Decided() { continue } From e740b44c209f43cf9785848298cd9d3bcc83b973 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 19 Jun 2024 14:49:01 -0400 Subject: [PATCH 070/102] Refactor `event.Blocker` into `job.Scheduler` (#3125) --- snow/engine/snowman/issuer.go | 43 +-- snow/engine/snowman/job/scheduler.go | 109 +++++++ snow/engine/snowman/job/scheduler_test.go | 338 ++++++++++++++++++++++ snow/engine/snowman/transitive.go | 106 +++---- snow/engine/snowman/transitive_test.go | 153 +++++++++- snow/engine/snowman/voter.go | 52 ++-- snow/event/blockable.go | 24 -- snow/event/blocker.go | 92 ------ snow/event/blocker_test.go | 116 -------- 9 files changed, 675 insertions(+), 358 deletions(-) create mode 100644 snow/engine/snowman/job/scheduler.go create mode 100644 snow/engine/snowman/job/scheduler_test.go delete mode 100644 snow/event/blockable.go delete mode 100644 snow/event/blocker.go delete mode 100644 snow/event/blocker_test.go diff 
--git a/snow/engine/snowman/issuer.go b/snow/engine/snowman/issuer.go index b3677d3cc21e..9af5fb9716ac 100644 --- a/snow/engine/snowman/issuer.go +++ b/snow/engine/snowman/issuer.go @@ -10,45 +10,30 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/snow/engine/snowman/job" ) +var _ job.Job[ids.ID] = (*issuer)(nil) + // issuer issues [blk] into to consensus after its dependencies are met. type issuer struct { t *Transitive nodeID ids.NodeID // nodeID of the peer that provided this block blk snowman.Block - issuedMetric prometheus.Counter - abandoned bool - deps set.Set[ids.ID] push bool + issuedMetric prometheus.Counter } -func (i *issuer) Dependencies() set.Set[ids.ID] { - return i.deps -} - -// Mark that a dependency has been met -func (i *issuer) Fulfill(ctx context.Context, id ids.ID) { - i.deps.Remove(id) - i.Update(ctx) -} - -// Abandon the attempt to issue [i.block] -func (i *issuer) Abandon(ctx context.Context, _ ids.ID) { - if !i.abandoned { - blkID := i.blk.ID() - i.t.removeFromPending(i.blk) - i.t.addToNonVerifieds(i.blk) - i.t.blocked.Abandon(ctx, blkID) +func (i *issuer) Execute(ctx context.Context, _ []ids.ID, abandoned []ids.ID) error { + if len(abandoned) == 0 { + // If the parent block wasn't abandoned, this block can be issued. + return i.t.deliver(ctx, i.nodeID, i.blk, i.push, i.issuedMetric) } - i.abandoned = true -} -func (i *issuer) Update(ctx context.Context) { - if i.abandoned || i.deps.Len() != 0 || i.t.errs.Errored() { - return - } - // Issue the block into consensus - i.t.errs.Add(i.t.deliver(ctx, i.nodeID, i.blk, i.push, i.issuedMetric)) + // If the parent block was abandoned, this block should be abandoned as + // well. 
+ blkID := i.blk.ID() + i.t.removeFromPending(i.blk) + i.t.addToNonVerifieds(i.blk) + return i.t.blocked.Abandon(ctx, blkID) } diff --git a/snow/engine/snowman/job/scheduler.go b/snow/engine/snowman/job/scheduler.go new file mode 100644 index 000000000000..e05f27130dec --- /dev/null +++ b/snow/engine/snowman/job/scheduler.go @@ -0,0 +1,109 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Package job provides a Scheduler to manage and execute Jobs with +// dependencies. +package job + +import "context" + +// Job is a unit of work that can be executed based on the result of resolving +// requested dependencies. +type Job[T any] interface { + Execute(ctx context.Context, fulfilled []T, abandoned []T) error +} + +type job[T comparable] struct { + // Once all dependencies are resolved, the job will be executed. + numUnresolved int + fulfilled []T + abandoned []T + job Job[T] +} + +// Scheduler implements a dependency graph for jobs. Jobs can be registered with +// dependencies, and once all dependencies are resolved, the job will be +// executed. +type Scheduler[T comparable] struct { + // dependents maps a dependency to the jobs that depend on it. + dependents map[T][]*job[T] +} + +func NewScheduler[T comparable]() *Scheduler[T] { + return &Scheduler[T]{ + dependents: make(map[T][]*job[T]), + } +} + +// Schedule a job to be executed once all of its dependencies are resolved. If a +// job is scheduled with no dependencies, it's executed immediately. +// +// In order to prevent a memory leak, all dependencies must eventually either be +// fulfilled or abandoned. +// +// While registering a job with duplicate dependencies is discouraged, it is +// allowed. 
+func (s *Scheduler[T]) Schedule(ctx context.Context, userJob Job[T], dependencies ...T) error { + numUnresolved := len(dependencies) + if numUnresolved == 0 { + return userJob.Execute(ctx, nil, nil) + } + + j := &job[T]{ + numUnresolved: numUnresolved, + job: userJob, + } + for _, d := range dependencies { + s.dependents[d] = append(s.dependents[d], j) + } + return nil +} + +// NumDependencies returns the number of dependencies that jobs are currently +// blocking on. +func (s *Scheduler[_]) NumDependencies() int { + return len(s.dependents) +} + +// Fulfill a dependency. If all dependencies for a job are resolved, the job +// will be executed. +// +// It is safe to call the scheduler during the execution of a job. +func (s *Scheduler[T]) Fulfill(ctx context.Context, dependency T) error { + return s.resolveDependency(ctx, dependency, true) +} + +// Abandon a dependency. If all dependencies for a job are resolved, the job +// will be executed. +// +// It is safe to call the scheduler during the execution of a job. +func (s *Scheduler[T]) Abandon(ctx context.Context, dependency T) error { + return s.resolveDependency(ctx, dependency, false) +} + +func (s *Scheduler[T]) resolveDependency( + ctx context.Context, + dependency T, + fulfilled bool, +) error { + jobs := s.dependents[dependency] + delete(s.dependents, dependency) + + for _, job := range jobs { + job.numUnresolved-- + if fulfilled { + job.fulfilled = append(job.fulfilled, dependency) + } else { + job.abandoned = append(job.abandoned, dependency) + } + + if job.numUnresolved > 0 { + continue + } + + if err := job.job.Execute(ctx, job.fulfilled, job.abandoned); err != nil { + return err + } + } + return nil +} diff --git a/snow/engine/snowman/job/scheduler_test.go b/snow/engine/snowman/job/scheduler_test.go new file mode 100644 index 000000000000..db6502c5f74c --- /dev/null +++ b/snow/engine/snowman/job/scheduler_test.go @@ -0,0 +1,338 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package job + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + depToResolve = iota + depToNeglect +) + +var errDuplicateExecution = errors.New("job already executed") + +type testJob struct { + calledExecute bool + fulfilled []int + abandoned []int +} + +func (j *testJob) Execute(_ context.Context, fulfilled []int, abandoned []int) error { + if j.calledExecute { + return errDuplicateExecution + } + j.calledExecute = true + j.fulfilled = fulfilled + j.abandoned = abandoned + return nil +} + +func (j *testJob) reset() { + j.calledExecute = false + j.fulfilled = nil + j.abandoned = nil +} + +func newSchedulerWithJob[T comparable]( + t *testing.T, + job Job[T], + dependencies []T, + fulfilled []T, + abandoned []T, +) *Scheduler[T] { + s := NewScheduler[T]() + require.NoError(t, s.Schedule(context.Background(), job, dependencies...)) + for _, d := range fulfilled { + require.NoError(t, s.Fulfill(context.Background(), d)) + } + for _, d := range abandoned { + require.NoError(t, s.Abandon(context.Background(), d)) + } + return s +} + +func TestScheduler_Schedule(t *testing.T) { + userJob := &testJob{} + tests := []struct { + name string + scheduler *Scheduler[int] + dependencies []int + expectedExecuted bool + expectedNumDependencies int + expectedScheduler *Scheduler[int] + }{ + { + name: "no dependencies", + scheduler: NewScheduler[int](), + dependencies: nil, + expectedExecuted: true, + expectedNumDependencies: 0, + expectedScheduler: NewScheduler[int](), + }, + { + name: "one dependency", + scheduler: NewScheduler[int](), + dependencies: []int{depToResolve}, + expectedExecuted: false, + expectedNumDependencies: 1, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToResolve: { + { + numUnresolved: 1, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + }, + }, + }, + }, + { + name: "two dependencies", + scheduler: 
NewScheduler[int](), + dependencies: []int{depToResolve, depToNeglect}, + expectedExecuted: false, + expectedNumDependencies: 2, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToResolve: { + { + numUnresolved: 2, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + }, + depToNeglect: { + { + numUnresolved: 2, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + }, + }, + }, + }, + { + name: "additional dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve}, nil, nil), + dependencies: []int{depToResolve}, + expectedExecuted: false, + expectedNumDependencies: 1, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToResolve: { + { + numUnresolved: 1, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + { + numUnresolved: 1, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + // Reset the variable between tests + userJob.reset() + + require.NoError(test.scheduler.Schedule(context.Background(), userJob, test.dependencies...)) + require.Equal(test.expectedNumDependencies, test.scheduler.NumDependencies()) + require.Equal(test.expectedExecuted, userJob.calledExecute) + require.Empty(userJob.fulfilled) + require.Empty(userJob.abandoned) + require.Equal(test.expectedScheduler, test.scheduler) + }) + } +} + +func TestScheduler_Fulfill(t *testing.T) { + userJob := &testJob{} + tests := []struct { + name string + scheduler *Scheduler[int] + expectedExecuted bool + expectedFulfilled []int + expectedAbandoned []int + expectedScheduler *Scheduler[int] + }{ + { + name: "no jobs", + scheduler: NewScheduler[int](), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: NewScheduler[int](), + }, + { + name: "single dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve}, nil, nil), + 
expectedExecuted: true, + expectedFulfilled: []int{depToResolve}, + expectedAbandoned: nil, + expectedScheduler: NewScheduler[int](), + }, + { + name: "non-existent dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToNeglect}, nil, nil), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: newSchedulerWithJob(t, userJob, []int{depToNeglect}, nil, nil), + }, + { + name: "incomplete dependencies", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToNeglect}, nil, nil), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToNeglect: { + { + numUnresolved: 1, + fulfilled: []int{depToResolve}, + abandoned: nil, + job: userJob, + }, + }, + }, + }, + }, + { + name: "duplicate dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToResolve}, nil, nil), + expectedExecuted: true, + expectedFulfilled: []int{depToResolve, depToResolve}, + expectedAbandoned: nil, + expectedScheduler: NewScheduler[int](), + }, + { + name: "previously abandoned", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToNeglect}, nil, []int{depToNeglect}), + expectedExecuted: true, + expectedFulfilled: []int{depToResolve}, + expectedAbandoned: []int{depToNeglect}, + expectedScheduler: NewScheduler[int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + // Reset the variable between tests + userJob.reset() + + require.NoError(test.scheduler.Fulfill(context.Background(), depToResolve)) + require.Equal(test.expectedExecuted, userJob.calledExecute) + require.Equal(test.expectedFulfilled, userJob.fulfilled) + require.Equal(test.expectedAbandoned, userJob.abandoned) + require.Equal(test.expectedScheduler, test.scheduler) + }) + } +} + +func TestScheduler_Abandon(t *testing.T) { + userJob := &testJob{} + tests := 
[]struct { + name string + scheduler *Scheduler[int] + expectedExecuted bool + expectedFulfilled []int + expectedAbandoned []int + expectedScheduler *Scheduler[int] + }{ + { + name: "no jobs", + scheduler: NewScheduler[int](), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: NewScheduler[int](), + }, + { + name: "single dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve}, nil, nil), + expectedExecuted: true, + expectedFulfilled: nil, + expectedAbandoned: []int{depToResolve}, + expectedScheduler: NewScheduler[int](), + }, + { + name: "non-existent dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToNeglect}, nil, nil), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: newSchedulerWithJob(t, userJob, []int{depToNeglect}, nil, nil), + }, + { + name: "incomplete dependencies", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToNeglect}, nil, nil), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToNeglect: { + { + numUnresolved: 1, + fulfilled: nil, + abandoned: []int{depToResolve}, + job: userJob, + }, + }, + }, + }, + }, + { + name: "duplicate dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToResolve}, nil, nil), + expectedExecuted: true, + expectedFulfilled: nil, + expectedAbandoned: []int{depToResolve, depToResolve}, + expectedScheduler: NewScheduler[int](), + }, + { + name: "previously fulfilled", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToNeglect}, []int{depToNeglect}, nil), + expectedExecuted: true, + expectedFulfilled: []int{depToNeglect}, + expectedAbandoned: []int{depToResolve}, + expectedScheduler: NewScheduler[int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + // 
Reset the variable between tests + userJob.reset() + + require.NoError(test.scheduler.Abandon(context.Background(), depToResolve)) + require.Equal(test.expectedExecuted, userJob.calledExecute) + require.Equal(test.expectedFulfilled, userJob.fulfilled) + require.Equal(test.expectedAbandoned, userJob.abandoned) + require.Equal(test.expectedScheduler, test.scheduler) + }) + } +} diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index f424ff993e99..680cb9c5e09c 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -21,7 +21,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/ancestor" - "github.com/ava-labs/avalanchego/snow/event" + "github.com/ava-labs/avalanchego/snow/engine/snowman/job" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/bimap" @@ -30,7 +30,6 @@ import ( "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const nonVerifiedCacheSize = 64 * units.MiB @@ -83,14 +82,11 @@ type Transitive struct { // operations that are blocked on a block being issued. This could be // issuing another block, responding to a query, or applying votes to consensus - blocked event.Blocker + blocked *job.Scheduler[ids.ID] // number of times build block needs to be called once the number of // processing blocks has gone below the optimal number. 
pendingBuildBlocks int - - // errs tracks if an error has occurred in a callback - errs wrappers.Errs } func New(config Config) (*Transitive, error) { @@ -147,6 +143,7 @@ func New(config Config) (*Transitive, error) { nonVerifieds: ancestor.NewTree(), nonVerifiedCache: nonVerifiedCache, acceptedFrontiers: acceptedFrontiers, + blocked: job.NewScheduler[ids.ID](), polls: polls, blkReqs: bimap.New[common.Request, ids.ID](), blkReqSourceMetric: make(map[common.Request]prometheus.Counter), @@ -293,8 +290,11 @@ func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID } delete(t.blkReqSourceMetric, req) - // Because the get request was dropped, we no longer expect blkID to be issued. - t.blocked.Abandon(ctx, blkID) + // Because the get request was dropped, we no longer expect blkID to be + // issued. + if err := t.blocked.Abandon(ctx, blkID); err != nil { + return err + } return t.executeDeferredWork(ctx) } @@ -391,21 +391,24 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin // issued into consensus v := &voter{ t: t, - vdr: nodeID, + nodeID: nodeID, requestID: requestID, responseOptions: responseOptions, } // Wait until [preferredID] and [preferredIDAtHeight] have been issued to // consensus before applying this chit. 
+ var deps []ids.ID if !addedPreferred { - v.deps.Add(preferredID) + deps = append(deps, preferredID) } if !addedPreferredIDAtHeight { - v.deps.Add(preferredIDAtHeight) + deps = append(deps, preferredIDAtHeight) } - t.blocked.Register(ctx, v) + if err := t.blocked.Schedule(ctx, v, deps...); err != nil { + return err + } return t.executeDeferredWork(ctx) } @@ -415,14 +418,14 @@ func (t *Transitive) QueryFailed(ctx context.Context, nodeID ids.NodeID, request return t.Chits(ctx, nodeID, requestID, lastAccepted, lastAccepted, lastAccepted) } - t.blocked.Register( - ctx, - &voter{ - t: t, - vdr: nodeID, - requestID: requestID, - }, - ) + v := &voter{ + t: t, + nodeID: nodeID, + requestID: requestID, + } + if err := t.blocked.Schedule(ctx, v); err != nil { + return err + } return t.executeDeferredWork(ctx) } @@ -534,7 +537,7 @@ func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { zap.Uint32("requestID", t.requestID), zap.Stringer("polls", t.polls), zap.Reflect("outstandingBlockRequests", t.blkReqs), - zap.Stringer("blockedJobs", &t.blocked), + zap.Int("numMissingDependencies", t.blocked.NumDependencies()), zap.Int("pendingBuildBlocks", t.pendingBuildBlocks), ) @@ -560,7 +563,7 @@ func (t *Transitive) executeDeferredWork(ctx context.Context) error { t.metrics.numRequests.Set(float64(t.blkReqs.Len())) t.metrics.numBlocked.Set(float64(len(t.pending))) - t.metrics.numBlockers.Set(float64(t.blocked.Len())) + t.metrics.numBlockers.Set(float64(t.blocked.NumDependencies())) t.metrics.numNonVerifieds.Set(float64(t.nonVerifieds.Len())) return nil } @@ -646,9 +649,6 @@ func (t *Transitive) sendChits(ctx context.Context, nodeID ids.NodeID, requestID // Build blocks if they have been requested and the number of processing blocks // is less than optimal. 
func (t *Transitive) buildBlocks(ctx context.Context) error { - if err := t.errs.Err; err != nil { - return err - } for t.pendingBuildBlocks > 0 && t.Consensus.NumProcessing() < t.Params.OptimalProcessing { t.pendingBuildBlocks-- @@ -758,14 +758,14 @@ func (t *Transitive) issueFrom( delete(t.blkReqSourceMetric, req) } - issued := t.isDecided(blk) || t.Consensus.Processing(blkID) - if issued { - // A dependency should never be waiting on a decided or processing - // block. However, if the block was marked as rejected by the VM, the - // dependencies may still be waiting. Therefore, they should abandoned. - t.blocked.Abandon(ctx, blkID) + if !t.isDecided(blk) && !t.Consensus.Processing(blkID) { + return false, nil } - return issued, t.errs.Err + + // A dependency should never be waiting on a decided or processing block. + // However, if the block was marked as rejected by the VM, the dependencies + // may still be waiting. Therefore, they should abandoned. + return true, t.blocked.Abandon(ctx, blkID) } // issueWithAncestors attempts to issue the branch ending with [blk] to consensus. @@ -806,8 +806,7 @@ func (t *Transitive) issueWithAncestors( // We don't have this block and have no reason to expect that we will get it. // Abandon the block to avoid a memory leak. - t.blocked.Abandon(ctx, blkID) - return false, t.errs.Err + return false, t.blocked.Abandon(ctx, blkID) } // If the block has been decided, then it is marked as having been issued. 
@@ -843,22 +842,24 @@ func (t *Transitive) issue( t: t, nodeID: nodeID, blk: blk, - issuedMetric: issuedMetric, push: push, + issuedMetric: issuedMetric, } // block on the parent if needed - parentID := blk.Parent() + var ( + parentID = blk.Parent() + deps []ids.ID + ) if parent, err := t.getBlock(ctx, parentID); err != nil || !(t.isDecided(parent) || t.Consensus.Processing(parentID)) { t.Ctx.Log.Verbo("block waiting for parent to be issued", zap.Stringer("blkID", blkID), zap.Stringer("parentID", parentID), ) - i.deps.Add(parentID) + deps = append(deps, parentID) } - t.blocked.Register(ctx, i) - return t.errs.Err + return t.blocked.Schedule(ctx, i, deps...) } // Request that [vdr] send us block [blkID] @@ -962,20 +963,18 @@ func (t *Transitive) deliver( // If [blk] is decided, then it shouldn't be added to consensus. // Similarly, if [blkID] is already in the processing set, it shouldn't // be added to consensus again. - t.blocked.Abandon(ctx, blkID) - return t.errs.Err + return t.blocked.Abandon(ctx, blkID) } parentID := blk.Parent() parent, err := t.getBlock(ctx, parentID) // Because the dependency must have been fulfilled by the time this function // is called - we don't expect [err] to be non-nil. But it is handled for - // completness and future proofing. + // completeness and future proofing. if err != nil || !(parent.Status() == choices.Accepted || t.Consensus.Processing(parentID)) { // if the parent isn't processing or the last accepted block, then this // block is effectively rejected - t.blocked.Abandon(ctx, blkID) - return t.errs.Err + return t.blocked.Abandon(ctx, blkID) } // By ensuring that the parent is either processing or accepted, it is @@ -986,8 +985,7 @@ func (t *Transitive) deliver( return err } if !blkAdded { - t.blocked.Abandon(ctx, blkID) - return t.errs.Err + return t.blocked.Abandon(ctx, blkID) } // Add all the oracle blocks if they exist. 
We call verify on all the blocks @@ -1026,7 +1024,9 @@ func (t *Transitive) deliver( t.sendQuery(ctx, blkID, blk.Bytes(), push) } - t.blocked.Fulfill(ctx, blkID) + if err := t.blocked.Fulfill(ctx, blkID); err != nil { + return err + } for _, blk := range added { blkID := blk.ID() if t.Consensus.IsPreferred(blkID) { @@ -1034,7 +1034,9 @@ func (t *Transitive) deliver( } t.removeFromPending(blk) - t.blocked.Fulfill(ctx, blkID) + if err := t.blocked.Fulfill(ctx, blkID); err != nil { + return err + } if req, ok := t.blkReqs.DeleteValue(blkID); ok { delete(t.blkReqSourceMetric, req) } @@ -1042,7 +1044,9 @@ func (t *Transitive) deliver( for _, blk := range dropped { blkID := blk.ID() t.removeFromPending(blk) - t.blocked.Abandon(ctx, blkID) + if err := t.blocked.Abandon(ctx, blkID); err != nil { + return err + } if req, ok := t.blkReqs.DeleteValue(blkID); ok { delete(t.blkReqSourceMetric, req) } @@ -1052,12 +1056,12 @@ func (t *Transitive) deliver( // immediately by votes that were pending their issuance. If this is the // case, we should not be requesting any chits. if t.Consensus.NumProcessing() == 0 { - return t.errs.Err + return nil } // If we should issue multiple queries at the same time, we need to repoll t.repoll(ctx) - return t.errs.Err + return nil } // Returns true if the block whose ID is [blkID] is waiting to be issued to consensus diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 8c912df64a4a..18a1d320495e 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -170,7 +170,7 @@ func TestEngineDropsAttemptToIssueBlockAfterFailedRequest(t *testing.T) { // job blocked on [parent]'s issuance. 
require.NoError(engine.Put(context.Background(), peerID, 0, child.Bytes())) require.NotNil(request) - require.Len(engine.blocked, 1) + require.Equal(1, engine.blocked.NumDependencies()) vm.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { return nil, errUnknownBytes @@ -179,7 +179,7 @@ func TestEngineDropsAttemptToIssueBlockAfterFailedRequest(t *testing.T) { // Because this request doesn't provide [parent], the [child] job should be // cancelled. require.NoError(engine.Put(context.Background(), request.NodeID, request.RequestID, nil)) - require.Empty(engine.blocked) + require.Zero(engine.blocked.NumDependencies()) } func TestEngineQuery(t *testing.T) { @@ -315,7 +315,7 @@ func TestEngineQuery(t *testing.T) { require.NoError(engine.Put(context.Background(), getRequest.NodeID, getRequest.RequestID, child.Bytes())) require.Equal(choices.Accepted, parent.Status()) require.Equal(choices.Accepted, child.Status()) - require.Empty(engine.blocked) + require.Zero(engine.blocked.NumDependencies()) } func TestEngineMultipleQuery(t *testing.T) { @@ -461,7 +461,7 @@ func TestEngineMultipleQuery(t *testing.T) { require.NoError(te.Chits(context.Background(), vdr2, *queryRequestID, blk0.ID(), blk0.ID(), blk0.ID())) require.Equal(choices.Accepted, blk1.Status()) - require.Empty(te.blocked) + require.Zero(te.blocked.NumDependencies()) } func TestEngineBlockedIssue(t *testing.T) { @@ -879,12 +879,12 @@ func TestEngineAbandonChit(t *testing.T) { // Register a voter dependency on an unknown block. 
require.NoError(te.Chits(context.Background(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID)) - require.Len(te.blocked, 1) + require.Equal(1, te.blocked.NumDependencies()) sender.CantSendPullQuery = false require.NoError(te.GetFailed(context.Background(), vdr, reqID)) - require.Empty(te.blocked) + require.Zero(te.blocked.NumDependencies()) } func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { @@ -932,7 +932,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { // Register a voter dependency on an unknown block. require.NoError(te.Chits(context.Background(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID)) - require.Len(te.blocked, 1) + require.Equal(1, te.blocked.NumDependencies()) sender.CantSendPullQuery = false @@ -944,7 +944,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { // Respond with an unexpected block and verify that the request is correctly // cleared. require.NoError(te.Put(context.Background(), vdr, reqID, snowmantest.GenesisBytes)) - require.Empty(te.blocked) + require.Zero(te.blocked.NumDependencies()) } func TestEngineBlockingChitRequest(t *testing.T) { @@ -988,7 +988,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { require.NoError(te.PushQuery(context.Background(), vdr, 0, blockingBlk.Bytes(), 0)) - require.Len(te.blocked, 2) + require.Equal(2, te.blocked.NumDependencies()) sender.CantSendPullQuery = false @@ -1000,7 +1000,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { te.metrics.issued.WithLabelValues(unknownSource), )) - require.Empty(te.blocked) + require.Zero(te.blocked.NumDependencies()) } func TestEngineBlockingChitResponse(t *testing.T) { @@ -1098,7 +1098,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { missingBlk.ID(), blockingBlk.ID(), )) - require.Len(te.blocked, 2) + require.Equal(2, te.blocked.NumDependencies()) queryRequest = nil sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { @@ 
-2857,6 +2857,137 @@ func TestEngineVoteStallRegression(t *testing.T) { require.Equal(choices.Rejected, rejectedChain[0].Status()) } +// When a voter is registered with multiple dependencies, the engine must not +// execute the voter until all of the dependencies have been resolved; even if +// one of the dependencies has been abandoned. +func TestEngineEarlyTerminateVoterRegression(t *testing.T) { + require := require.New(t) + + config := DefaultConfig(t) + nodeID := ids.GenerateTestNodeID() + require.NoError(config.Validators.AddStaker(config.Ctx.SubnetID, nodeID, nil, ids.Empty, 1)) + + sender := &common.SenderTest{ + T: t, + SendChitsF: func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) {}, + } + sender.Default(true) + config.Sender = sender + + chain := snowmantest.BuildDescendants(snowmantest.Genesis, 3) + vm := &block.TestVM{ + TestVM: common.TestVM{ + T: t, + InitializeF: func( + context.Context, + *snow.Context, + database.Database, + []byte, + []byte, + []byte, + chan<- common.Message, + []*common.Fx, + common.AppSender, + ) error { + return nil + }, + SetStateF: func(context.Context, snow.State) error { + return nil + }, + }, + ParseBlockF: MakeParseBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + chain, + ), + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + ), + SetPreferenceF: func(context.Context, ids.ID) error { + return nil + }, + LastAcceptedF: MakeLastAcceptedBlockF( + snowmantest.Genesis, + chain, + ), + } + vm.Default(true) + config.VM = vm + + engine, err := New(config) + require.NoError(err) + require.NoError(engine.Start(context.Background(), 0)) + + var pollRequestIDs []uint32 + sender.SendPullQueryF = func(_ context.Context, polledNodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { + require.Equal(set.Of(nodeID), polledNodeIDs) + pollRequestIDs = append(pollRequestIDs, requestID) + } + + getRequestIDs := make(map[ids.ID]uint32) + sender.SendGetF = func(_ context.Context, 
requestedNodeID ids.NodeID, requestID uint32, blkID ids.ID) { + require.Equal(nodeID, requestedNodeID) + getRequestIDs[blkID] = requestID + } + + // Issue block 0 to trigger poll 0. + require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + chain[0].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 1) + require.Empty(getRequestIDs) + + // Update GetBlock to return, the newly issued, block 0. This is needed to + // enable the issuance of block 1. + vm.GetBlockF = MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + chain[:1], + ) + + // Vote for block 2 or block 1 in poll 0. This should trigger Get requests + // for both block 2 and block 1. + require.NoError(engine.Chits( + context.Background(), + nodeID, + pollRequestIDs[0], + chain[2].ID(), + chain[1].ID(), + snowmantest.GenesisID, + )) + require.Len(pollRequestIDs, 1) + require.Contains(getRequestIDs, chain[1].ID()) + require.Contains(getRequestIDs, chain[2].ID()) + + // Mark the request for block 2 as failed. This should not cause the poll to + // be applied as there is still an outstanding request for block 1. + require.NoError(engine.GetFailed( + context.Background(), + nodeID, + getRequestIDs[chain[2].ID()], + )) + require.Len(pollRequestIDs, 1) + + // Issue block 1. This should cause the poll to be applied to both block 0 + // and block 1. + require.NoError(engine.Put( + context.Background(), + nodeID, + getRequestIDs[chain[1].ID()], + chain[1].Bytes(), + )) + // Because Put added a new preferred block to the chain, a new poll will be + // created. + require.Len(pollRequestIDs, 2) + require.Equal(choices.Accepted, chain[0].Status()) + require.Equal(choices.Accepted, chain[1].Status()) + // Block 2 still hasn't been issued, so it's status should remain + // Processing. 
+ require.Equal(choices.Processing, chain[2].Status()) +} + func TestGetProcessingAncestor(t *testing.T) { var ( ctx = snowtest.ConsensusContext( diff --git a/snow/engine/snowman/voter.go b/snow/engine/snowman/voter.go index f987faf2aac8..c57a1c733551 100644 --- a/snow/engine/snowman/voter.go +++ b/snow/engine/snowman/voter.go @@ -9,39 +9,25 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/snowman/job" "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" ) -// Voter records chits received from [vdr] once its dependencies are met. +var _ job.Job[ids.ID] = (*voter)(nil) + +// Voter records chits received from [nodeID] once its dependencies are met. type voter struct { t *Transitive - vdr ids.NodeID + nodeID ids.NodeID requestID uint32 responseOptions []ids.ID - deps set.Set[ids.ID] -} - -func (v *voter) Dependencies() set.Set[ids.ID] { - return v.deps } -// Mark that a dependency has been met. -func (v *voter) Fulfill(ctx context.Context, id ids.ID) { - v.deps.Remove(id) - v.Update(ctx) -} - -// Abandon this attempt to record chits. -func (v *voter) Abandon(ctx context.Context, id ids.ID) { - v.Fulfill(ctx, id) -} - -func (v *voter) Update(ctx context.Context) { - if v.deps.Len() != 0 || v.t.errs.Errored() { - return - } - +// The resolution results from the dependencies of the voter aren't explicitly +// used. The responseOptions are used to determine which block to apply the vote +// to. The dependencies are only used to optimistically delay the application of +// the vote until the blocks have been issued. 
+func (v *voter) Execute(ctx context.Context, _ []ids.ID, _ []ids.ID) error { var ( vote ids.ID shouldVote bool @@ -60,13 +46,13 @@ func (v *voter) Update(ctx context.Context) { var results []bag.Bag[ids.ID] if shouldVote { v.t.selectedVoteIndex.Observe(float64(voteIndex)) - results = v.t.polls.Vote(v.requestID, v.vdr, vote) + results = v.t.polls.Vote(v.requestID, v.nodeID, vote) } else { - results = v.t.polls.Drop(v.requestID, v.vdr) + results = v.t.polls.Drop(v.requestID, v.nodeID) } if len(results) == 0 { - return + return nil } for _, result := range results { @@ -75,24 +61,20 @@ func (v *voter) Update(ctx context.Context) { zap.Stringer("result", &result), ) if err := v.t.Consensus.RecordPoll(ctx, result); err != nil { - v.t.errs.Add(err) + return err } } - if v.t.errs.Errored() { - return - } - if err := v.t.VM.SetPreference(ctx, v.t.Consensus.Preference()); err != nil { - v.t.errs.Add(err) - return + return err } if v.t.Consensus.NumProcessing() == 0 { v.t.Ctx.Log.Debug("Snowman engine can quiesce") - return + return nil } v.t.Ctx.Log.Debug("Snowman engine can't quiesce") v.t.repoll(ctx) + return nil } diff --git a/snow/event/blockable.go b/snow/event/blockable.go deleted file mode 100644 index 404e95c2aee3..000000000000 --- a/snow/event/blockable.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package event - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -// Blockable defines what an object must implement to be able to block on -// dependent events being completed. 
-type Blockable interface { - // IDs that this object is blocking on - Dependencies() set.Set[ids.ID] - // Notify this object that an event has been fulfilled - Fulfill(context.Context, ids.ID) - // Notify this object that an event has been abandoned - Abandon(context.Context, ids.ID) - // Update the state of this object without changing the status of any events - Update(context.Context) -} diff --git a/snow/event/blocker.go b/snow/event/blocker.go deleted file mode 100644 index 9c15ffb50604..000000000000 --- a/snow/event/blocker.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package event - -import ( - "context" - "fmt" - "strings" - - "github.com/ava-labs/avalanchego/ids" -) - -const ( - minBlockerSize = 16 -) - -// Blocker tracks Blockable events. -// Blocker is used to track events that require their dependencies to be -// fulfilled before them. Once a Blockable event is registered, it will be -// notified once any of its dependencies are fulfilled or abandoned. 
-type Blocker map[ids.ID][]Blockable - -func (b *Blocker) init() { - if *b == nil { - *b = make(map[ids.ID][]Blockable, minBlockerSize) - } -} - -// Returns the number of items that have dependencies waiting on -// them to be fulfilled -func (b *Blocker) Len() int { - return len(*b) -} - -// Fulfill notifies all objects blocking on the event whose ID is that -// the event has happened -func (b *Blocker) Fulfill(ctx context.Context, id ids.ID) { - b.init() - - blocking := (*b)[id] - delete(*b, id) - - for _, pending := range blocking { - pending.Fulfill(ctx, id) - } -} - -// Abandon notifies all objects blocking on the event whose ID is that -// the event has been abandoned -func (b *Blocker) Abandon(ctx context.Context, id ids.ID) { - b.init() - - blocking := (*b)[id] - delete(*b, id) - - for _, pending := range blocking { - pending.Abandon(ctx, id) - } -} - -// Register a new Blockable and its dependencies -func (b *Blocker) Register(ctx context.Context, pending Blockable) { - b.init() - - for pendingID := range pending.Dependencies() { - (*b)[pendingID] = append((*b)[pendingID], pending) - } - - pending.Update(ctx) -} - -// PrefixedString returns the same value as the String function, with all the -// new lines prefixed by [prefix] -func (b *Blocker) PrefixedString(prefix string) string { - b.init() - - sb := strings.Builder{} - sb.WriteString(fmt.Sprintf("Blocking on %d IDs:", len(*b))) - for key, value := range *b { - sb.WriteString(fmt.Sprintf("\n%sID[%s]: %d", - prefix, - key, - len(value), - )) - } - return strings.TrimSuffix(sb.String(), "\n") -} - -func (b *Blocker) String() string { - return b.PrefixedString("") -} diff --git a/snow/event/blocker_test.go b/snow/event/blocker_test.go deleted file mode 100644 index d7620bfebe1a..000000000000 --- a/snow/event/blocker_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package event - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -func TestBlocker(t *testing.T) { - require := require.New(t) - - b := Blocker(nil) - - a := newTestBlockable() - - id0 := ids.GenerateTestID() - id1 := ids.GenerateTestID() - id2 := ids.GenerateTestID() - - calledDep := new(bool) - a.dependencies = func() set.Set[ids.ID] { - *calledDep = true - - s := set.Of(id0, id1) - return s - } - calledFill := new(bool) - a.fulfill = func(context.Context, ids.ID) { - *calledFill = true - } - calledAbandon := new(bool) - a.abandon = func(context.Context, ids.ID) { - *calledAbandon = true - } - calledUpdate := new(bool) - a.update = func(context.Context) { - *calledUpdate = true - } - - b.Register(context.Background(), a) - - require.True(*calledDep) - require.False(*calledFill) - require.False(*calledAbandon) - require.True(*calledUpdate) - - b.Fulfill(context.Background(), id2) - b.Abandon(context.Background(), id2) - - require.True(*calledDep) - require.False(*calledFill) - require.False(*calledAbandon) - require.True(*calledUpdate) - - b.Fulfill(context.Background(), id0) - - require.True(*calledDep) - require.True(*calledFill) - require.False(*calledAbandon) - require.True(*calledUpdate) - - b.Abandon(context.Background(), id0) - - require.True(*calledDep) - require.True(*calledFill) - require.False(*calledAbandon) - require.True(*calledUpdate) - - b.Abandon(context.Background(), id1) - - require.True(*calledDep) - require.True(*calledFill) - require.True(*calledAbandon) - require.True(*calledUpdate) -} - -type testBlockable struct { - dependencies func() set.Set[ids.ID] - fulfill func(context.Context, ids.ID) - abandon func(context.Context, ids.ID) - update func(context.Context) -} - -func newTestBlockable() *testBlockable { - return &testBlockable{ - dependencies: func() set.Set[ids.ID] { - return 
set.Set[ids.ID]{} - }, - fulfill: func(context.Context, ids.ID) {}, - abandon: func(context.Context, ids.ID) {}, - update: func(context.Context) {}, - } -} - -func (b *testBlockable) Dependencies() set.Set[ids.ID] { - return b.dependencies() -} - -func (b *testBlockable) Fulfill(ctx context.Context, id ids.ID) { - b.fulfill(ctx, id) -} - -func (b *testBlockable) Abandon(ctx context.Context, id ids.ID) { - b.abandon(ctx, id) -} - -func (b *testBlockable) Update(ctx context.Context) { - b.update(ctx) -} From 28b4790cbcc00584ec4081cd09208825ef46d3ae Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 19 Jun 2024 16:27:57 -0400 Subject: [PATCH 071/102] Remove block lookup from `deliver` (#3130) --- snow/engine/snowman/transitive.go | 32 +++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 680cb9c5e09c..cd252980e34f 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -958,22 +958,15 @@ func (t *Transitive) deliver( // longer pending t.removeFromPending(blk) - blkID := blk.ID() - if t.isDecided(blk) || t.Consensus.Processing(blkID) { - // If [blk] is decided, then it shouldn't be added to consensus. - // Similarly, if [blkID] is already in the processing set, it shouldn't - // be added to consensus again. - return t.blocked.Abandon(ctx, blkID) - } - - parentID := blk.Parent() - parent, err := t.getBlock(ctx, parentID) - // Because the dependency must have been fulfilled by the time this function - // is called - we don't expect [err] to be non-nil. But it is handled for - // completeness and future proofing. 
- if err != nil || !(parent.Status() == choices.Accepted || t.Consensus.Processing(parentID)) { - // if the parent isn't processing or the last accepted block, then this - // block is effectively rejected + var ( + parentID = blk.Parent() + blkID = blk.ID() + ) + if !t.canIssueChildOn(parentID) || t.Consensus.Processing(blkID) { + // If the parent isn't processing or the last accepted block, then this + // block is effectively rejected. + // Additionally, if [blkID] is already in the processing set, it + // shouldn't be added to consensus again. return t.blocked.Abandon(ctx, blkID) } @@ -1183,6 +1176,13 @@ func (t *Transitive) getProcessingAncestor(ctx context.Context, initialVote ids. } } +// canIssueChildOn reports true if it is valid for a child of parentID to be +// verified and added to consensus. +func (t *Transitive) canIssueChildOn(parentID ids.ID) bool { + lastAcceptedID, _ := t.Consensus.LastAccepted() + return parentID == lastAcceptedID || t.Consensus.Processing(parentID) +} + // isDecided reports true if the provided block's status is Accepted, Rejected, // or if the block's height implies that the block is either Accepted or // Rejected. From f8fa3cfd59ed7f52f1dc8baddd366457db453eda Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu, 20 Jun 2024 09:27:43 -0700 Subject: [PATCH 072/102] [chains/atomic] Remove a nested if statement (#3135) Signed-off-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> --- chains/atomic/state.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/chains/atomic/state.go b/chains/atomic/state.go index 1eed23803fb7..b134d2f7a5d8 100644 --- a/chains/atomic/state.go +++ b/chains/atomic/state.go @@ -147,12 +147,7 @@ func (s *state) SetValue(e *Element) error { // current engine state. 
func (s *state) RemoveValue(key []byte) error { value, err := s.loadValue(key) - if err != nil { - if err != database.ErrNotFound { - // An unexpected error occurred, so we should propagate that error - return err - } - + if err == database.ErrNotFound { // The value doesn't exist, so we should optimistically delete it dbElem := dbElement{Present: false} valueBytes, err := Codec.Marshal(CodecVersion, &dbElem) @@ -161,6 +156,9 @@ func (s *state) RemoveValue(key []byte) error { } return s.valueDB.Put(key, valueBytes) } + if err != nil { + return err + } // Don't allow the removal of something that was already removed. if !value.Present { From a0741de1c1d1fd89c959f4f8ed84fb34594e0cdf Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu, 20 Jun 2024 09:48:16 -0700 Subject: [PATCH 073/102] [vms/platformvm] Minor grammer fixes in `state` struct code comments (#3136) --- vms/platformvm/state/state.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 295e67c52ff3..2c090422e06d 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -288,11 +288,11 @@ type state struct { currentHeight uint64 addedBlockIDs map[uint64]ids.ID // map of height -> blockID - blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database + blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID; if the entry is ids.Empty, it is not in the database blockIDDB database.Database addedBlocks map[ids.ID]block.Block // map of blockID -> Block - blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. 
If the entry is nil, it is not in the database + blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block; if the entry is nil, it is not in the database blockDB database.Database validatorsDB database.Database @@ -319,14 +319,14 @@ type state struct { validatorPublicKeyDiffsDB database.Database addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} - txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. If the entry is nil, it isn't in the database + txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}; if the entry is nil, it is not in the database txDB database.Database addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO rewardUTXODB database.Database - modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO if the UTXO is nil, it has been removed + modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO; if the UTXO is nil, it has been removed utxoDB database.Database utxoState avax.UTXOState @@ -335,17 +335,16 @@ type state struct { subnetBaseDB database.Database subnetDB linkeddb.LinkedDB - // Subnet ID --> Owner of the subnet - subnetOwners map[ids.ID]fx.Owner - subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner if the entry is nil, it is not in the database + subnetOwners map[ids.ID]fx.Owner // map of subnetID -> owner + subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner; if the entry is nil, it is not in the database subnetOwnerDB database.Database transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx - transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database + transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx; if the entry is nil, it is not in the database 
transformedSubnetDB database.Database modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply - supplyCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database + supplyCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply; if the entry is nil, it is not in the database supplyDB database.Database addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet From d0c209421a98f69d37fbef1659705f20bdbcf02c Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Thu, 20 Jun 2024 18:22:06 -0700 Subject: [PATCH 074/102] bump protobuf (fixes some build issues) (#3142) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 34d9c6cfacde..cf4ca86ee324 100644 --- a/go.mod +++ b/go.mod @@ -65,7 +65,7 @@ require ( gonum.org/v1/gonum v0.11.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 google.golang.org/grpc v1.62.0 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -108,7 +108,7 @@ require ( github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect diff --git a/go.sum b/go.sum index 79f7b0d77e36..f969c7512b4f 100644 --- a/go.sum +++ b/go.sum @@ -269,8 +269,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw 
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -1027,8 +1027,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From cafd71c8c3f433faf92c207164e4a1e8548ce1e9 Mon Sep 17 00:00:00 2001 From: marun Date: Fri, 21 Jun 2024 20:26:01 +0200 Subject: [PATCH 075/102] Emit version in JSON format for --json-version (#3129) --- config/flags.go | 1 + config/keys.go | 1 + main/main.go | 19 ++++++++++++++- version/string.go | 53 +++++++++++++++++++++++------------------- version/string_test.go | 23 ++++++++++++++++++ 5 files changed, 72 insertions(+), 25 deletions(-) create mode 100644 version/string_test.go diff --git a/config/flags.go b/config/flags.go index 661eb9e84e66..3fb99e5f03e3 100644 --- a/config/flags.go +++ b/config/flags.go @@ -69,6 +69,7 @@ func deprecateFlags(fs *pflag.FlagSet) error { func addProcessFlags(fs *pflag.FlagSet) { // If true, print the version and quit. fs.Bool(VersionKey, false, "If true, print version and quit") + fs.Bool(VersionJSONKey, false, "If true, print version in JSON format and quit") } func addNodeFlags(fs *pflag.FlagSet) { diff --git a/config/keys.go b/config/keys.go index 25348ae54c8d..289c83170614 100644 --- a/config/keys.go +++ b/config/keys.go @@ -13,6 +13,7 @@ const ( ConfigContentKey = "config-file-content" ConfigContentTypeKey = "config-file-content-type" VersionKey = "version" + VersionJSONKey = "version-json" GenesisFileKey = "genesis-file" GenesisFileContentKey = "genesis-file-content" NetworkNameKey = "network-id" diff --git a/main/main.go b/main/main.go index 5651bf61940f..88de72dadbbe 100644 --- a/main/main.go +++ b/main/main.go @@ -4,6 +4,7 @@ package main import ( + "encoding/json" "errors" "fmt" "os" @@ -29,8 +30,24 @@ func main() { os.Exit(1) } + if v.GetBool(config.VersionJSONKey) && v.GetBool(config.VersionKey) { + fmt.Println("can't print both JSON and human readable versions") + os.Exit(1) + } + + if v.GetBool(config.VersionJSONKey) { + versions := version.GetVersions() + jsonBytes, err := json.MarshalIndent(versions, "", " ") + if err != 
nil { + fmt.Printf("couldn't marshal versions: %s\n", err) + os.Exit(1) + } + fmt.Println(string(jsonBytes)) + os.Exit(0) + } + if v.GetBool(config.VersionKey) { - fmt.Print(version.String) + fmt.Println(version.GetVersions().String()) os.Exit(0) } diff --git a/version/string.go b/version/string.go index 9abe555bcebb..80df9bea697a 100644 --- a/version/string.go +++ b/version/string.go @@ -9,32 +9,37 @@ import ( "strings" ) -var ( - // String is displayed when CLI arg --version is used - String string +// GitCommit is set in the build script at compile time +var GitCommit string - // GitCommit is set in the build script at compile time - GitCommit string -) +// Versions contains the versions relevant to a build of avalanchego. In +// addition to supporting construction of the string displayed by +// --version, it is used to produce the output of --version-json and can +// be used to unmarshal that output. +type Versions struct { + Application string `json:"application"` + Database string `json:"database"` + RPCChainVM uint64 `json:"rpcchainvm"` + // Commit may be empty if GitCommit was not set at compile time + Commit string `json:"commit"` + Go string `json:"go"` +} -func init() { - format := "%s [database=%s, rpcchainvm=%d" - args := []interface{}{ - CurrentApp, - CurrentDatabase, - RPCChainVMProtocol, - } - if GitCommit != "" { - format += ", commit=%s" - args = append(args, GitCommit) +func GetVersions() *Versions { + return &Versions{ + Application: CurrentApp.String(), + Database: CurrentDatabase.String(), + RPCChainVM: uint64(RPCChainVMProtocol), + Commit: GitCommit, + Go: strings.TrimPrefix(runtime.Version(), "go"), } +} - // add golang version - goVersion := runtime.Version() - goVersionNumber := strings.TrimPrefix(goVersion, "go") - format += ", go=%s" - args = append(args, goVersionNumber) - - format += "]\n" - String = fmt.Sprintf(format, args...) 
+func (v *Versions) String() string { + // This format maintains consistency with previous --version output + versionString := fmt.Sprintf("%s [database=%s, rpcchainvm=%d, ", v.Application, v.Database, v.RPCChainVM) + if len(v.Commit) > 0 { + versionString += fmt.Sprintf("commit=%s, ", v.Commit) + } + return versionString + fmt.Sprintf("go=%s]", v.Go) } diff --git a/version/string_test.go b/version/string_test.go new file mode 100644 index 000000000000..58f44668b3e0 --- /dev/null +++ b/version/string_test.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package version + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestVersionsGetString(t *testing.T) { + versions := Versions{ + Application: "1", + Database: "2", + RPCChainVM: 3, + Commit: "4", + Go: "5", + } + require.Equal(t, "1 [database=2, rpcchainvm=3, commit=4, go=5]", versions.String()) + versions.Commit = "" + require.Equal(t, "1 [database=2, rpcchainvm=3, go=5]", versions.String()) +} From 6eef08fe19e657e930d3824bdce87e315f4a4f1a Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 21 Jun 2024 20:27:30 +0200 Subject: [PATCH 076/102] Repackaged NextBlockTime and GetNextStakerChangeTime (#3134) --- vms/platformvm/block/builder/builder.go | 4 +- vms/platformvm/block/executor/manager.go | 2 +- .../block/executor/proposal_block_test.go | 2 +- vms/platformvm/block/executor/verifier.go | 2 +- vms/platformvm/state/chain_time_helpers.go | 70 +++++++++++++++++++ .../txs/executor/proposal_tx_executor.go | 2 +- .../staker_tx_verification_helpers.go | 34 --------- vms/platformvm/txs/executor/state_changes.go | 25 ------- 8 files changed, 76 insertions(+), 65 deletions(-) create mode 100644 vms/platformvm/state/chain_time_helpers.go diff --git a/vms/platformvm/block/builder/builder.go b/vms/platformvm/block/builder/builder.go index 1a7f2f0556e6..a445bb52cb1b 100644 --- 
a/vms/platformvm/block/builder/builder.go +++ b/vms/platformvm/block/builder/builder.go @@ -171,7 +171,7 @@ func (b *builder) durationToSleep() (time.Duration, error) { return 0, fmt.Errorf("%w: %s", errMissingPreferredState, preferredID) } - nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(preferredState) if err != nil { return 0, fmt.Errorf("%w of %s: %w", errCalculatingNextStakerTime, preferredID, err) } @@ -216,7 +216,7 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { return nil, fmt.Errorf("%w: %s", state.ErrMissingParentState, preferredID) } - timestamp, timeWasCapped, err := txexecutor.NextBlockTime(preferredState, b.txExecutorBackend.Clk) + timestamp, timeWasCapped, err := state.NextBlockTime(preferredState, b.txExecutorBackend.Clk) if err != nil { return nil, fmt.Errorf("could not calculate next staker change time: %w", err) } diff --git a/vms/platformvm/block/executor/manager.go b/vms/platformvm/block/executor/manager.go index ecc07f579815..80e3e4503139 100644 --- a/vms/platformvm/block/executor/manager.go +++ b/vms/platformvm/block/executor/manager.go @@ -132,7 +132,7 @@ func (m *manager) VerifyTx(tx *txs.Tx) error { return err } - nextBlkTime, _, err := executor.NextBlockTime(stateDiff, m.txExecutorBackend.Clk) + nextBlkTime, _, err := state.NextBlockTime(stateDiff, m.txExecutorBackend.Clk) if err != nil { return err } diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 3a3f45fd7c0d..f0037754d06a 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -1436,7 +1436,7 @@ func TestAddValidatorProposalBlock(t *testing.T) { // Advance time until next staker change time is [validatorEndTime] for { - nextStakerChangeTime, err := executor.GetNextStakerChangeTime(env.state) + nextStakerChangeTime, err 
:= state.GetNextStakerChangeTime(env.state) require.NoError(err) if nextStakerChangeTime.Equal(validatorEndTime) { break diff --git a/vms/platformvm/block/executor/verifier.go b/vms/platformvm/block/executor/verifier.go index c56abc45e3c1..3feccf4e4c96 100644 --- a/vms/platformvm/block/executor/verifier.go +++ b/vms/platformvm/block/executor/verifier.go @@ -266,7 +266,7 @@ func (v *verifier) banffNonOptionBlock(b block.BanffBlock) error { ) } - nextStakerChangeTime, err := executor.GetNextStakerChangeTime(parentState) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(parentState) if err != nil { return fmt.Errorf("could not verify block timestamp: %w", err) } diff --git a/vms/platformvm/state/chain_time_helpers.go b/vms/platformvm/state/chain_time_helpers.go new file mode 100644 index 000000000000..036eb168d73d --- /dev/null +++ b/vms/platformvm/state/chain_time_helpers.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "fmt" + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/timer/mockable" +) + +func NextBlockTime(state Chain, clk *mockable.Clock) (time.Time, bool, error) { + var ( + timestamp = clk.Time() + parentTime = state.GetTimestamp() + ) + if parentTime.After(timestamp) { + timestamp = parentTime + } + // [timestamp] = max(now, parentTime) + + nextStakerChangeTime, err := GetNextStakerChangeTime(state) + if err != nil { + return time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) + } + + // timeWasCapped means that [timestamp] was reduced to [nextStakerChangeTime] + timeWasCapped := !timestamp.Before(nextStakerChangeTime) + if timeWasCapped { + timestamp = nextStakerChangeTime + } + // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) + return timestamp, timeWasCapped, nil +} + +// GetNextStakerChangeTime returns the next time a staker will be either added +// or removed to/from the current validator set. 
+func GetNextStakerChangeTime(state Chain) (time.Time, error) { + currentStakerIterator, err := state.GetCurrentStakerIterator() + if err != nil { + return time.Time{}, err + } + defer currentStakerIterator.Release() + + pendingStakerIterator, err := state.GetPendingStakerIterator() + if err != nil { + return time.Time{}, err + } + defer pendingStakerIterator.Release() + + hasCurrentStaker := currentStakerIterator.Next() + hasPendingStaker := pendingStakerIterator.Next() + switch { + case hasCurrentStaker && hasPendingStaker: + nextCurrentTime := currentStakerIterator.Value().NextTime + nextPendingTime := pendingStakerIterator.Value().NextTime + if nextCurrentTime.Before(nextPendingTime) { + return nextCurrentTime, nil + } + return nextPendingTime, nil + case hasCurrentStaker: + return currentStakerIterator.Value().NextTime, nil + case hasPendingStaker: + return pendingStakerIterator.Value().NextTime, nil + default: + return time.Time{}, database.ErrNotFound + } +} diff --git a/vms/platformvm/txs/executor/proposal_tx_executor.go b/vms/platformvm/txs/executor/proposal_tx_executor.go index 294e01486a17..c54b8207fb06 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor.go @@ -269,7 +269,7 @@ func (e *ProposalTxExecutor) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { // Only allow timestamp to move forward as far as the time of next staker // set change time - nextStakerChangeTime, err := GetNextStakerChangeTime(e.OnCommitState) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(e.OnCommitState) if err != nil { return err } diff --git a/vms/platformvm/txs/executor/staker_tx_verification_helpers.go b/vms/platformvm/txs/executor/staker_tx_verification_helpers.go index 3a74cea28696..eb18c6609299 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification_helpers.go +++ b/vms/platformvm/txs/executor/staker_tx_verification_helpers.go @@ -94,40 +94,6 @@ func getDelegatorRules( }, nil } -// 
GetNextStakerChangeTime returns the next time a staker will be either added -// or removed to/from the current validator set. -func GetNextStakerChangeTime(state state.Chain) (time.Time, error) { - currentStakerIterator, err := state.GetCurrentStakerIterator() - if err != nil { - return time.Time{}, err - } - defer currentStakerIterator.Release() - - pendingStakerIterator, err := state.GetPendingStakerIterator() - if err != nil { - return time.Time{}, err - } - defer pendingStakerIterator.Release() - - hasCurrentStaker := currentStakerIterator.Next() - hasPendingStaker := pendingStakerIterator.Next() - switch { - case hasCurrentStaker && hasPendingStaker: - nextCurrentTime := currentStakerIterator.Value().NextTime - nextPendingTime := pendingStakerIterator.Value().NextTime - if nextCurrentTime.Before(nextPendingTime) { - return nextCurrentTime, nil - } - return nextPendingTime, nil - case hasCurrentStaker: - return currentStakerIterator.Value().NextTime, nil - case hasPendingStaker: - return pendingStakerIterator.Value().NextTime, nil - default: - return time.Time{}, database.ErrNotFound - } -} - // GetValidator returns information about the given validator, which may be a // current validator or pending validator. 
func GetValidator(state state.Chain, subnetID ids.ID, nodeID ids.NodeID) (*state.Staker, error) { diff --git a/vms/platformvm/txs/executor/state_changes.go b/vms/platformvm/txs/executor/state_changes.go index 36981b095e8c..3086358304a3 100644 --- a/vms/platformvm/txs/executor/state_changes.go +++ b/vms/platformvm/txs/executor/state_changes.go @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -58,30 +57,6 @@ func VerifyNewChainTime( return nil } -func NextBlockTime(state state.Chain, clk *mockable.Clock) (time.Time, bool, error) { - var ( - timestamp = clk.Time() - parentTime = state.GetTimestamp() - ) - if parentTime.After(timestamp) { - timestamp = parentTime - } - // [timestamp] = max(now, parentTime) - - nextStakerChangeTime, err := GetNextStakerChangeTime(state) - if err != nil { - return time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) - } - - // timeWasCapped means that [timestamp] was reduced to [nextStakerChangeTime] - timeWasCapped := !timestamp.Before(nextStakerChangeTime) - if timeWasCapped { - timestamp = nextStakerChangeTime - } - // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) - return timestamp, timeWasCapped, nil -} - // AdvanceTimeTo applies all state changes to [parentState] resulting from // advancing the chain time to [newChainTime]. // Returns true iff the validator set changed. 
From 1a9bc457d8c7a2c9b1336fdac67e9103898a696a Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 24 Jun 2024 05:21:18 -0700 Subject: [PATCH 077/102] [vms/platformvm] Cleanup execution config tests (#3137) --- .../config/execution_config_test.go | 117 ++++-------------- 1 file changed, 27 insertions(+), 90 deletions(-) diff --git a/vms/platformvm/config/execution_config_test.go b/vms/platformvm/config/execution_config_test.go index 4c4f4ba9dc96..f3fe8e4c6082 100644 --- a/vms/platformvm/config/execution_config_test.go +++ b/vms/platformvm/config/execution_config_test.go @@ -4,6 +4,8 @@ package config import ( + "encoding/json" + "reflect" "testing" "time" @@ -12,6 +14,23 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/network" ) +// Requires all values in a struct to be initialized +func verifyInitializedStruct(tb testing.TB, s interface{}) { + tb.Helper() + + require := require.New(tb) + + structType := reflect.TypeOf(s) + require.Equal(reflect.Struct, structType.Kind()) + + v := reflect.ValueOf(s) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + require.True(field.IsValid(), "invalid field: ", structType.Field(i).Name) + require.False(field.IsZero(), "zero field: ", structType.Field(i).Name) + } +} + func TestExecutionConfigUnmarshal(t *testing.T) { t.Run("default values from empty json", func(t *testing.T) { require := require.New(t) @@ -41,39 +60,7 @@ func TestExecutionConfigUnmarshal(t *testing.T) { t.Run("all values extracted from json", func(t *testing.T) { require := require.New(t) - b := []byte(`{ - "network": { - "max-validator-set-staleness": 1, - "target-gossip-size": 2, - "push-gossip-percent-stake": 0.3, - "push-gossip-num-validators": 4, - "push-gossip-num-peers": 5, - "push-regossip-num-validators": 6, - "push-regossip-num-peers": 7, - "push-gossip-discarded-cache-size": 8, - "push-gossip-max-regossip-frequency": 9, - "push-gossip-frequency": 10, - 
"pull-gossip-poll-size": 11, - "pull-gossip-frequency": 12, - "pull-gossip-throttling-period": 13, - "pull-gossip-throttling-limit": 14, - "expected-bloom-filter-elements": 15, - "expected-bloom-filter-false-positive-probability": 16, - "max-bloom-filter-false-positive-probability": 17 - }, - "block-cache-size": 1, - "tx-cache-size": 2, - "transformed-subnet-tx-cache-size": 3, - "reward-utxos-cache-size": 5, - "chain-cache-size": 6, - "chain-db-cache-size": 7, - "block-id-cache-size": 8, - "fx-owner-cache-size": 9, - "checksums-enabled": true, - "mempool-prune-frequency": 60000000000 - }`) - ec, err := GetExecutionConfig(b) - require.NoError(err) + expected := &ExecutionConfig{ Network: network.Config{ MaxValidatorSetStaleness: 1, @@ -105,64 +92,14 @@ func TestExecutionConfigUnmarshal(t *testing.T) { ChecksumsEnabled: true, MempoolPruneFrequency: time.Minute, } - require.Equal(expected, ec) - }) + verifyInitializedStruct(t, *expected) + verifyInitializedStruct(t, expected.Network) - t.Run("default values applied correctly", func(t *testing.T) { - require := require.New(t) - b := []byte(`{ - "network": { - "max-validator-set-staleness": 1, - "target-gossip-size": 2, - "push-gossip-discarded-cache-size": 1024, - "push-gossip-max-regossip-frequency": 10000000000, - "pull-gossip-poll-size": 3, - "pull-gossip-frequency": 4, - "pull-gossip-throttling-period": 5 - }, - "block-cache-size": 1, - "tx-cache-size": 2, - "transformed-subnet-tx-cache-size": 3, - "reward-utxos-cache-size": 5, - "chain-cache-size": 6, - "chain-db-cache-size": 7, - "block-id-cache-size": 8, - "fx-owner-cache-size": 9, - "checksums-enabled": true - }`) - ec, err := GetExecutionConfig(b) + b, err := json.Marshal(expected) require.NoError(err) - expected := &ExecutionConfig{ - Network: network.Config{ - MaxValidatorSetStaleness: 1, - TargetGossipSize: 2, - PushGossipPercentStake: DefaultExecutionConfig.Network.PushGossipPercentStake, - PushGossipNumValidators: 
DefaultExecutionConfig.Network.PushGossipNumValidators, - PushGossipNumPeers: DefaultExecutionConfig.Network.PushGossipNumPeers, - PushRegossipNumValidators: DefaultExecutionConfig.Network.PushRegossipNumValidators, - PushRegossipNumPeers: DefaultExecutionConfig.Network.PushRegossipNumPeers, - PushGossipDiscardedCacheSize: 1024, - PushGossipMaxRegossipFrequency: 10 * time.Second, - PushGossipFrequency: DefaultExecutionConfig.Network.PushGossipFrequency, - PullGossipPollSize: 3, - PullGossipFrequency: 4, - PullGossipThrottlingPeriod: 5, - PullGossipThrottlingLimit: DefaultExecutionConfig.Network.PullGossipThrottlingLimit, - ExpectedBloomFilterElements: DefaultExecutionConfig.Network.ExpectedBloomFilterElements, - ExpectedBloomFilterFalsePositiveProbability: DefaultExecutionConfig.Network.ExpectedBloomFilterFalsePositiveProbability, - MaxBloomFilterFalsePositiveProbability: DefaultExecutionConfig.Network.MaxBloomFilterFalsePositiveProbability, - }, - BlockCacheSize: 1, - TxCacheSize: 2, - TransformedSubnetTxCacheSize: 3, - RewardUTXOsCacheSize: 5, - ChainCacheSize: 6, - ChainDBCacheSize: 7, - BlockIDCacheSize: 8, - FxOwnerCacheSize: 9, - ChecksumsEnabled: true, - MempoolPruneFrequency: 30 * time.Minute, - } - require.Equal(expected, ec) + + actual, err := GetExecutionConfig(b) + require.NoError(err) + require.Equal(expected, actual) }) } From 318da000780e3c165eb0c01b05da315c19309971 Mon Sep 17 00:00:00 2001 From: marun Date: Tue, 25 Jun 2024 17:35:11 +0200 Subject: [PATCH 078/102] [tmpnet] Enable bootstrap of subnets with disjoint validator sets (#3138) Co-authored-by: Alberto Benegiamo --- tests/e2e/vms/xsvm.go | 53 +++++++++++++++++++++++++-------- tests/fixture/tmpnet/network.go | 6 ++-- tests/fixture/tmpnet/subnet.go | 4 +-- 3 files changed, 45 insertions(+), 18 deletions(-) diff --git a/tests/e2e/vms/xsvm.go b/tests/e2e/vms/xsvm.go index 71eeb7936c43..5d3557acd405 100644 --- a/tests/e2e/vms/xsvm.go +++ b/tests/e2e/vms/xsvm.go @@ -9,6 +9,7 @@ import ( 
"github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/tests/fixture/subnet" @@ -35,9 +36,17 @@ func XSVMSubnetsOrPanic(nodes ...*tmpnet.Node) []*tmpnet.Subnet { if err != nil { panic(err) } + subnetANodes := nodes + subnetBNodes := nodes + if len(nodes) > 1 { + // Validate tmpnet bootstrap of a disjoint validator set + midpoint := len(nodes) / 2 + subnetANodes = nodes[:midpoint] + subnetBNodes = nodes[midpoint:] + } return []*tmpnet.Subnet{ - subnet.NewXSVMOrPanic(subnetAName, key, nodes...), - subnet.NewXSVMOrPanic(subnetBName, key, nodes...), + subnet.NewXSVMOrPanic(subnetAName, key, subnetANodes...), + subnet.NewXSVMOrPanic(subnetBName, key, subnetBNodes...), } } @@ -55,14 +64,21 @@ var _ = ginkgo.Describe("[XSVM]", func() { sourceChain := sourceSubnet.Chains[0] destinationChain := destinationSubnet.Chains[0] - apiNode := network.Nodes[0] - tests.Outf(" issuing transactions on %s (%s)\n", apiNode.NodeID, apiNode.URI) + sourceValidators := getNodesForIDs(network.Nodes, sourceSubnet.ValidatorIDs) + require.NotEmpty(sourceValidators) + sourceAPINode := sourceValidators[0] + tests.Outf(" issuing transactions for source subnet on %s (%s)\n", sourceAPINode.NodeID, sourceAPINode.URI) + + destinationValidators := getNodesForIDs(network.Nodes, destinationSubnet.ValidatorIDs) + require.NotEmpty(destinationValidators) + destinationAPINode := destinationValidators[0] + tests.Outf(" issuing transactions for destination subnet on %s (%s)\n", destinationAPINode.NodeID, destinationAPINode.URI) destinationKey, err := secp256k1.NewPrivateKey() require.NoError(err) ginkgo.By("checking that the funded key has sufficient funds for the export") - sourceClient := api.NewClient(apiNode.URI, sourceChain.ChainID.String()) + sourceClient := api.NewClient(sourceAPINode.URI, sourceChain.ChainID.String()) 
initialSourcedBalance, err := sourceClient.Balance( e2e.DefaultContext(), sourceChain.PreFundedKey.Address(), @@ -75,7 +91,7 @@ var _ = ginkgo.Describe("[XSVM]", func() { exportTxStatus, err := export.Export( e2e.DefaultContext(), &export.Config{ - URI: apiNode.URI, + URI: sourceAPINode.URI, SourceChainID: sourceChain.ChainID, DestinationChainID: destinationChain.ChainID, Amount: units.Schmeckle, @@ -87,7 +103,7 @@ var _ = ginkgo.Describe("[XSVM]", func() { tests.Outf(" issued transaction with ID: %s\n", exportTxStatus.TxID) ginkgo.By("checking that the export transaction has been accepted on all nodes") - for _, node := range network.Nodes[1:] { + for _, node := range sourceValidators[1:] { require.NoError(api.AwaitTxAccepted( e2e.DefaultContext(), api.NewClient(node.URI, sourceChain.ChainID.String()), @@ -104,7 +120,7 @@ var _ = ginkgo.Describe("[XSVM]", func() { transferTxStatus, err := transfer.Transfer( e2e.DefaultContext(), &transfer.Config{ - URI: apiNode.URI, + URI: destinationAPINode.URI, ChainID: destinationChain.ChainID, AssetID: destinationChain.ChainID, Amount: units.Schmeckle, @@ -116,14 +132,14 @@ var _ = ginkgo.Describe("[XSVM]", func() { tests.Outf(" issued transaction with ID: %s\n", transferTxStatus.TxID) ginkgo.By(fmt.Sprintf("importing to blockchain %s on subnet %s", destinationChain.ChainID, destinationSubnet.SubnetID)) - sourceURIs := make([]string, len(network.Nodes)) - for i, node := range network.Nodes { + sourceURIs := make([]string, len(sourceValidators)) + for i, node := range sourceValidators { sourceURIs[i] = node.URI } importTxStatus, err := importtx.Import( e2e.DefaultContext(), &importtx.Config{ - URI: apiNode.URI, + URI: destinationAPINode.URI, SourceURIs: sourceURIs, SourceChainID: sourceChain.ChainID.String(), DestinationChainID: destinationChain.ChainID.String(), @@ -140,9 +156,22 @@ var _ = ginkgo.Describe("[XSVM]", func() { require.GreaterOrEqual(initialSourcedBalance-units.Schmeckle, sourceBalance) ginkgo.By("checking that 
the balance of the destination key is non-zero") - destinationClient := api.NewClient(apiNode.URI, destinationChain.ChainID.String()) + destinationClient := api.NewClient(destinationAPINode.URI, destinationChain.ChainID.String()) destinationBalance, err := destinationClient.Balance(e2e.DefaultContext(), destinationKey.Address(), sourceChain.ChainID) require.NoError(err) require.Equal(units.Schmeckle, destinationBalance) }) }) + +// Retrieve the nodes corresponding to the provided IDs +func getNodesForIDs(nodes []*tmpnet.Node, nodeIDs []ids.NodeID) []*tmpnet.Node { + desiredNodes := make([]*tmpnet.Node, 0, len(nodeIDs)) + for _, node := range nodes { + for _, nodeID := range nodeIDs { + if node.NodeID == nodeID { + desiredNodes = append(desiredNodes, node) + } + } + } + return desiredNodes +} diff --git a/tests/fixture/tmpnet/network.go b/tests/fixture/tmpnet/network.go index 97e6a9a48c21..63796be267a4 100644 --- a/tests/fixture/tmpnet/network.go +++ b/tests/fixture/tmpnet/network.go @@ -409,7 +409,7 @@ func (n *Network) Bootstrap(ctx context.Context, w io.Writer) error { } // Don't restart the node during subnet creation since it will always be restarted afterwards. - if err := n.CreateSubnets(ctx, w, false /* restartRequired */); err != nil { + if err := n.CreateSubnets(ctx, w, bootstrapNode.URI, false /* restartRequired */); err != nil { return err } @@ -646,7 +646,7 @@ func (n *Network) GetSubnet(name string) *Subnet { // Ensure that each subnet on the network is created. If restartRequired is false, node restart // to pick up configuration changes becomes the responsibility of the caller. 
-func (n *Network) CreateSubnets(ctx context.Context, w io.Writer, restartRequired bool) error { +func (n *Network) CreateSubnets(ctx context.Context, w io.Writer, apiURI string, restartRequired bool) error { createdSubnets := make([]*Subnet, 0, len(n.Subnets)) for _, subnet := range n.Subnets { if len(subnet.ValidatorIDs) == 0 { @@ -748,7 +748,7 @@ func (n *Network) CreateSubnets(ctx context.Context, w io.Writer, restartRequire validatorNodes = append(validatorNodes, node) } - if err := subnet.AddValidators(ctx, w, validatorNodes...); err != nil { + if err := subnet.AddValidators(ctx, w, apiURI, validatorNodes...); err != nil { return err } } diff --git a/tests/fixture/tmpnet/subnet.go b/tests/fixture/tmpnet/subnet.go index eb07536ba7d3..9e4d30f83f7f 100644 --- a/tests/fixture/tmpnet/subnet.go +++ b/tests/fixture/tmpnet/subnet.go @@ -155,9 +155,7 @@ func (s *Subnet) CreateChains(ctx context.Context, w io.Writer, uri string) erro } // Add validators to the subnet -func (s *Subnet) AddValidators(ctx context.Context, w io.Writer, nodes ...*Node) error { - apiURI := nodes[0].URI - +func (s *Subnet) AddValidators(ctx context.Context, w io.Writer, apiURI string, nodes ...*Node) error { wallet, err := s.GetWallet(ctx, apiURI) if err != nil { return err From 24e9952d37f63114d0988ffd384485f7b4cf2c86 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 27 Jun 2024 14:16:30 -0400 Subject: [PATCH 079/102] Simplify dependency registration (#3139) --- snow/engine/snowman/issuer.go | 2 +- snow/engine/snowman/transitive.go | 128 ++++++++++++++---------------- 2 files changed, 60 insertions(+), 70 deletions(-) diff --git a/snow/engine/snowman/issuer.go b/snow/engine/snowman/issuer.go index 9af5fb9716ac..0a9069d00173 100644 --- a/snow/engine/snowman/issuer.go +++ b/snow/engine/snowman/issuer.go @@ -33,7 +33,7 @@ func (i *issuer) Execute(ctx context.Context, _ []ids.ID, abandoned []ids.ID) er // If the parent block was abandoned, this block should be abandoned as // well. 
blkID := i.blk.ID() - i.t.removeFromPending(i.blk) + delete(i.t.pending, blkID) i.t.addToNonVerifieds(i.blk) return i.t.blocked.Abandon(ctx, blkID) } diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index cd252980e34f..82ab3071f9c2 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -266,7 +266,7 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 // receive requests to fill the ancestry. dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. - if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { + if err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { return err } return t.executeDeferredWork(ctx) @@ -305,7 +305,7 @@ func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID // Try to issue [blkID] to consensus. // If we're missing an ancestor, request it from [vdr] - if _, err := t.issueFromByID(ctx, nodeID, blkID, issuedMetric); err != nil { + if err := t.issueFromByID(ctx, nodeID, blkID, issuedMetric); err != nil { return err } @@ -346,7 +346,7 @@ func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID // receive requests to fill the ancestry. dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. 
- if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { + if err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { return err } @@ -365,14 +365,12 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin ) issuedMetric := t.metrics.issued.WithLabelValues(pullGossipSource) - - addedPreferred, err := t.issueFromByID(ctx, nodeID, preferredID, issuedMetric) - if err != nil { + if err := t.issueFromByID(ctx, nodeID, preferredID, issuedMetric); err != nil { return err } var ( - addedPreferredIDAtHeight = addedPreferred + preferredIDAtHeightShouldBlock bool // Invariant: The order of [responseOptions] must be [preferredID] then // (optionally) [preferredIDAtHeight]. During vote application, the // first vote that can be applied will be used. So, the votes should be @@ -380,10 +378,10 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin responseOptions = []ids.ID{preferredID} ) if preferredID != preferredIDAtHeight { - addedPreferredIDAtHeight, err = t.issueFromByID(ctx, nodeID, preferredIDAtHeight, issuedMetric) - if err != nil { + if err := t.issueFromByID(ctx, nodeID, preferredIDAtHeight, issuedMetric); err != nil { return err } + preferredIDAtHeightShouldBlock = t.canDependOn(preferredIDAtHeight) responseOptions = append(responseOptions, preferredIDAtHeight) } @@ -399,10 +397,10 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin // Wait until [preferredID] and [preferredIDAtHeight] have been issued to // consensus before applying this chit. 
var deps []ids.ID - if !addedPreferred { + if t.canDependOn(preferredID) { deps = append(deps, preferredID) } - if !addedPreferredIDAtHeight { + if preferredIDAtHeightShouldBlock { deps = append(deps, preferredIDAtHeight) } @@ -682,16 +680,18 @@ func (t *Transitive) buildBlocks(ctx context.Context) error { } issuedMetric := t.metrics.issued.WithLabelValues(builtSource) - added, err := t.issueWithAncestors(ctx, blk, issuedMetric) - if err != nil { + if err := t.issueWithAncestors(ctx, blk, issuedMetric); err != nil { return err } - // issuing the block shouldn't have any missing dependencies - if added { + // TODO: Technically this may incorrectly log a warning if the block + // that was just built caused votes to be applied such that the block + // was rejected or was accepted along with one of its children. This + // should be cleaned up to never produce an invalid warning. + if t.canIssueChildOn(blk.ID()) { t.Ctx.Log.Verbo("successfully issued new block from the VM") } else { - t.Ctx.Log.Warn("built block with unissued ancestors") + t.Ctx.Log.Warn("block that was just built is not extendable") } } return nil @@ -709,47 +709,46 @@ func (t *Transitive) repoll(ctx context.Context) { } } -// issueFromByID attempts to issue the branch ending with a block [blkID] into consensus. +// issueFromByID attempts to issue the branch ending with a block [blkID] into +// consensus. // If we do not have [blkID], request it. -// Returns true if the block is processing in consensus or is decided. func (t *Transitive) issueFromByID( ctx context.Context, nodeID ids.NodeID, blkID ids.ID, issuedMetric prometheus.Counter, -) (bool, error) { +) error { blk, err := t.getBlock(ctx, blkID) if err != nil { t.sendRequest(ctx, nodeID, blkID, issuedMetric) - return false, nil + return nil } return t.issueFrom(ctx, nodeID, blk, issuedMetric) } -// issueFrom attempts to issue the branch ending with block [blkID] to consensus. -// Returns true if the block is processing in consensus or is decided. 
-// If a dependency is missing, request it from [vdr].
+// issueFrom attempts to issue the branch ending with block [blkID] to
+// consensus.
+// If a dependency is missing, it will be requested from [nodeID].
 func (t *Transitive) issueFrom(
 	ctx context.Context,
 	nodeID ids.NodeID,
 	blk snowman.Block,
 	issuedMetric prometheus.Counter,
-) (bool, error) {
+) error {
 	// issue [blk] and its ancestors to consensus.
 	blkID := blk.ID()
 	for !t.wasIssued(blk) {
-		if err := t.issue(ctx, nodeID, blk, false, issuedMetric); err != nil {
-			return false, err
+		err := t.issue(ctx, nodeID, blk, false, issuedMetric)
+		if err != nil {
+			return err
 		}
+		// If we don't have this ancestor, request it from [nodeID]
 		blkID = blk.Parent()
-		var err error
 		blk, err = t.getBlock(ctx, blkID)
-
-		// If we don't have this ancestor, request it from [vdr]
-		if err != nil || !blk.Status().Fetched() {
+		if err != nil {
 			t.sendRequest(ctx, nodeID, blkID, issuedMetric)
-			return false, nil
+			return nil
 		}
 	}
@@ -758,55 +757,45 @@ func (t *Transitive) issueFrom(
 		delete(t.blkReqSourceMetric, req)
 	}
 
-	if !t.isDecided(blk) && !t.Consensus.Processing(blkID) {
-		return false, nil
+	// If this block isn't pending, make sure nothing is blocked on it.
+	if _, isPending := t.pending[blkID]; !isPending {
+		return t.blocked.Abandon(ctx, blkID)
 	}
-
-	// A dependency should never be waiting on a decided or processing block.
-	// However, if the block was marked as rejected by the VM, the dependencies
-	// may still be waiting. Therefore, they should abandoned.
-	return true, t.blocked.Abandon(ctx, blkID)
+	return nil
 }
 
-// issueWithAncestors attempts to issue the branch ending with [blk] to consensus.
-// Returns true if the block is processing in consensus or is decided.
-// If a dependency is missing and the dependency hasn't been requested, the issuance will be abandoned.
+// issueWithAncestors attempts to issue the branch ending with [blk] to
+// consensus.
+// If a dependency is missing and the dependency hasn't been requested, the +// issuance will be abandoned. func (t *Transitive) issueWithAncestors( ctx context.Context, blk snowman.Block, issuedMetric prometheus.Counter, -) (bool, error) { +) error { blkID := blk.ID() // issue [blk] and its ancestors into consensus - status := blk.Status() - for status.Fetched() && !t.wasIssued(blk) { + for !t.wasIssued(blk) { err := t.issue(ctx, t.Ctx.NodeID, blk, true, issuedMetric) if err != nil { - return false, err + return err } blkID = blk.Parent() blk, err = t.getBlock(ctx, blkID) if err != nil { - status = choices.Unknown break } - status = blk.Status() } - // The block was issued into consensus. This is the happy path. - if status != choices.Unknown && (t.isDecided(blk) || t.Consensus.Processing(blkID)) { - return true, nil - } - - // There's an outstanding request for this block. - // We can just wait for that request to succeed or fail. + // There's an outstanding request for this block. We can wait for that + // request to succeed or fail. if t.blkReqs.HasValue(blkID) { - return false, nil + return nil } - // We don't have this block and have no reason to expect that we will get it. - // Abandon the block to avoid a memory leak. - return false, t.blocked.Abandon(ctx, blkID) + // If the block wasn't already issued, we have no reason to expect that it + // will be able to be issued. + return t.blocked.Abandon(ctx, blkID) } // If the block has been decided, then it is marked as having been issued. 
@@ -956,12 +945,10 @@ func (t *Transitive) deliver( ) error { // we are no longer waiting on adding the block to consensus, so it is no // longer pending - t.removeFromPending(blk) + blkID := blk.ID() + delete(t.pending, blkID) - var ( - parentID = blk.Parent() - blkID = blk.ID() - ) + parentID := blk.Parent() if !t.canIssueChildOn(parentID) || t.Consensus.Processing(blkID) { // If the parent isn't processing or the last accepted block, then this // block is effectively rejected. @@ -1026,7 +1013,7 @@ func (t *Transitive) deliver( t.sendQuery(ctx, blkID, blk.Bytes(), push) } - t.removeFromPending(blk) + delete(t.pending, blkID) if err := t.blocked.Fulfill(ctx, blkID); err != nil { return err } @@ -1036,7 +1023,7 @@ func (t *Transitive) deliver( } for _, blk := range dropped { blkID := blk.ID() - t.removeFromPending(blk) + delete(t.pending, blkID) if err := t.blocked.Abandon(ctx, blkID); err != nil { return err } @@ -1063,10 +1050,6 @@ func (t *Transitive) pendingContains(blkID ids.ID) bool { return ok } -func (t *Transitive) removeFromPending(blk snowman.Block) { - delete(t.pending, blk.ID()) -} - func (t *Transitive) addToNonVerifieds(blk snowman.Block) { // don't add this blk if it's decided or processing. blkID := blk.ID() @@ -1176,6 +1159,13 @@ func (t *Transitive) getProcessingAncestor(ctx context.Context, initialVote ids. } } +// canDependOn reports true if it is guaranteed for the provided block ID to +// eventually either be fulfilled or abandoned. +func (t *Transitive) canDependOn(blkID ids.ID) bool { + _, isPending := t.pending[blkID] + return isPending || t.blkReqs.HasValue(blkID) +} + // canIssueChildOn reports true if it is valid for a child of parentID to be // verified and added to consensus. 
func (t *Transitive) canIssueChildOn(parentID ids.ID) bool { From 267c02023a59cc9dfa24f232f76b859102c3b364 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 27 Jun 2024 16:57:22 -0400 Subject: [PATCH 080/102] Replace `wasIssued` with `shouldIssueBlock` (#3131) --- snow/engine/snowman/transitive.go | 46 +- snow/engine/snowman/transitive_test.go | 666 +++++++++++++++---------- 2 files changed, 418 insertions(+), 294 deletions(-) diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 82ab3071f9c2..fc3bea56c41a 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -257,7 +257,7 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 issuedMetric = t.metrics.issued.WithLabelValues(unknownSource) } - if t.wasIssued(blk) { + if !t.shouldIssueBlock(blk) { t.metrics.numUselessPutBytes.Add(float64(len(blkBytes))) } @@ -335,7 +335,7 @@ func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID return nil } - if t.wasIssued(blk) { + if !t.shouldIssueBlock(blk) { t.metrics.numUselessPushQueryBytes.Add(float64(len(blkBytes))) } @@ -737,7 +737,7 @@ func (t *Transitive) issueFrom( ) error { // issue [blk] and its ancestors to consensus. blkID := blk.ID() - for !t.wasIssued(blk) { + for t.shouldIssueBlock(blk) { err := t.issue(ctx, nodeID, blk, false, issuedMetric) if err != nil { return err @@ -775,7 +775,7 @@ func (t *Transitive) issueWithAncestors( ) error { blkID := blk.ID() // issue [blk] and its ancestors into consensus - for !t.wasIssued(blk) { + for t.shouldIssueBlock(blk) { err := t.issue(ctx, t.Ctx.NodeID, blk, true, issuedMetric) if err != nil { return err @@ -798,14 +798,6 @@ func (t *Transitive) issueWithAncestors( return t.blocked.Abandon(ctx, blkID) } -// If the block has been decided, then it is marked as having been issued. -// If the block is processing, then it was issued. 
-// If the block is queued to be added to consensus, then it was issued. -func (t *Transitive) wasIssued(blk snowman.Block) bool { - blkID := blk.ID() - return t.isDecided(blk) || t.Consensus.Processing(blkID) || t.pendingContains(blkID) -} - // Issue [blk] to consensus once its ancestors have been issued. // If [push] is true, a push query will be used. Otherwise, a pull query will be // used. @@ -1044,12 +1036,6 @@ func (t *Transitive) deliver( return nil } -// Returns true if the block whose ID is [blkID] is waiting to be issued to consensus -func (t *Transitive) pendingContains(blkID ids.ID) bool { - _, ok := t.pending[blkID] - return ok -} - func (t *Transitive) addToNonVerifieds(blk snowman.Block) { // don't add this blk if it's decided or processing. blkID := blk.ID() @@ -1159,6 +1145,30 @@ func (t *Transitive) getProcessingAncestor(ctx context.Context, initialVote ids. } } +// shouldIssueBlock returns true if the provided block should be enqueued for +// issuance. If the block is already decided, already enqueued, or has already +// been issued, this function will return false. +func (t *Transitive) shouldIssueBlock(blk snowman.Block) bool { + height := blk.Height() + lastAcceptedID, lastAcceptedHeight := t.Consensus.LastAccepted() + if height <= lastAcceptedHeight { + return false // block is either accepted or rejected + } + + // This is guaranteed not to underflow because the above check ensures + // [height] > 0. + parentHeight := height - 1 + parentID := blk.Parent() + if parentHeight == lastAcceptedHeight && parentID != lastAcceptedID { + return false // the parent was rejected + } + + blkID := blk.ID() + _, isPending := t.pending[blkID] + return !isPending && // If the block is already pending, don't issue it again. + !t.Consensus.Processing(blkID) // If the block was previously issued, don't issue it again. +} + // canDependOn reports true if it is guaranteed for the provided block ID to // eventually either be fulfilled or abandoned. 
func (t *Transitive) canDependOn(blkID ids.ID) bool { diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 18a1d320495e..310a472b8009 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "errors" + "fmt" "testing" "time" @@ -32,11 +33,10 @@ import ( ) var ( - errUnknownBlock = errors.New("unknown block") - errUnknownBytes = errors.New("unknown bytes") - errInvalid = errors.New("invalid") - errUnexpectedCall = errors.New("unexpected call") - errTest = errors.New("non-nil test") + errUnknownBlock = errors.New("unknown block") + errUnknownBytes = errors.New("unknown bytes") + errInvalid = errors.New("invalid") + errTest = errors.New("non-nil test") ) func MakeGetBlockF(blks ...[]*snowmantest.Block) func(context.Context, ids.ID) (snowman.Block, error) { @@ -1676,287 +1676,49 @@ func TestEngineBuildBlockLimit(t *testing.T) { require.True(queried) } -func TestEngineReceiveNewRejectedBlock(t *testing.T) { +func TestEngineDropRejectedBlockOnReceipt(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) - - acceptedBlk := snowmantest.BuildChild(snowmantest.Genesis) - rejectedBlk := snowmantest.BuildChild(snowmantest.Genesis) - pendingBlk := snowmantest.BuildChild(rejectedBlk) - - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, acceptedBlk.Bytes()): - return acceptedBlk, nil - case bytes.Equal(b, rejectedBlk.Bytes()): - return rejectedBlk, nil - case bytes.Equal(b, pendingBlk.Bytes()): - return pendingBlk, nil - default: - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - } - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case snowmantest.GenesisID: - return snowmantest.Genesis, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - default: - return nil, 
errUnknownBlock - } - } - - var ( - asked bool - reqID uint32 - ) - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { - asked = true - reqID = rID - } - - require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - - require.True(asked) - - require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - - sender.SendPullQueryF = nil - asked = false - - sender.SendGetF = func(_ context.Context, _ ids.NodeID, rID uint32, _ ids.ID) { - asked = true - reqID = rID - } - - require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - - require.True(asked) - - rejectedBlk.StatusV = choices.Rejected - - require.NoError(te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes())) - - require.Zero(te.blkReqs.Len()) -} - -func TestEngineRejectionAmplification(t *testing.T) { - require := require.New(t) - - vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) - - acceptedBlk := snowmantest.BuildChild(snowmantest.Genesis) - rejectedBlk := snowmantest.BuildChild(snowmantest.Genesis) - pendingBlk := snowmantest.BuildChild(rejectedBlk) - - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, acceptedBlk.Bytes()): - return acceptedBlk, nil - case bytes.Equal(b, rejectedBlk.Bytes()): - return rejectedBlk, nil - case bytes.Equal(b, pendingBlk.Bytes()): - return pendingBlk, nil - default: - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - } - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case snowmantest.GenesisID: - return snowmantest.Genesis, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - default: - return nil, errUnknownBlock - } - } - - var ( - queried bool - reqID uint32 - ) - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { - queried = true - 
reqID = rID - } - - require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - - require.True(queried) - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case snowmantest.GenesisID: - return snowmantest.Genesis, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - default: - return nil, errUnknownBlock - } - } - - require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - - require.Zero(te.Consensus.NumProcessing()) - - queried = false - var asked bool - sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID, uint64) { - queried = true - } - sender.SendGetF = func(_ context.Context, _ ids.NodeID, rID uint32, blkID ids.ID) { - asked = true - reqID = rID - - require.Equal(rejectedBlk.ID(), blkID) - } - - require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - - require.False(queried) - require.True(asked) - - require.NoError(te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes())) - - require.False(queried) -} - -// Test that the node will not issue a block into consensus that it knows will -// be rejected because the parent is rejected. 
-func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) { - require := require.New(t) - - vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) - - acceptedBlk := snowmantest.BuildChild(snowmantest.Genesis) - rejectedBlk := snowmantest.BuildChild(snowmantest.Genesis) - pendingBlk := snowmantest.BuildChild(rejectedBlk) - pendingBlk.RejectV = errUnexpectedCall - - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, acceptedBlk.Bytes()): - return acceptedBlk, nil - case bytes.Equal(b, rejectedBlk.Bytes()): - return rejectedBlk, nil - case bytes.Equal(b, pendingBlk.Bytes()): - return pendingBlk, nil - default: - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - } - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case snowmantest.GenesisID: - return snowmantest.Genesis, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - case rejectedBlk.ID(): - return rejectedBlk, nil - default: - return nil, errUnknownBlock - } - } - - var ( - queried bool - reqID uint32 - ) - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { - queried = true - reqID = rID - } - - require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - - require.True(queried) - - require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - - require.Zero(te.Consensus.NumProcessing()) - - require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - - require.Zero(te.Consensus.NumProcessing()) - - require.Empty(te.pending) -} - -// Test that the node will not issue a block into consensus that it knows will -// be rejected because the parent is failing verification. 
-func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) { - require := require.New(t) + nodeID, _, sender, vm, te := setup(t, DefaultConfig(t)) - vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) + // Ignore outbound chits + sender.SendChitsF = func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) {} acceptedBlk := snowmantest.BuildChild(snowmantest.Genesis) - rejectedBlk := snowmantest.BuildChild(snowmantest.Genesis) - rejectedBlk.VerifyV = errUnexpectedCall - pendingBlk := snowmantest.BuildChild(rejectedBlk) - pendingBlk.VerifyV = errUnexpectedCall - - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, acceptedBlk.Bytes()): - return acceptedBlk, nil - case bytes.Equal(b, rejectedBlk.Bytes()): - return rejectedBlk, nil - case bytes.Equal(b, pendingBlk.Bytes()): - return pendingBlk, nil - default: - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - } - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case snowmantest.GenesisID: - return snowmantest.Genesis, nil - default: - return nil, errUnknownBlock - } - } - - var ( - queried bool - reqID uint32 + rejectedChain := snowmantest.BuildDescendants(snowmantest.Genesis, 2) + vm.ParseBlockF = MakeParseBlockF( + []*snowmantest.Block{ + snowmantest.Genesis, + acceptedBlk, + }, + rejectedChain, ) - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { - queried = true - reqID = rID + vm.GetBlockF = MakeGetBlockF([]*snowmantest.Block{ + snowmantest.Genesis, + acceptedBlk, + }) + + // Track outbound queries + var queryRequestIDs []uint32 + sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { + require.Equal(set.Of(nodeID), nodeIDs) + queryRequestIDs = append(queryRequestIDs, requestID) } - require.NoError(te.Put(context.Background(), vdr, 
0, acceptedBlk.Bytes())) - require.True(queried) + // Issue [acceptedBlk] to the engine. This + require.NoError(te.PushQuery(context.Background(), nodeID, 0, acceptedBlk.Bytes(), acceptedBlk.Height())) + require.Len(queryRequestIDs, 1) - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case snowmantest.GenesisID: - return snowmantest.Genesis, nil - case rejectedBlk.ID(): - return rejectedBlk, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - default: - return nil, errUnknownBlock - } - } - - require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) + // Vote for [acceptedBlk] and cause it to be accepted. + require.NoError(te.Chits(context.Background(), nodeID, queryRequestIDs[0], acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) + require.Len(queryRequestIDs, 1) // Shouldn't have caused another query + require.Equal(choices.Accepted, acceptedBlk.Status()) - require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - require.Zero(te.Consensus.NumProcessing()) - require.Empty(te.pending) + // Attempt to issue rejectedChain[1] to the engine. This should be dropped + // because the engine knows it has rejected it's parent rejectedChain[0]. + require.NoError(te.PushQuery(context.Background(), nodeID, 0, rejectedChain[1].Bytes(), acceptedBlk.Height())) + require.Len(queryRequestIDs, 1) // Shouldn't have caused another query + require.Zero(te.blkReqs.Len()) } // Test that the node will not gossip a block that isn't preferred. @@ -2988,6 +2750,202 @@ func TestEngineEarlyTerminateVoterRegression(t *testing.T) { require.Equal(choices.Processing, chain[2].Status()) } +// Voting for an unissued cached block that fails verification should not +// register any dependencies. +// +// Full blockchain structure: +// +// Genesis +// / \ +// 0 2 +// | | +// 1 3 +// +// We first issue block 2, and then block 3 fails verification. 
This causes +// block 3 to be added to the invalid blocks cache. +// +// We then issue block 0, issue block 1, and accept block 0. +// +// If we then vote for block 3, the vote should be dropped and trigger a repoll +// which could then be used to accept block 1. +func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { + require := require.New(t) + + config := DefaultConfig(t) + nodeID := ids.GenerateTestNodeID() + require.NoError(config.Validators.AddStaker(config.Ctx.SubnetID, nodeID, nil, ids.Empty, 1)) + + sender := &common.SenderTest{ + T: t, + SendChitsF: func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) {}, + } + sender.Default(true) + config.Sender = sender + + var ( + acceptedChain = snowmantest.BuildDescendants(snowmantest.Genesis, 2) + rejectedChain = snowmantest.BuildDescendants(snowmantest.Genesis, 2) + ) + rejectedChain[1].VerifyV = errInvalid + + vm := &block.TestVM{ + TestVM: common.TestVM{ + T: t, + InitializeF: func( + context.Context, + *snow.Context, + database.Database, + []byte, + []byte, + []byte, + chan<- common.Message, + []*common.Fx, + common.AppSender, + ) error { + return nil + }, + SetStateF: func(context.Context, snow.State) error { + return nil + }, + }, + ParseBlockF: MakeParseBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + acceptedChain, + rejectedChain, + ), + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + ), + SetPreferenceF: func(context.Context, ids.ID) error { + return nil + }, + LastAcceptedF: MakeLastAcceptedBlockF( + snowmantest.Genesis, + acceptedChain, + rejectedChain, + ), + } + vm.Default(true) + config.VM = vm + + engine, err := New(config) + require.NoError(err) + require.NoError(engine.Start(context.Background(), 0)) + + var pollRequestIDs []uint32 + sender.SendPullQueryF = func(_ context.Context, polledNodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { + require.Equal(set.Of(nodeID), polledNodeIDs) + pollRequestIDs = 
append(pollRequestIDs, requestID) + } + + // Issue rejectedChain[0] to consensus. + require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + rejectedChain[0].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 1) + + // In order to attempt to issue rejectedChain[1], the engine expects the VM + // to be willing to provide rejectedChain[0]. + vm.GetBlockF = MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + rejectedChain[:1], + ) + + // Attempt to issue rejectedChain[1] which should add it to the invalid + // block cache. + require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + rejectedChain[1].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 1) + + _, wasCached := engine.nonVerifiedCache.Get(rejectedChain[1].ID()) + require.True(wasCached) + + // Issue acceptedChain[0] to consensus. + require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + acceptedChain[0].Bytes(), + 0, + )) + // Because acceptedChain[0] isn't initially preferred, a new poll won't be + // created. + require.Len(pollRequestIDs, 1) + + // In order to vote for acceptedChain[0], the engine expects the VM to be + // willing to provide it. + vm.GetBlockF = MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + acceptedChain[:1], + rejectedChain[:1], + ) + + // Accept acceptedChain[0] and reject rejectedChain[0]. + require.NoError(engine.Chits( + context.Background(), + nodeID, + pollRequestIDs[0], + acceptedChain[0].ID(), + acceptedChain[0].ID(), + snowmantest.GenesisID, + )) + // There are no processing blocks, so no new poll should be created. + require.Len(pollRequestIDs, 1) + require.Equal(choices.Accepted, acceptedChain[0].Status()) + require.Equal(choices.Rejected, rejectedChain[0].Status()) + + // Issue acceptedChain[1] to consensus. 
+ require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + acceptedChain[1].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 2) + + // Vote for the transitively rejected rejectedChain[1]. This should cause a + // repoll. + require.NoError(engine.Chits( + context.Background(), + nodeID, + pollRequestIDs[1], + rejectedChain[1].ID(), + rejectedChain[1].ID(), + snowmantest.GenesisID, + )) + require.Len(pollRequestIDs, 3) + + // In order to vote for acceptedChain[1], the engine expects the VM to be + // willing to provide it. + vm.GetBlockF = MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + acceptedChain, + rejectedChain[:1], + ) + + // Accept acceptedChain[1]. + require.NoError(engine.Chits( + context.Background(), + nodeID, + pollRequestIDs[2], + acceptedChain[1].ID(), + acceptedChain[1].ID(), + snowmantest.GenesisID, + )) + require.Len(pollRequestIDs, 3) + require.Equal(choices.Accepted, acceptedChain[1].Status()) +} + func TestGetProcessingAncestor(t *testing.T) { var ( ctx = snowtest.ConsensusContext( @@ -3154,3 +3112,159 @@ func TestGetProcessingAncestor(t *testing.T) { }) } } + +// Test the engine's classification for blocks to either be dropped or try +// issuance. +// +// Full blockchain structure: +// +// Genesis +// / \ +// 0 7 +// / \ | +// 1 4 8 +// | | / \ +// 2 5 9 11 +// | | | +// 3 6 10 +// +// Genesis and 0 are accepted. +// 1 is issued. +// 5 and 9 are pending. 
+// +// Structure known to engine: +// +// Genesis +// / +// 0 +// / +// 1 +// +// 5 9 +func TestShouldIssueBlock(t *testing.T) { + var ( + ctx = snowtest.ConsensusContext( + snowtest.Context(t, snowtest.PChainID), + ) + chain0Through3 = snowmantest.BuildDescendants(snowmantest.Genesis, 4) + chain4Through6 = snowmantest.BuildDescendants(chain0Through3[0], 3) + chain7Through10 = snowmantest.BuildDescendants(snowmantest.Genesis, 4) + chain11Through11 = snowmantest.BuildDescendants(chain7Through10[1], 1) + blocks = join(chain0Through3, chain4Through6, chain7Through10, chain11Through11) + ) + + require.NoError(t, blocks[0].Accept(context.Background())) + + c := &snowman.Topological{} + require.NoError(t, c.Initialize( + ctx, + snowball.DefaultParameters, + blocks[0].ID(), + blocks[0].Height(), + blocks[0].Timestamp(), + )) + require.NoError(t, c.Add(blocks[1])) + + engine := &Transitive{ + Config: Config{ + Consensus: c, + }, + pending: map[ids.ID]snowman.Block{ + blocks[5].ID(): blocks[5], + blocks[9].ID(): blocks[9], + }, + } + + tests := []struct { + name string + block snowman.Block + expectedShouldIssue bool + }{ + { + name: "genesis", + block: snowmantest.Genesis, + expectedShouldIssue: false, + }, + { + name: "last accepted", + block: blocks[0], + expectedShouldIssue: false, + }, + { + name: "already processing", + block: blocks[1], + expectedShouldIssue: false, + }, + { + name: "next block to enqueue for issuance on top of a processing block", + block: blocks[2], + expectedShouldIssue: true, + }, + { + name: "block to enqueue for issuance which depends on another block", + block: blocks[3], + expectedShouldIssue: true, + }, + { + name: "next block to enqueue for issuance on top of an accepted block", + block: blocks[4], + expectedShouldIssue: true, + }, + { + name: "already pending block", + block: blocks[5], + expectedShouldIssue: false, + }, + { + name: "block to enqueue on top of a pending block", + block: blocks[6], + expectedShouldIssue: true, + }, + { + 
name: "block was directly rejected", + block: blocks[7], + expectedShouldIssue: false, + }, + { + name: "block was transitively rejected", + block: blocks[8], + expectedShouldIssue: false, + }, + { + name: "block was transitively rejected but that is not known and was marked as pending", + block: blocks[9], + expectedShouldIssue: false, + }, + { + name: "block was transitively rejected but that is not known and is built on top of pending", + block: blocks[10], + expectedShouldIssue: true, + }, + { + name: "block was transitively rejected but that is not known", + block: blocks[11], + expectedShouldIssue: true, + }, + } + for i, test := range tests { + t.Run(fmt.Sprintf("%d %s", i-1, test.name), func(t *testing.T) { + shouldIssue := engine.shouldIssueBlock(test.block) + require.Equal(t, test.expectedShouldIssue, shouldIssue) + }) + } +} + +// join the provided slices into a single slice. +// +// TODO: Use slices.Concat once the minimum go version is 1.22. +func join[T any](slices ...[]T) []T { + size := 0 + for _, s := range slices { + size += len(s) + } + newSlice := make([]T, 0, size) + for _, s := range slices { + newSlice = append(newSlice, s...) 
+ } + return newSlice +} From c587d91960e6e6aebd2ee595d1b7aa0446e2ee25 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 27 Jun 2024 17:27:45 -0400 Subject: [PATCH 081/102] Remove parent lookup from issue (#3132) --- snow/engine/snowman/transitive.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index fc3bea56c41a..d9817b502459 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -827,12 +827,10 @@ func (t *Transitive) issue( issuedMetric: issuedMetric, } - // block on the parent if needed - var ( - parentID = blk.Parent() - deps []ids.ID - ) - if parent, err := t.getBlock(ctx, parentID); err != nil || !(t.isDecided(parent) || t.Consensus.Processing(parentID)) { + // We know that shouldIssueBlock(blk) is true. This means that parent is + // either the last accepted block or is not decided. + var deps []ids.ID + if parentID := blk.Parent(); !t.canIssueChildOn(parentID) { t.Ctx.Log.Verbo("block waiting for parent to be issued", zap.Stringer("blkID", blkID), zap.Stringer("parentID", parentID), From 3dfd8fe475ff4f0eca0ff2f211c553e16c78129d Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 28 Jun 2024 12:07:40 -0400 Subject: [PATCH 082/102] Remove status usage from consensus (#3140) --- snow/engine/snowman/transitive.go | 56 ++++++++++++------------------- 1 file changed, 21 insertions(+), 35 deletions(-) diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index d9817b502459..40106424f12e 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" 
"github.com/ava-labs/avalanchego/snow/consensus/snowman/poll" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -660,15 +659,6 @@ func (t *Transitive) buildBlocks(ctx context.Context) error { } t.numBuilt.Inc() - // a newly created block is expected to be processing. If this check - // fails, there is potentially an error in the VM this engine is running - if status := blk.Status(); status != choices.Processing { - t.Ctx.Log.Warn("attempting to issue block with unexpected status", - zap.Stringer("expectedStatus", choices.Processing), - zap.Stringer("status", status), - ) - } - // The newly created block should be built on top of the preferred block. // Otherwise, the new block doesn't have the best chance of being confirmed. parentID := blk.Parent() @@ -1035,15 +1025,18 @@ func (t *Transitive) deliver( } func (t *Transitive) addToNonVerifieds(blk snowman.Block) { - // don't add this blk if it's decided or processing. + // If this block is processing, we don't need to add it to non-verifieds. blkID := blk.ID() - if t.isDecided(blk) || t.Consensus.Processing(blkID) { + if t.Consensus.Processing(blkID) { return } parentID := blk.Parent() - // we might still need this block so we can bubble votes to the parent - // only add blocks with parent already in the tree or processing. - // decided parents should not be in this map. + // We might still need this block so we can bubble votes to the parent. + // + // If the non-verified set contains the parentID, then we know that the + // parent is not decided and therefore blk is not decided. + // Similarly, if the parent is processing, then the parent is not decided + // and therefore blk is not decided. if t.nonVerifieds.Has(parentID) || t.Consensus.Processing(parentID) { t.nonVerifieds.Add(blkID, parentID) t.nonVerifiedCache.Put(blkID, blk) @@ -1132,7 +1125,6 @@ func (t *Transitive) getProcessingAncestor(ctx context.Context, initialVote ids. 
zap.String("reason", "bubbled vote already decided"), zap.Stringer("initialVoteID", initialVote), zap.Stringer("bubbledVoteID", bubbledVote), - zap.Stringer("status", blk.Status()), zap.Uint64("height", blk.Height()), ) t.numProcessingAncestorFetchesDropped.Inc() @@ -1147,18 +1139,8 @@ func (t *Transitive) getProcessingAncestor(ctx context.Context, initialVote ids. // issuance. If the block is already decided, already enqueued, or has already // been issued, this function will return false. func (t *Transitive) shouldIssueBlock(blk snowman.Block) bool { - height := blk.Height() - lastAcceptedID, lastAcceptedHeight := t.Consensus.LastAccepted() - if height <= lastAcceptedHeight { - return false // block is either accepted or rejected - } - - // This is guaranteed not to underflow because the above check ensures - // [height] > 0. - parentHeight := height - 1 - parentID := blk.Parent() - if parentHeight == lastAcceptedHeight && parentID != lastAcceptedID { - return false // the parent was rejected + if t.isDecided(blk) { + return false } blkID := blk.ID() @@ -1181,14 +1163,18 @@ func (t *Transitive) canIssueChildOn(parentID ids.ID) bool { return parentID == lastAcceptedID || t.Consensus.Processing(parentID) } -// isDecided reports true if the provided block's status is Accepted, Rejected, -// or if the block's height implies that the block is either Accepted or -// Rejected. +// isDecided reports true if the provided block's height implies that the block +// is either Accepted or Rejected. func (t *Transitive) isDecided(blk snowman.Block) bool { - if blk.Status().Decided() { - return true + height := blk.Height() + lastAcceptedID, lastAcceptedHeight := t.Consensus.LastAccepted() + if height <= lastAcceptedHeight { + return true // block is either accepted or rejected } - _, lastAcceptedHeight := t.Consensus.LastAccepted() - return blk.Height() <= lastAcceptedHeight + // This is guaranteed not to underflow because the above check ensures + // [height] > 0. 
+ parentHeight := height - 1 + parentID := blk.Parent() + return parentHeight == lastAcceptedHeight && parentID != lastAcceptedID // the parent was rejected } From 2cb8147afb3dc4b3a249bfe7f51534290284c6a6 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Fri, 28 Jun 2024 16:04:30 -0400 Subject: [PATCH 083/102] Fix bootstrapping warn log (#3156) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Stephen Buttolph --- node/node.go | 47 ++++++++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/node/node.go b/node/node.go index 63946140258d..c7d1b1991e5c 100644 --- a/node/node.go +++ b/node/node.go @@ -400,6 +400,9 @@ type Node struct { // Specifies how much disk usage each peer can cause before // we rate-limit them. diskTargeter tracker.Targeter + + // Closed when a sufficient amount of bootstrap nodes are connected to + onSufficientlyConnected chan struct{} } /* @@ -596,36 +599,19 @@ func (n *Node) initNetworking(reg prometheus.Registerer) error { } } + n.onSufficientlyConnected = make(chan struct{}) numBootstrappers := n.bootstrappers.Count(constants.PrimaryNetworkID) requiredConns := (3*numBootstrappers + 3) / 4 if requiredConns > 0 { - onSufficientlyConnected := make(chan struct{}) consensusRouter = &beaconManager{ Router: consensusRouter, beacons: n.bootstrappers, requiredConns: int64(requiredConns), - onSufficientlyConnected: onSufficientlyConnected, + onSufficientlyConnected: n.onSufficientlyConnected, } - - // Log a warning if we aren't able to connect to a sufficient portion of - // nodes. 
- go func() { - timer := time.NewTimer(n.Config.BootstrapBeaconConnectionTimeout) - defer timer.Stop() - - select { - case <-timer.C: - if n.shuttingDown.Get() { - return - } - n.Log.Warn("failed to connect to bootstrap nodes", - zap.Stringer("bootstrappers", n.bootstrappers), - zap.Duration("duration", n.Config.BootstrapBeaconConnectionTimeout), - ) - case <-onSufficientlyConnected: - } - }() + } else { + close(n.onSufficientlyConnected) } // add node configs to network config @@ -715,6 +701,25 @@ func (n *Node) Dispatch() error { n.Shutdown(1) }) + // Log a warning if we aren't able to connect to a sufficient portion of + // nodes. + go func() { + timer := time.NewTimer(n.Config.BootstrapBeaconConnectionTimeout) + defer timer.Stop() + + select { + case <-timer.C: + if n.shuttingDown.Get() { + return + } + n.Log.Warn("failed to connect to bootstrap nodes", + zap.Stringer("bootstrappers", n.bootstrappers), + zap.Duration("duration", n.Config.BootstrapBeaconConnectionTimeout), + ) + case <-n.onSufficientlyConnected: + } + }() + // Add state sync nodes to the peer network for i, peerIP := range n.Config.StateSyncIPs { n.Net.ManuallyTrack(n.Config.StateSyncIDs[i], peerIP) From 8fdf55bd206080f04538999d1e85da75d630f7ea Mon Sep 17 00:00:00 2001 From: hatti Date: Mon, 1 Jul 2024 20:29:33 +0800 Subject: [PATCH 084/102] chore: fix some comment (#3144) --- snow/engine/snowman/bootstrap/interval/state.go | 2 +- utils/dynamicip/opendns_resolver.go | 2 +- vms/avm/vm_benchmark_test.go | 2 +- vms/secp256k1fx/tx.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/snow/engine/snowman/bootstrap/interval/state.go b/snow/engine/snowman/bootstrap/interval/state.go index 8ba06824eea2..c6b7c0332aa8 100644 --- a/snow/engine/snowman/bootstrap/interval/state.go +++ b/snow/engine/snowman/bootstrap/interval/state.go @@ -78,7 +78,7 @@ func GetBlockIterator(db database.Iteratee) database.Iterator { return db.NewIteratorWithPrefix(blockPrefix) } -// GetBlockIterator returns a 
block iterator that will produce values +// GetBlockIteratorWithStart returns a block iterator that will produce values // corresponding to persisted blocks in order of increasing height starting at // [height]. func GetBlockIteratorWithStart(db database.Iteratee, height uint64) database.Iterator { diff --git a/utils/dynamicip/opendns_resolver.go b/utils/dynamicip/opendns_resolver.go index ccf75653c81e..ba245acfe653 100644 --- a/utils/dynamicip/opendns_resolver.go +++ b/utils/dynamicip/opendns_resolver.go @@ -20,7 +20,7 @@ var ( _ Resolver = (*openDNSResolver)(nil) ) -// IFConfigResolves resolves our public IP using openDNS +// openDNSResolver resolves our public IP using openDNS type openDNSResolver struct { resolver *net.Resolver } diff --git a/vms/avm/vm_benchmark_test.go b/vms/avm/vm_benchmark_test.go index 096ed51e13bc..8a342cfa2477 100644 --- a/vms/avm/vm_benchmark_test.go +++ b/vms/avm/vm_benchmark_test.go @@ -60,7 +60,7 @@ func BenchmarkLoadUser(b *testing.B) { } } -// GetAllUTXOsBenchmark is a helper func to benchmark the GetAllUTXOs depending on the size +// getAllUTXOsBenchmark is a helper func to benchmark the GetAllUTXOs depending on the size func getAllUTXOsBenchmark(b *testing.B, utxoCount int, randSrc rand.Source) { require := require.New(b) diff --git a/vms/secp256k1fx/tx.go b/vms/secp256k1fx/tx.go index 5cc483c7c3fc..c7a9965e2271 100644 --- a/vms/secp256k1fx/tx.go +++ b/vms/secp256k1fx/tx.go @@ -13,7 +13,7 @@ var _ UnsignedTx = (*TestTx)(nil) // TestTx is a minimal implementation of a Tx type TestTx struct{ UnsignedBytes []byte } -// UnsignedBytes returns Bytes +// Bytes returns UnsignedBytes func (tx *TestTx) Bytes() []byte { return tx.UnsignedBytes } From ffed3674fd8232353c02b6674febc60f542f950c Mon Sep 17 00:00:00 2001 From: marun Date: Mon, 1 Jul 2024 14:04:31 -0700 Subject: [PATCH 085/102] [ci] Add actionlint job (#3160) --- .github/actionlint.yml | 5 +++++ .github/workflows/build-linux-binaries.yml | 8 +++---- 
.github/workflows/build-macos-release.yml | 8 +++---- .github/workflows/build-public-ami.yml | 8 +++---- .../workflows/build-ubuntu-amd64-release.yml | 8 +++---- .../workflows/build-ubuntu-arm64-release.yml | 8 +++---- .github/workflows/build-win-release.yml | 4 ++-- .github/workflows/ci.yml | 5 ++++- .../workflows/publish_antithesis_images.yml | 4 ++-- .github/workflows/trigger-antithesis-runs.yml | 22 ++++++++++--------- scripts/actionlint.sh | 7 ++++++ 11 files changed, 52 insertions(+), 35 deletions(-) create mode 100644 .github/actionlint.yml create mode 100755 scripts/actionlint.sh diff --git a/.github/actionlint.yml b/.github/actionlint.yml new file mode 100644 index 000000000000..2e6d753c2282 --- /dev/null +++ b/.github/actionlint.yml @@ -0,0 +1,5 @@ +self-hosted-runner: + labels: + - custom-arm64-focal + - custom-arm64-jammy + - net-outage-sim diff --git a/.github/workflows/build-linux-binaries.yml b/.github/workflows/build-linux-binaries.yml index 9f5cdfe97475..bcaf374f3935 100644 --- a/.github/workflows/build-linux-binaries.yml +++ b/.github/workflows/build-linux-binaries.yml @@ -40,14 +40,14 @@ jobs: if: "${{ github.event.inputs.tag == '' }}" id: get_tag_from_git run: | - echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + echo "TAG=${GITHUB_REF/refs\/tags\//}" >> "$GITHUB_ENV" shell: bash - name: Try to get tag from workflow dispatch if: "${{ github.event.inputs.tag != '' }}" id: get_tag_from_workflow run: | - echo "TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV + echo "TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" shell: bash - name: Create tgz package structure and upload to S3 @@ -101,14 +101,14 @@ jobs: if: "${{ github.event.inputs.tag == '' }}" id: get_tag_from_git run: | - echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + echo "TAG=${GITHUB_REF/refs\/tags\//}" >> "$GITHUB_ENV" shell: bash - name: Try to get tag from workflow dispatch if: "${{ github.event.inputs.tag != '' }}" id: get_tag_from_workflow run: | - echo "TAG=${{ 
github.event.inputs.tag }}" >> $GITHUB_ENV + echo "TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" shell: bash - name: Create tgz package structure and upload to S3 diff --git a/.github/workflows/build-macos-release.yml b/.github/workflows/build-macos-release.yml index 2a4bfb1c45d1..8a7f641ed3f7 100644 --- a/.github/workflows/build-macos-release.yml +++ b/.github/workflows/build-macos-release.yml @@ -35,18 +35,18 @@ jobs: if: "${{ github.event.inputs.tag == '' }}" id: get_tag_from_git run: | - echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + echo "TAG=${GITHUB_REF/refs\/tags\//}" >> "$GITHUB_ENV" shell: bash - name: Try to get tag from workflow dispatch if: "${{ github.event.inputs.tag != '' }}" id: get_tag_from_workflow run: | - echo "TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV + echo "TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" shell: bash - name: Create zip file - run: 7z a avalanchego-macos-${TAG}.zip build/avalanchego + run: 7z a "avalanchego-macos-${TAG}.zip" build/avalanchego env: TAG: ${{ env.TAG }} @@ -63,7 +63,7 @@ jobs: aws-region: us-east-1 - name: Upload file to S3 - run: aws s3 cp avalanchego-macos-${{ env.TAG }}.zip s3://${BUCKET}/macos/ + run: aws s3 cp avalanchego-macos-${{ env.TAG }}.zip "s3://${BUCKET}/macos/" env: BUCKET: ${{ secrets.BUCKET }} diff --git a/.github/workflows/build-public-ami.yml b/.github/workflows/build-public-ami.yml index cc9082ab3e3c..314b110865a1 100644 --- a/.github/workflows/build-public-ami.yml +++ b/.github/workflows/build-public-ami.yml @@ -27,16 +27,16 @@ jobs: - name: Install aws cli run: | sudo apt update - sudo apt-get -y install python3-boto3=${PYTHON3_BOTO3_VERSION} + sudo apt-get -y install python3-boto3="${PYTHON3_BOTO3_VERSION}" - name: Get the tag id: get_tag run: | if [[ ${{ github.event_name }} == 'push' ]]; then - echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + echo "TAG=${GITHUB_REF/refs\/tags\//}" >> "$GITHUB_ENV" else - echo "TAG=${{ inputs.tag }}" >> $GITHUB_ENV + 
echo "TAG=${{ inputs.tag }}" >> "$GITHUB_ENV" fi shell: bash @@ -44,7 +44,7 @@ jobs: run: | if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then echo "Setting SKIP_CREATE_AMI to False" - echo "SKIP_CREATE_AMI=False" >> $GITHUB_ENV + echo "SKIP_CREATE_AMI=False" >> "$GITHUB_ENV" fi - name: Configure AWS credentials diff --git a/.github/workflows/build-ubuntu-amd64-release.yml b/.github/workflows/build-ubuntu-amd64-release.yml index 7c00b56d1224..ff26569570c2 100644 --- a/.github/workflows/build-ubuntu-amd64-release.yml +++ b/.github/workflows/build-ubuntu-amd64-release.yml @@ -38,14 +38,14 @@ jobs: if: "${{ github.event.inputs.tag == '' }}" id: get_tag_from_git run: | - echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + echo "TAG=${GITHUB_REF/refs\/tags\//}" >> "$GITHUB_ENV" shell: bash - name: Try to get tag from workflow dispatch if: "${{ github.event.inputs.tag != '' }}" id: get_tag_from_workflow run: | - echo "TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV + echo "TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" shell: bash - name: Create debian package @@ -88,14 +88,14 @@ jobs: if: "${{ github.event.inputs.tag == '' }}" id: get_tag_from_git run: | - echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + echo "TAG=${GITHUB_REF/refs\/tags\//}" >> "$GITHUB_ENV" shell: bash - name: Try to get tag from workflow dispatch if: "${{ github.event.inputs.tag != '' }}" id: get_tag_from_workflow run: | - echo "TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV + echo "TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" shell: bash - name: Configure AWS credentials diff --git a/.github/workflows/build-ubuntu-arm64-release.yml b/.github/workflows/build-ubuntu-arm64-release.yml index 096137b1a2ef..4d8fa841cbd8 100644 --- a/.github/workflows/build-ubuntu-arm64-release.yml +++ b/.github/workflows/build-ubuntu-arm64-release.yml @@ -38,14 +38,14 @@ jobs: if: "${{ github.event.inputs.tag == '' }}" id: get_tag_from_git run: | - echo 
"TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + echo "TAG=${GITHUB_REF/refs\/tags\//}" >> "$GITHUB_ENV" shell: bash - name: Try to get tag from workflow dispatch if: "${{ github.event.inputs.tag != '' }}" id: get_tag_from_workflow run: | - echo "TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV + echo "TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" shell: bash - name: Create debian package @@ -96,14 +96,14 @@ jobs: if: "${{ github.event.inputs.tag == '' }}" id: get_tag_from_git run: | - echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + echo "TAG=${GITHUB_REF/refs\/tags\//}" >> "$GITHUB_ENV" shell: bash - name: Try to get tag from workflow dispatch if: "${{ github.event.inputs.tag != '' }}" id: get_tag_from_workflow run: | - echo "TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV + echo "TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" shell: bash - name: Create debian package diff --git a/.github/workflows/build-win-release.yml b/.github/workflows/build-win-release.yml index 9d04e036b7e8..15502e003223 100644 --- a/.github/workflows/build-win-release.yml +++ b/.github/workflows/build-win-release.yml @@ -44,14 +44,14 @@ jobs: if: "${{ github.event.inputs.tag == '' }}" id: get_tag_from_git run: | - echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + echo "TAG=${GITHUB_REF/refs\/tags\//}" >> "$GITHUB_ENV" shell: bash - name: Try to get tag from workflow dispatch if: "${{ github.event.inputs.tag != '' }}" id: get_tag_from_workflow run: | - echo "TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV + echo "TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" shell: bash # Runs a single command using the runners shell diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7fc80756daed..68ee2585a3b7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,7 +37,7 @@ jobs: - name: Set timeout on Windows # Windows UT run slower and need a longer timeout shell: bash if: matrix.os == 'windows-2022' - run: echo 
"TIMEOUT=240s" >> $GITHUB_ENV + run: echo "TIMEOUT=240s" >> "$GITHUB_ENV" - name: build_test shell: bash run: ./scripts/build_test.sh @@ -213,6 +213,9 @@ jobs: - name: Run shellcheck shell: bash run: scripts/shellcheck.sh + - name: Run actionlint + shell: bash + run: scripts/actionlint.sh buf-lint: name: Protobuf Lint runs-on: ubuntu-latest diff --git a/.github/workflows/publish_antithesis_images.yml b/.github/workflows/publish_antithesis_images.yml index 8363ad73e975..8dc9d426f942 100644 --- a/.github/workflows/publish_antithesis_images.yml +++ b/.github/workflows/publish_antithesis_images.yml @@ -35,12 +35,12 @@ jobs: run: bash -x ./scripts/build_antithesis_images.sh env: IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} - TAG: ${{ github.events.inputs.image_tag || 'latest' }} + TAG: ${{ github.event.inputs.image_tag || 'latest' }} TEST_SETUP: avalanchego - name: Build and push images for xsvm test setup run: bash -x ./scripts/build_antithesis_images.sh env: IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} - TAG: ${{ github.events.inputs.image_tag || 'latest' }} + TAG: ${{ github.event.inputs.image_tag || 'latest' }} TEST_SETUP: xsvm diff --git a/.github/workflows/trigger-antithesis-runs.yml b/.github/workflows/trigger-antithesis-runs.yml index 0521b0770d79..f893b88da9ec 100644 --- a/.github/workflows/trigger-antithesis-runs.yml +++ b/.github/workflows/trigger-antithesis-runs.yml @@ -21,7 +21,8 @@ on: type: string jobs: - Run Antithesis Avalanchego Test Setup: + antithesis_avalanchego: + name: Run Antithesis Avalanchego Test Setup runs-on: ubuntu-latest steps: - uses: antithesishq/antithesis-trigger-action@v0.5 @@ -31,12 +32,13 @@ jobs: username: ${{ secrets.ANTITHESIS_USERNAME }} password: ${{ secrets.ANTITHESIS_PASSWORD }} github_token: ${{ secrets.ANTITHESIS_GH_PAT }} - config_image: antithesis-avalanchego-config@${{ github.events.inputs.image_tag }} - images: antithesis-avalanchego-workload@${{ github.events.inputs.image_tag 
}};antithesis-avalanchego-node@${{ github.events.inputs.image_tag }} - email_recipients: ${{ github.events.inputs.recipients }} + config_image: antithesis-avalanchego-config@${{ github.event.inputs.image_tag || 'latest' }} + images: antithesis-avalanchego-workload@${{ github.event.inputs.image_tag || 'latest' }};antithesis-avalanchego-node@${{ github.event.inputs.image_tag || 'latest' }} + email_recipients: ${{ github.event.inputs.recipients || secrets.ANTITHESIS_RECIPIENTS }} additional_parameters: |- - custom.duration=${{ github.events.inputs.duration }} - Run Antithesis XSVM Test Setup: + custom.duration=${{ github.event.inputs.duration || '0.5' }} + antithesis_xsvm: + name: Run Antithesis XSVM Test Setup runs-on: ubuntu-latest steps: - uses: antithesishq/antithesis-trigger-action@v0.5 @@ -46,8 +48,8 @@ jobs: username: ${{ secrets.ANTITHESIS_USERNAME }} password: ${{ secrets.ANTITHESIS_PASSWORD }} github_token: ${{ secrets.ANTITHESIS_GH_PAT }} - config_image: antithesis-xsvm-config@${{ github.events.inputs.image_tag }} - images: antithesis-xsvm-workload@${{ github.events.inputs.image_tag }};antithesis-xsvm-node@${{ github.events.inputs.image_tag }} - email_recipients: ${{ github.events.inputs.recipients }} + config_image: antithesis-xsvm-config@${{ github.event.inputs.image_tag || 'latest' }} + images: antithesis-xsvm-workload@${{ github.event.inputs.image_tag || 'latest' }};antithesis-xsvm-node@${{ github.event.inputs.image_tag || 'latest' }} + email_recipients: ${{ github.event.inputs.recipients || secrets.ANTITHESIS_RECIPIENTS }} additional_parameters: |- - custom.duration=${{ github.events.inputs.duration }} + custom.duration=${{ github.event.inputs.duration || '0.5' }} diff --git a/scripts/actionlint.sh b/scripts/actionlint.sh new file mode 100755 index 000000000000..bdc3083e6b65 --- /dev/null +++ b/scripts/actionlint.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -euo pipefail + +go install github.com/rhysd/actionlint/cmd/actionlint@v1.7.1 + 
+actionlint From ae35eeb61f6d7b86a13c4e1770ba1e4130a9fd78 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 1 Jul 2024 23:19:25 +0200 Subject: [PATCH 086/102] check router is closing in requests (#3157) Co-authored-by: Stephen Buttolph --- snow/networking/router/chain_router.go | 67 ++++++++++++ snow/networking/router/chain_router_test.go | 111 ++++++++++++++++++++ 2 files changed, 178 insertions(+) diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index 27bf891ab4f9..6af0984afc3f 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -31,6 +31,7 @@ import ( var ( errUnknownChain = errors.New("received message for unknown chain") errUnallowedNode = errors.New("received message from non-allowed node") + errClosing = errors.New("router is closing") _ Router = (*ChainRouter)(nil) _ benchlist.Benchable = (*ChainRouter)(nil) @@ -63,6 +64,7 @@ type ChainRouter struct { clock mockable.Clock log logging.Logger lock sync.Mutex + closing bool chainHandlers map[ids.ID]handler.Handler // It is only safe to call [RegisterResponse] with the router lock held. Any @@ -154,6 +156,18 @@ func (cr *ChainRouter) RegisterRequest( engineType p2p.EngineType, ) { cr.lock.Lock() + if cr.closing { + cr.log.Debug("dropping request", + zap.Stringer("nodeID", nodeID), + zap.Stringer("requestingChainID", requestingChainID), + zap.Stringer("respondingChainID", respondingChainID), + zap.Uint32("requestID", requestID), + zap.Stringer("messageOp", op), + zap.Error(errClosing), + ) + cr.lock.Unlock() + return + } // When we receive a response message type (Chits, Put, Accepted, etc.) // we validate that we actually sent the corresponding request. // Give this request a unique ID so we can do that validation. 
@@ -244,6 +258,17 @@ func (cr *ChainRouter) HandleInbound(ctx context.Context, msg message.InboundMes cr.lock.Lock() defer cr.lock.Unlock() + if cr.closing { + cr.log.Debug("dropping message", + zap.Stringer("messageOp", op), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", destinationChainID), + zap.Error(errClosing), + ) + msg.OnFinishedHandling() + return + } + // Get the chain, if it exists chain, exists := cr.chainHandlers[destinationChainID] if !exists { @@ -356,6 +381,7 @@ func (cr *ChainRouter) Shutdown(ctx context.Context) { cr.lock.Lock() prevChains := cr.chainHandlers cr.chainHandlers = map[ids.ID]handler.Handler{} + cr.closing = true cr.lock.Unlock() for _, chain := range prevChains { @@ -388,6 +414,13 @@ func (cr *ChainRouter) AddChain(ctx context.Context, chain handler.Handler) { defer cr.lock.Unlock() chainID := chain.Context().ChainID + if cr.closing { + cr.log.Debug("dropping add chain request", + zap.Stringer("chainID", chainID), + zap.Error(errClosing), + ) + return + } cr.log.Debug("registering chain with chain router", zap.Stringer("chainID", chainID), ) @@ -446,6 +479,14 @@ func (cr *ChainRouter) Connected(nodeID ids.NodeID, nodeVersion *version.Applica cr.lock.Lock() defer cr.lock.Unlock() + if cr.closing { + cr.log.Debug("dropping connected message", + zap.Stringer("nodeID", nodeID), + zap.Error(errClosing), + ) + return + } + connectedPeer, exists := cr.peers[nodeID] if !exists { connectedPeer = &peer{ @@ -493,6 +534,14 @@ func (cr *ChainRouter) Disconnected(nodeID ids.NodeID) { cr.lock.Lock() defer cr.lock.Unlock() + if cr.closing { + cr.log.Debug("dropping disconnected message", + zap.Stringer("nodeID", nodeID), + zap.Error(errClosing), + ) + return + } + peer := cr.peers[nodeID] delete(cr.peers, nodeID) if _, benched := cr.benched[nodeID]; benched { @@ -522,6 +571,15 @@ func (cr *ChainRouter) Benched(chainID ids.ID, nodeID ids.NodeID) { cr.lock.Lock() defer cr.lock.Unlock() + if cr.closing { + cr.log.Debug("dropping benched 
message", + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", chainID), + zap.Error(errClosing), + ) + return + } + benchedChains, exists := cr.benched[nodeID] benchedChains.Add(chainID) cr.benched[nodeID] = benchedChains @@ -554,6 +612,15 @@ func (cr *ChainRouter) Unbenched(chainID ids.ID, nodeID ids.NodeID) { cr.lock.Lock() defer cr.lock.Unlock() + if cr.closing { + cr.log.Debug("dropping unbenched message", + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", chainID), + zap.Error(errClosing), + ) + return + } + benchedChains := cr.benched[nodeID] benchedChains.Remove(chainID) if benchedChains.Len() != 0 { diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 19b889cd2d94..9eaae3071e15 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -191,6 +191,117 @@ func TestShutdown(t *testing.T) { require.Less(shutdownDuration, 250*time.Millisecond) } +func TestConnectedAfterShutdownErrorLogRegression(t *testing.T) { + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.PChainID) + chainCtx := snowtest.ConsensusContext(snowCtx) + + chainRouter := ChainRouter{} + require.NoError(chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoWarn{}, // If an error log is emitted, the test will fail + nil, + time.Second, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + prometheus.NewRegistry(), + )) + + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(err) + + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + + h, err := handler.New( + chainCtx, + nil, + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(chainCtx.NodeID, 
subnets.Config{}), + commontracker.NewPeers(), + p2pTracker, + prometheus.NewRegistry(), + ) + require.NoError(err) + + engine := common.EngineTest{ + T: t, + StartF: func(context.Context, uint32) error { + return nil + }, + ContextF: func() *snow.ConsensusContext { + return chainCtx + }, + HaltF: func(context.Context) {}, + ShutdownF: func(context.Context) error { + return nil + }, + ConnectedF: func(context.Context, ids.NodeID, *version.Application) error { + return nil + }, + } + engine.Default(true) + engine.CantGossip = false + + bootstrapper := &common.BootstrapperTest{ + EngineTest: engine, + CantClear: true, + } + + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: &engine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: &engine, + }, + }) + chainCtx.State.Set(snow.EngineState{ + Type: engineType, + State: snow.NormalOp, // assumed bootstrapping is done + }) + + chainRouter.AddChain(context.Background(), h) + + h.Start(context.Background(), false) + + chainRouter.Shutdown(context.Background()) + + shutdownDuration, err := h.AwaitStopped(context.Background()) + require.NoError(err) + require.GreaterOrEqual(shutdownDuration, time.Duration(0)) + + // Calling connected after shutdown should result in an error log. 
+ chainRouter.Connected( + ids.GenerateTestNodeID(), + version.CurrentApp, + ids.GenerateTestID(), + ) +} + func TestShutdownTimesOut(t *testing.T) { require := require.New(t) From deeab24c06da722a760aded7a7b81e3d4408f501 Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 2 Jul 2024 14:15:26 -0400 Subject: [PATCH 087/102] Use `ids.Empty` instead of `ids.ID{}` (#3166) --- chains/test_manager.go | 2 +- database/helpers.go | 2 +- genesis/genesis.go | 32 +++++++++--------- ids/galiasreader/alias_reader_client.go | 2 +- snow/consensus/snowman/topological.go | 4 +-- .../avalanche/bootstrap/queue/test_job.go | 2 +- .../avalanche/state/unique_vertex_test.go | 33 +++++++++---------- .../avalanche/vertex/stateless_vertex_test.go | 18 +++++----- snow/engine/snowman/block/test_vm.go | 2 +- snow/networking/handler/handler_test.go | 4 +-- snow/networking/router/chain_router_test.go | 2 +- snow/networking/timeout/manager_test.go | 2 +- vms/avm/client.go | 4 +-- vms/avm/vm.go | 2 +- vms/avm/wallet_client.go | 2 +- vms/components/avax/addresses.go | 8 ++--- vms/components/avax/atomic_utxos.go | 4 +-- vms/components/avax/utxo_fetching.go | 4 +-- vms/components/chain/state_test.go | 2 +- vms/platformvm/client.go | 2 +- vms/platformvm/txs/create_chain_test.go | 2 +- vms/proposervm/proposer/windower_test.go | 8 ++--- vms/proposervm/state/chain_state.go | 4 +-- vms/proposervm/vm_test.go | 6 ++-- x/sync/g_db/db_client.go | 2 +- 25 files changed, 76 insertions(+), 79 deletions(-) diff --git a/chains/test_manager.go b/chains/test_manager.go index f7b98b29b587..7740f3e2699b 100644 --- a/chains/test_manager.go +++ b/chains/test_manager.go @@ -42,7 +42,7 @@ func (testManager) StartChainCreator(ChainParameters) error { } func (testManager) SubnetID(ids.ID) (ids.ID, error) { - return ids.ID{}, nil + return ids.Empty, nil } func (testManager) IsBootstrapped(ids.ID) bool { diff --git a/database/helpers.go b/database/helpers.go index 
7e66c58fa770..e2f4a1a3c15a 100644 --- a/database/helpers.go +++ b/database/helpers.go @@ -36,7 +36,7 @@ func PutID(db KeyValueWriter, key []byte, val ids.ID) error { func GetID(db KeyValueReader, key []byte) (ids.ID, error) { b, err := db.Get(key) if err != nil { - return ids.ID{}, err + return ids.Empty, err } return ids.ToID(b) } diff --git a/genesis/genesis.go b/genesis/genesis.go index e25088a59a12..89967d10a48a 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -203,7 +203,7 @@ func validateConfig(networkID uint32, config *Config, stakingCfg *StakingConfig) func FromFile(networkID uint32, filepath string, stakingCfg *StakingConfig) ([]byte, ids.ID, error) { switch networkID { case constants.MainnetID, constants.TestnetID, constants.LocalID: - return nil, ids.ID{}, fmt.Errorf( + return nil, ids.Empty, fmt.Errorf( "%w: %s", errOverridesStandardNetworkConfig, constants.NetworkName(networkID), @@ -212,11 +212,11 @@ func FromFile(networkID uint32, filepath string, stakingCfg *StakingConfig) ([]b config, err := GetConfigFile(filepath) if err != nil { - return nil, ids.ID{}, fmt.Errorf("unable to load provided genesis config at %s: %w", filepath, err) + return nil, ids.Empty, fmt.Errorf("unable to load provided genesis config at %s: %w", filepath, err) } if err := validateConfig(networkID, config, stakingCfg); err != nil { - return nil, ids.ID{}, fmt.Errorf("genesis config validation failed: %w", err) + return nil, ids.Empty, fmt.Errorf("genesis config validation failed: %w", err) } return FromConfig(config) @@ -245,7 +245,7 @@ func FromFile(networkID uint32, filepath string, stakingCfg *StakingConfig) ([]b func FromFlag(networkID uint32, genesisContent string, stakingCfg *StakingConfig) ([]byte, ids.ID, error) { switch networkID { case constants.MainnetID, constants.TestnetID, constants.LocalID: - return nil, ids.ID{}, fmt.Errorf( + return nil, ids.Empty, fmt.Errorf( "%w: %s", errOverridesStandardNetworkConfig, constants.NetworkName(networkID), @@ -254,11 
+254,11 @@ func FromFlag(networkID uint32, genesisContent string, stakingCfg *StakingConfig customConfig, err := GetConfigContent(genesisContent) if err != nil { - return nil, ids.ID{}, fmt.Errorf("unable to load genesis content from flag: %w", err) + return nil, ids.Empty, fmt.Errorf("unable to load genesis content from flag: %w", err) } if err := validateConfig(networkID, customConfig, stakingCfg); err != nil { - return nil, ids.ID{}, fmt.Errorf("genesis config validation failed: %w", err) + return nil, ids.Empty, fmt.Errorf("genesis config validation failed: %w", err) } return FromConfig(customConfig) @@ -298,7 +298,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { for _, allocation := range xAllocations { addr, err := address.FormatBech32(hrp, allocation.AVAXAddr.Bytes()) if err != nil { - return nil, ids.ID{}, err + return nil, ids.Empty, err } avax.InitialState["fixedCap"] = append(avax.InitialState["fixedCap"], avm.Holder{ @@ -323,22 +323,22 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { avmSS := avm.CreateStaticService() err := avmSS.BuildGenesis(nil, &avmArgs, &avmReply) if err != nil { - return nil, ids.ID{}, err + return nil, ids.Empty, err } bytes, err := formatting.Decode(defaultEncoding, avmReply.Bytes) if err != nil { - return nil, ids.ID{}, fmt.Errorf("couldn't parse avm genesis reply: %w", err) + return nil, ids.Empty, fmt.Errorf("couldn't parse avm genesis reply: %w", err) } avaxAssetID, err := AVAXAssetID(bytes) if err != nil { - return nil, ids.ID{}, fmt.Errorf("couldn't generate AVAX asset ID: %w", err) + return nil, ids.Empty, fmt.Errorf("couldn't generate AVAX asset ID: %w", err) } genesisTime := time.Unix(int64(config.StartTime), 0) initialSupply, err := config.InitialSupply() if err != nil { - return nil, ids.ID{}, fmt.Errorf("couldn't calculate the initial supply: %w", err) + return nil, ids.Empty, fmt.Errorf("couldn't calculate the initial supply: %w", err) } initiallyStaked := 
set.Of(config.InitialStakedFunds...) @@ -360,7 +360,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { } addr, err := address.FormatBech32(hrp, allocation.AVAXAddr.Bytes()) if err != nil { - return nil, ids.ID{}, err + return nil, ids.Empty, err } for _, unlock := range allocation.UnlockSchedule { if unlock.Amount > 0 { @@ -391,14 +391,14 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { destAddrStr, err := address.FormatBech32(hrp, staker.RewardAddress.Bytes()) if err != nil { - return nil, ids.ID{}, err + return nil, ids.Empty, err } utxos := []api.UTXO(nil) for _, allocation := range nodeAllocations { addr, err := address.FormatBech32(hrp, allocation.AVAXAddr.Bytes()) if err != nil { - return nil, ids.ID{}, err + return nil, ids.Empty, err } for _, unlock := range allocation.UnlockSchedule { msgStr, err := formatting.Encode(defaultEncoding, allocation.ETHAddr.Bytes()) @@ -463,12 +463,12 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { platformvmReply := api.BuildGenesisReply{} platformvmSS := api.StaticService{} if err := platformvmSS.BuildGenesis(nil, &platformvmArgs, &platformvmReply); err != nil { - return nil, ids.ID{}, fmt.Errorf("problem while building platform chain's genesis state: %w", err) + return nil, ids.Empty, fmt.Errorf("problem while building platform chain's genesis state: %w", err) } genesisBytes, err := formatting.Decode(platformvmReply.Encoding, platformvmReply.Bytes) if err != nil { - return nil, ids.ID{}, fmt.Errorf("problem parsing platformvm genesis bytes: %w", err) + return nil, ids.Empty, fmt.Errorf("problem parsing platformvm genesis bytes: %w", err) } return genesisBytes, avaxAssetID, nil diff --git a/ids/galiasreader/alias_reader_client.go b/ids/galiasreader/alias_reader_client.go index 319d7508cbd4..daa3771af568 100644 --- a/ids/galiasreader/alias_reader_client.go +++ b/ids/galiasreader/alias_reader_client.go @@ -29,7 +29,7 @@ func (c *Client) Lookup(alias string) (ids.ID, error) { Alias: alias, }) 
if err != nil { - return ids.ID{}, err + return ids.Empty, err } return ids.ToID(resp.Id) } diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index 8c09e2798fcc..9888652f0764 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -502,7 +502,7 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err // block. if parentBlock.sb.Finalized() && ts.lastAcceptedID == vote.parentID { if err := ts.acceptPreferredChild(ctx, parentBlock); err != nil { - return ids.ID{}, err + return ids.Empty, err } // by accepting the child of parentBlock, the last accepted block is @@ -522,7 +522,7 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err // children will need to have their confidence reset. If there isn't a // child having RecordPoll called, then the nextID will default to the // nil ID. - nextID := ids.ID{} + nextID := ids.Empty if len(voteStack) > 0 { nextID = voteStack[newStackSize-1].parentID } diff --git a/snow/engine/avalanche/bootstrap/queue/test_job.go b/snow/engine/avalanche/bootstrap/queue/test_job.go index fd9af544fb62..91a370b96d81 100644 --- a/snow/engine/avalanche/bootstrap/queue/test_job.go +++ b/snow/engine/avalanche/bootstrap/queue/test_job.go @@ -51,7 +51,7 @@ func (j *TestJob) ID() ids.ID { if j.CantID && j.T != nil { require.FailNow(j.T, "Unexpectedly called ID") } - return ids.ID{} + return ids.Empty } func (j *TestJob) MissingDependencies(ctx context.Context) (set.Set[ids.ID], error) { diff --git a/snow/engine/avalanche/state/unique_vertex_test.go b/snow/engine/avalanche/state/unique_vertex_test.go index 6f644680d290..206b04c7d0e9 100644 --- a/snow/engine/avalanche/state/unique_vertex_test.go +++ b/snow/engine/avalanche/state/unique_vertex_test.go @@ -47,7 +47,7 @@ func TestUnknownUniqueVertexErrors(t *testing.T) { uVtx := &uniqueVertex{ serializer: s, - id: ids.ID{}, + id: ids.Empty, } status := uVtx.Status() @@ 
-78,10 +78,9 @@ func TestUniqueVertexCacheHit(t *testing.T) { id := ids.ID{2} parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't'} parentIDs := []ids.ID{parentID} - chainID := ids.ID{} // Same as chainID of serializer height := uint64(1) vtx, err := vertex.Build( // regular, non-stop vertex - chainID, + s.ChainID, height, parentIDs, [][]byte{{0}}, @@ -153,10 +152,9 @@ func TestUniqueVertexCacheMiss(t *testing.T) { parentID := uvtxParent.ID() parentIDs := []ids.ID{parentID} - chainID := ids.ID{} height := uint64(1) innerVertex, err := vertex.Build( // regular, non-stop vertex - chainID, + s.ChainID, height, parentIDs, [][]byte{txBytes}, @@ -259,16 +257,6 @@ func TestParseVertexWithIncorrectChainID(t *testing.T) { func TestParseVertexWithInvalidTxs(t *testing.T) { require := require.New(t) - chainID := ids.Empty - statelessVertex, err := vertex.Build( // regular, non-stop vertex - chainID, - 0, - nil, - [][]byte{{1}}, - ) - require.NoError(err) - vtxBytes := statelessVertex.Bytes() - s := newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { switch { case bytes.Equal(b, []byte{2}): @@ -278,6 +266,15 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { } }) + statelessVertex, err := vertex.Build( // regular, non-stop vertex + s.ChainID, + 0, + nil, + [][]byte{{1}}, + ) + require.NoError(err) + vtxBytes := statelessVertex.Bytes() + _, err = s.ParseVtx(context.Background(), vtxBytes) require.ErrorIs(err, errUnknownTx) @@ -289,7 +286,7 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { require.ErrorIs(err, errUnknownVertex) childStatelessVertex, err := vertex.Build( // regular, non-stop vertex - chainID, + s.ChainID, 1, []ids.ID{id}, [][]byte{{2}}, @@ -323,14 +320,14 @@ func newTestUniqueVertex( ) if !stopVertex { vtx, err = vertex.Build( - ids.ID{}, + s.ChainID, uint64(1), parentIDs, txs, ) } else { vtx, err = vertex.BuildStopVertex( - ids.ID{}, + s.ChainID, uint64(1), parentIDs, ) diff --git 
a/snow/engine/avalanche/vertex/stateless_vertex_test.go b/snow/engine/avalanche/vertex/stateless_vertex_test.go index 35ece98c51da..f8133c99848f 100644 --- a/snow/engine/avalanche/vertex/stateless_vertex_test.go +++ b/snow/engine/avalanche/vertex/stateless_vertex_test.go @@ -39,7 +39,7 @@ func TestVertexVerify(t *testing.T) { name: "valid vertex", vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{ Version: 0, - ChainID: ids.ID{}, + ChainID: ids.Empty, Height: 0, Epoch: 0, ParentIDs: []ids.ID{}, @@ -51,7 +51,7 @@ func TestVertexVerify(t *testing.T) { name: "invalid vertex epoch", vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{ Version: 0, - ChainID: ids.ID{}, + ChainID: ids.Empty, Height: 0, Epoch: 1, ParentIDs: []ids.ID{}, @@ -63,7 +63,7 @@ func TestVertexVerify(t *testing.T) { name: "too many vertex parents", vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{ Version: 0, - ChainID: ids.ID{}, + ChainID: ids.Empty, Height: 0, Epoch: 0, ParentIDs: tooManyParents, @@ -75,7 +75,7 @@ func TestVertexVerify(t *testing.T) { name: "no vertex txs", vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{ Version: 0, - ChainID: ids.ID{}, + ChainID: ids.Empty, Height: 0, Epoch: 0, ParentIDs: []ids.ID{}, @@ -87,7 +87,7 @@ func TestVertexVerify(t *testing.T) { name: "too many vertex txs", vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{ Version: 0, - ChainID: ids.ID{}, + ChainID: ids.Empty, Height: 0, Epoch: 0, ParentIDs: []ids.ID{}, @@ -99,7 +99,7 @@ func TestVertexVerify(t *testing.T) { name: "unsorted vertex parents", vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{ Version: 0, - ChainID: ids.ID{}, + ChainID: ids.Empty, Height: 0, Epoch: 0, ParentIDs: []ids.ID{{1}, {0}}, @@ -111,7 +111,7 @@ func TestVertexVerify(t *testing.T) { name: "unsorted vertex txs", vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{ Version: 0, - ChainID: ids.ID{}, + ChainID: 
ids.Empty, Height: 0, Epoch: 0, ParentIDs: []ids.ID{}, @@ -123,7 +123,7 @@ func TestVertexVerify(t *testing.T) { name: "duplicate vertex parents", vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{ Version: 0, - ChainID: ids.ID{}, + ChainID: ids.Empty, Height: 0, Epoch: 0, ParentIDs: []ids.ID{{0}, {0}}, @@ -135,7 +135,7 @@ func TestVertexVerify(t *testing.T) { name: "duplicate vertex txs", vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{ Version: 0, - ChainID: ids.ID{}, + ChainID: ids.Empty, Height: 0, Epoch: 0, ParentIDs: []ids.ID{}, diff --git a/snow/engine/snowman/block/test_vm.go b/snow/engine/snowman/block/test_vm.go index 7c04c90ec238..503d3f9d4851 100644 --- a/snow/engine/snowman/block/test_vm.go +++ b/snow/engine/snowman/block/test_vm.go @@ -100,7 +100,7 @@ func (vm *TestVM) LastAccepted(ctx context.Context) (ids.ID, error) { if vm.CantLastAccepted && vm.T != nil { require.FailNow(vm.T, errLastAccepted.Error()) } - return ids.ID{}, errLastAccepted + return ids.Empty, errLastAccepted } func (vm *TestVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { diff --git a/snow/networking/handler/handler_test.go b/snow/networking/handler/handler_test.go index cb24040643f3..929c51780c24 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -114,7 +114,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { nodeID := ids.EmptyNodeID reqID := uint32(1) - chainID := ids.ID{} + chainID := ids.Empty msg := Message{ InboundMessage: message.InboundGetAcceptedFrontier(chainID, reqID, 0*time.Second, nodeID), EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, @@ -237,7 +237,7 @@ func TestHandlerClosesOnError(t *testing.T) { reqID := uint32(1) deadline := time.Nanosecond msg := Message{ - InboundMessage: message.InboundGetAcceptedFrontier(ids.ID{}, reqID, deadline, nodeID), + InboundMessage: message.InboundGetAcceptedFrontier(ids.Empty, reqID, deadline, nodeID), 
EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } handler.Push(context.Background(), msg) diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 9eaae3071e15..7472de1fc016 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -435,7 +435,7 @@ func TestShutdownTimesOut(t *testing.T) { shutdownFinished := make(chan struct{}, 1) go func() { - chainID := ids.ID{} + chainID := ids.Empty msg := handler.Message{ InboundMessage: message.InboundPullQuery(chainID, 1, time.Hour, ids.GenerateTestID(), 0, nodeID), EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, diff --git a/snow/networking/timeout/manager_test.go b/snow/networking/timeout/manager_test.go index d6109002f615..131fb09b5e76 100644 --- a/snow/networking/timeout/manager_test.go +++ b/snow/networking/timeout/manager_test.go @@ -39,7 +39,7 @@ func TestManagerFire(t *testing.T) { manager.RegisterRequest( ids.EmptyNodeID, - ids.ID{}, + ids.Empty, true, ids.RequestID{}, wg.Done, diff --git a/vms/avm/client.go b/vms/avm/client.go index d53ed9388c7a..e2ef96125b75 100644 --- a/vms/avm/client.go +++ b/vms/avm/client.go @@ -266,7 +266,7 @@ func (c *client) GetHeight(ctx context.Context, options ...rpc.Option) (uint64, func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) { txStr, err := formatting.Encode(formatting.Hex, txBytes) if err != nil { - return ids.ID{}, err + return ids.Empty, err } res := &api.JSONTxID{} err = c.requester.SendRequest(ctx, "avm.issueTx", &api.FormattedTx{ @@ -693,7 +693,7 @@ func (c *client) MintNFT( ) (ids.ID, error) { payloadStr, err := formatting.Encode(formatting.Hex, payload) if err != nil { - return ids.ID{}, err + return ids.Empty, err } res := &api.JSONTxID{} err = c.requester.SendRequest(ctx, "avm.mintNFT", &MintNFTArgs{ diff --git a/vms/avm/vm.go b/vms/avm/vm.go index 6a455132c1a1..284243693349 100644 --- a/vms/avm/vm.go +++ 
b/vms/avm/vm.go @@ -641,7 +641,7 @@ func (vm *VM) lookupAssetID(asset string) (ids.ID, error) { if assetID, err := ids.FromString(asset); err == nil { return assetID, nil } - return ids.ID{}, fmt.Errorf("asset '%s' not found", asset) + return ids.Empty, fmt.Errorf("asset '%s' not found", asset) } // Invariant: onAccept is called when [tx] is being marked as accepted, but diff --git a/vms/avm/wallet_client.go b/vms/avm/wallet_client.go index 69bdc06f9d74..7f805fd7ba23 100644 --- a/vms/avm/wallet_client.go +++ b/vms/avm/wallet_client.go @@ -75,7 +75,7 @@ func NewWalletClient(uri, chain string) WalletClient { func (c *walletClient) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) { txStr, err := formatting.Encode(formatting.Hex, txBytes) if err != nil { - return ids.ID{}, err + return ids.Empty, err } res := &api.JSONTxID{} err = c.requester.SendRequest(ctx, "wallet.issueTx", &api.FormattedTx{ diff --git a/vms/components/avax/addresses.go b/vms/components/avax/addresses.go index a1567f75f4d6..b8a3a43f81ce 100644 --- a/vms/components/avax/addresses.go +++ b/vms/components/avax/addresses.go @@ -66,17 +66,17 @@ func (a *addressManager) ParseLocalAddress(addrStr string) (ids.ShortID, error) func (a *addressManager) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { chainIDAlias, hrp, addrBytes, err := address.Parse(addrStr) if err != nil { - return ids.ID{}, ids.ShortID{}, err + return ids.Empty, ids.ShortID{}, err } chainID, err := a.ctx.BCLookup.Lookup(chainIDAlias) if err != nil { - return ids.ID{}, ids.ShortID{}, err + return ids.Empty, ids.ShortID{}, err } expectedHRP := constants.GetHRP(a.ctx.NetworkID) if hrp != expectedHRP { - return ids.ID{}, ids.ShortID{}, fmt.Errorf( + return ids.Empty, ids.ShortID{}, fmt.Errorf( "expected hrp %q but got %q", expectedHRP, hrp, @@ -85,7 +85,7 @@ func (a *addressManager) ParseAddress(addrStr string) (ids.ID, ids.ShortID, erro addr, err := ids.ToShortID(addrBytes) if err != nil { - 
return ids.ID{}, ids.ShortID{}, err + return ids.Empty, ids.ShortID{}, err } return chainID, addr, nil } diff --git a/vms/components/avax/atomic_utxos.go b/vms/components/avax/atomic_utxos.go index f0a854284f22..f0e6e0f2d4c9 100644 --- a/vms/components/avax/atomic_utxos.go +++ b/vms/components/avax/atomic_utxos.go @@ -47,7 +47,7 @@ func GetAtomicUTXOs( limit, ) if err != nil { - return nil, ids.ShortID{}, ids.ID{}, fmt.Errorf("error fetching atomic UTXOs: %w", err) + return nil, ids.ShortID{}, ids.Empty, fmt.Errorf("error fetching atomic UTXOs: %w", err) } lastAddrID, err := ids.ToShortID(lastAddr) @@ -63,7 +63,7 @@ func GetAtomicUTXOs( for i, utxoBytes := range allUTXOBytes { utxo := &UTXO{} if _, err := codec.Unmarshal(utxoBytes, utxo); err != nil { - return nil, ids.ShortID{}, ids.ID{}, fmt.Errorf("error parsing UTXO: %w", err) + return nil, ids.ShortID{}, ids.Empty, fmt.Errorf("error parsing UTXO: %w", err) } utxos[i] = utxo } diff --git a/vms/components/avax/utxo_fetching.go b/vms/components/avax/utxo_fetching.go index c5170f1d3123..082327319efd 100644 --- a/vms/components/avax/utxo_fetching.go +++ b/vms/components/avax/utxo_fetching.go @@ -84,7 +84,7 @@ func GetPaginatedUTXOs( utxoIDs, err := db.UTXOIDs(addr.Bytes(), start, searchSize) // Get UTXOs associated with [addr] if err != nil { - return nil, ids.ShortID{}, ids.ID{}, fmt.Errorf("couldn't get UTXOs for address %s: %w", addr, err) + return nil, ids.ShortID{}, ids.Empty, fmt.Errorf("couldn't get UTXOs for address %s: %w", addr, err) } for _, utxoID := range utxoIDs { lastUTXOID = utxoID // The last searched UTXO - not the last found @@ -95,7 +95,7 @@ func GetPaginatedUTXOs( utxo, err := db.GetUTXO(utxoID) if err != nil { - return nil, ids.ShortID{}, ids.ID{}, fmt.Errorf("couldn't get UTXO %s: %w", utxoID, err) + return nil, ids.ShortID{}, ids.Empty, fmt.Errorf("couldn't get UTXO %s: %w", utxoID, err) } utxos = append(utxos, utxo) diff --git a/vms/components/chain/state_test.go 
b/vms/components/chain/state_test.go index e4376be502aa..69fe825cb048 100644 --- a/vms/components/chain/state_test.go +++ b/vms/components/chain/state_test.go @@ -102,7 +102,7 @@ func createInternalBlockFuncs(blks []*snowmantest.Block) ( } } - return ids.ID{}, database.ErrNotFound + return ids.Empty, database.ErrNotFound } return getBlock, parseBlk, getAcceptedBlockIDAtHeight diff --git a/vms/platformvm/client.go b/vms/platformvm/client.go index 11453efb5f6b..95377ce96758 100644 --- a/vms/platformvm/client.go +++ b/vms/platformvm/client.go @@ -362,7 +362,7 @@ func (c *client) GetBlockchains(ctx context.Context, options ...rpc.Option) ([]A func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) { txStr, err := formatting.Encode(formatting.Hex, txBytes) if err != nil { - return ids.ID{}, err + return ids.Empty, err } res := &api.JSONTxID{} diff --git a/vms/platformvm/txs/create_chain_test.go b/vms/platformvm/txs/create_chain_test.go index 787aaa2a7ccb..ecf52567d357 100644 --- a/vms/platformvm/txs/create_chain_test.go +++ b/vms/platformvm/txs/create_chain_test.go @@ -59,7 +59,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { chainName: "yeet", keys: []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, setup: func(tx *CreateChainTx) *CreateChainTx { - tx.VMID = ids.ID{} + tx.VMID = ids.Empty return tx }, expectedErr: errInvalidVMID, diff --git a/vms/proposervm/proposer/windower_test.go b/vms/proposervm/proposer/windower_test.go index b3345181bd2f..1a77760af3cf 100644 --- a/vms/proposervm/proposer/windower_test.go +++ b/vms/proposervm/proposer/windower_test.go @@ -123,11 +123,11 @@ func TestDelayChangeByChain(t *testing.T) { source := rand.NewSource(int64(0)) rng := rand.New(source) // #nosec G404 - chainID0 := ids.ID{} + chainID0 := ids.Empty _, err := rng.Read(chainID0[:]) require.NoError(err) - chainID1 := ids.ID{} + chainID1 := ids.Empty _, err = rng.Read(chainID1[:]) require.NoError(err) 
@@ -196,11 +196,11 @@ func TestExpectedProposerChangeByChain(t *testing.T) { source := rand.NewSource(int64(0)) rng := rand.New(source) // #nosec G404 - chainID0 := ids.ID{} + chainID0 := ids.Empty _, err := rng.Read(chainID0[:]) require.NoError(err) - chainID1 := ids.ID{} + chainID1 := ids.Empty _, err = rng.Read(chainID1[:]) require.NoError(err) diff --git a/vms/proposervm/state/chain_state.go b/vms/proposervm/state/chain_state.go index e4ed34ddcb78..87d2d99081b7 100644 --- a/vms/proposervm/state/chain_state.go +++ b/vms/proposervm/state/chain_state.go @@ -52,11 +52,11 @@ func (s *chainState) GetLastAccepted() (ids.ID, error) { } lastAcceptedBytes, err := s.db.Get(lastAcceptedKey) if err != nil { - return ids.ID{}, err + return ids.Empty, err } lastAccepted, err := ids.ToID(lastAcceptedBytes) if err != nil { - return ids.ID{}, err + return ids.Empty, err } s.lastAccepted = lastAccepted return lastAccepted, nil diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index a2536375d48c..5e76b22cd6e2 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -1571,7 +1571,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { }, GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { if height >= uint64(len(coreHeights)) { - return ids.ID{}, errTooHigh + return ids.Empty, errTooHigh } return coreHeights[height], nil }, @@ -1743,7 +1743,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { }, GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { if height >= uint64(len(coreHeights)) { - return ids.ID{}, errTooHigh + return ids.Empty, errTooHigh } return coreHeights[height], nil }, @@ -2299,7 +2299,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { }, GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { if height >= uint64(len(acceptedBlocks)) { - return ids.ID{}, errTooHigh + return ids.Empty, errTooHigh } return acceptedBlocks[height].ID(), nil }, diff --git 
a/x/sync/g_db/db_client.go b/x/sync/g_db/db_client.go index 37b3339766ae..48fc8d2b4c7f 100644 --- a/x/sync/g_db/db_client.go +++ b/x/sync/g_db/db_client.go @@ -32,7 +32,7 @@ type DBClient struct { func (c *DBClient) GetMerkleRoot(ctx context.Context) (ids.ID, error) { resp, err := c.client.GetMerkleRoot(ctx, &emptypb.Empty{}) if err != nil { - return ids.ID{}, err + return ids.Empty, err } return ids.ToID(resp.RootHash) } From e20115cd28ea3d57ecc28f84f31c27af52c08980 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 2 Jul 2024 15:42:01 -0400 Subject: [PATCH 088/102] Replace usage of utils.Err with errors.Join (#3167) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- api/server/metrics.go | 5 ++--- cache/metercacher/metrics.go | 6 +++--- database/leveldb/metrics.go | 5 ++--- database/meterdb/db.go | 4 ++-- genesis/config.go | 2 +- indexer/index.go | 3 +-- message/messages.go | 3 +-- network/ip_tracker.go | 4 ++-- network/metrics.go | 4 ++-- network/p2p/gossip/gossip.go | 3 +-- network/p2p/network.go | 4 ++-- network/p2p/peer_tracker.go | 4 ++-- network/peer/metrics.go | 4 ++-- network/throttling/inbound_resource_throttler.go | 4 ++-- network/throttling/outbound_msg_throttler.go | 5 +++-- node/node.go | 2 +- snow/engine/avalanche/bootstrap/metrics.go | 6 +++--- snow/engine/avalanche/bootstrap/queue/jobs.go | 4 ++-- snow/engine/avalanche/bootstrap/queue/state.go | 4 ++-- snow/engine/avalanche/vertex/codec.go | 5 +++-- snow/engine/common/tracker/peers.go | 4 ++-- snow/engine/snowman/bootstrap/metrics.go | 6 +++--- snow/networking/benchlist/benchlist.go | 4 ++-- snow/networking/handler/message_queue_metrics.go | 5 +++-- snow/networking/handler/metrics.go | 6 +++--- snow/networking/router/chain_router_metrics.go | 6 +++--- snow/networking/timeout/metrics.go | 4 ++-- snow/networking/tracker/resource_tracker.go | 4 ++-- utils/bloom/metrics.go | 6 +++--- utils/error.go | 13 ------------- 
utils/metric/api_interceptor.go | 5 ++--- utils/resource/metrics.go | 6 +++--- utils/timer/adaptive_timeout_manager.go | 3 +-- vms/avm/block/parser.go | 6 +++--- vms/avm/state/state.go | 6 +++--- vms/avm/txs/parser.go | 4 ++-- vms/avm/vm.go | 3 +-- vms/components/keystore/codec.go | 4 ++-- vms/example/xsvm/execute/tx.go | 3 +-- vms/example/xsvm/tx/codec.go | 4 ++-- vms/nftfx/fx.go | 3 +-- vms/platformvm/block/codec.go | 6 +++--- vms/platformvm/state/metadata_codec.go | 4 ++-- vms/platformvm/state/state.go | 13 ++++++------- vms/platformvm/txs/codec.go | 4 ++-- vms/platformvm/vm.go | 2 +- vms/platformvm/warp/codec.go | 4 ++-- vms/platformvm/warp/payload/codec.go | 5 +++-- vms/propertyfx/fx.go | 3 +-- vms/proposervm/block/codec.go | 4 ++-- vms/proposervm/vm.go | 3 +-- vms/secp256k1fx/fx.go | 3 +-- vms/txs/mempool/metrics.go | 6 +++--- x/merkledb/metrics.go | 5 ++--- x/sync/metrics.go | 5 ++--- 55 files changed, 114 insertions(+), 139 deletions(-) delete mode 100644 utils/error.go diff --git a/api/server/metrics.go b/api/server/metrics.go index 9734f36eeaa1..3ab65e8e15e7 100644 --- a/api/server/metrics.go +++ b/api/server/metrics.go @@ -4,12 +4,11 @@ package server import ( + "errors" "net/http" "time" "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils" ) type metrics struct { @@ -43,7 +42,7 @@ func newMetrics(registerer prometheus.Registerer) (*metrics, error) { ), } - err := utils.Err( + err := errors.Join( registerer.Register(m.numProcessing), registerer.Register(m.numCalls), registerer.Register(m.totalDuration), diff --git a/cache/metercacher/metrics.go b/cache/metercacher/metrics.go index c7587f62c979..65b72db6a46c 100644 --- a/cache/metercacher/metrics.go +++ b/cache/metercacher/metrics.go @@ -4,9 +4,9 @@ package metercacher import ( - "github.com/prometheus/client_golang/prometheus" + "errors" - "github.com/ava-labs/avalanchego/utils" + 
"github.com/prometheus/client_golang/prometheus" ) const ( @@ -78,7 +78,7 @@ func newMetrics( Help: "fraction of cache filled", }), } - return m, utils.Err( + return m, errors.Join( reg.Register(m.getCount), reg.Register(m.getTime), reg.Register(m.putCount), diff --git a/database/leveldb/metrics.go b/database/leveldb/metrics.go index d1edab6f98e7..194975f9675d 100644 --- a/database/leveldb/metrics.go +++ b/database/leveldb/metrics.go @@ -4,12 +4,11 @@ package leveldb import ( + "errors" "strconv" "github.com/prometheus/client_golang/prometheus" "github.com/syndtr/goleveldb/leveldb" - - "github.com/ava-labs/avalanchego/utils" ) var levelLabels = []string{"level"} @@ -161,7 +160,7 @@ func newMetrics(reg prometheus.Registerer) (metrics, error) { currentStats: &leveldb.DBStats{}, } - err := utils.Err( + err := errors.Join( reg.Register(m.writesDelayedCount), reg.Register(m.writesDelayedDuration), reg.Register(m.writeIsDelayed), diff --git a/database/meterdb/db.go b/database/meterdb/db.go index af41746b32e4..4ae864400a91 100644 --- a/database/meterdb/db.go +++ b/database/meterdb/db.go @@ -5,12 +5,12 @@ package meterdb import ( "context" + "errors" "time" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/utils" ) const methodLabel = "method" @@ -125,7 +125,7 @@ func New( methodLabels, ), } - return meterDB, utils.Err( + return meterDB, errors.Join( reg.Register(meterDB.calls), reg.Register(meterDB.duration), reg.Register(meterDB.size), diff --git a/genesis/config.go b/genesis/config.go index 62b57c77f923..f09c5df07a89 100644 --- a/genesis/config.go +++ b/genesis/config.go @@ -177,7 +177,7 @@ func init() { unparsedFujiConfig := UnparsedConfig{} unparsedLocalConfig := UnparsedConfig{} - err := utils.Err( + err := errors.Join( json.Unmarshal(mainnetGenesisConfigJSON, &unparsedMainnetConfig), json.Unmarshal(fujiGenesisConfigJSON, 
&unparsedFujiConfig), json.Unmarshal(localGenesisConfigJSON, &unparsedLocalConfig), diff --git a/indexer/index.go b/indexer/index.go index 16a127c9795b..188658c16ca6 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -99,7 +98,7 @@ func newIndex( // Close this index func (i *index) Close() error { - return utils.Err( + return errors.Join( i.indexToContainer.Close(), i.containerToIndex.Close(), i.vDB.Close(), diff --git a/message/messages.go b/message/messages.go index 06ef3125d69a..4be13de729b4 100644 --- a/message/messages.go +++ b/message/messages.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/proto/pb/p2p" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" @@ -178,7 +177,7 @@ func newMsgBuilder( maxMessageTimeout: maxMessageTimeout, } - return mb, utils.Err( + return mb, errors.Join( metrics.Register(mb.count), metrics.Register(mb.duration), ) diff --git a/network/ip_tracker.go b/network/ip_tracker.go index 370c7d47da92..47cb032fc7a4 100644 --- a/network/ip_tracker.go +++ b/network/ip_tracker.go @@ -5,6 +5,7 @@ package network import ( "crypto/rand" + "errors" "sync" "github.com/prometheus/client_golang/prometheus" @@ -12,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/bloom" 
"github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" @@ -63,7 +63,7 @@ func newIPTracker( connected: make(map[ids.NodeID]*ips.ClaimedIPPort), gossipableIndices: make(map[ids.NodeID]int), } - err = utils.Err( + err = errors.Join( registerer.Register(tracker.numTrackedIPs), registerer.Register(tracker.numGossipableIPs), ) diff --git a/network/metrics.go b/network/metrics.go index 8cc5155ec102..9702e821b246 100644 --- a/network/metrics.go +++ b/network/metrics.go @@ -4,6 +4,7 @@ package network import ( + "errors" "sync" "time" @@ -11,7 +12,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/peer" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" ) @@ -135,7 +135,7 @@ func newMetrics( peerConnectedStartTimes: make(map[ids.NodeID]float64), } - err := utils.Err( + err := errors.Join( registerer.Register(m.numTracked), registerer.Register(m.numPeers), registerer.Register(m.numSubnetPeers), diff --git a/network/p2p/gossip/gossip.go b/network/p2p/gossip/gossip.go index 918f19ca5ba0..1562093c41e2 100644 --- a/network/p2p/gossip/gossip.go +++ b/network/p2p/gossip/gossip.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/buffer" "github.com/ava-labs/avalanchego/utils/logging" @@ -150,7 +149,7 @@ func NewMetrics( typeLabels, ), } - err := utils.Err( + err := errors.Join( metrics.Register(m.count), metrics.Register(m.bytes), metrics.Register(m.tracking), diff --git a/network/p2p/network.go b/network/p2p/network.go index dd7bac73aa7b..491b35bbe9b9 100644 --- a/network/p2p/network.go +++ b/network/p2p/network.go @@ -6,6 
+6,7 @@ package p2p import ( "context" "encoding/binary" + "errors" "strconv" "sync" "time" @@ -15,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -81,7 +81,7 @@ func NewNetwork( ), } - err := utils.Err( + err := errors.Join( registerer.Register(metrics.msgTime), registerer.Register(metrics.msgCount), ) diff --git a/network/p2p/peer_tracker.go b/network/p2p/peer_tracker.go index 31a4fb61cbb8..e4d9a07c27b2 100644 --- a/network/p2p/peer_tracker.go +++ b/network/p2p/peer_tracker.go @@ -4,6 +4,7 @@ package p2p import ( + "errors" "math" "math/rand" "sync" @@ -13,7 +14,6 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -113,7 +113,7 @@ func NewPeerTracker( }, } - err := utils.Err( + err := errors.Join( registerer.Register(t.metrics.numTrackedPeers), registerer.Register(t.metrics.numResponsivePeers), registerer.Register(t.metrics.averageBandwidth), diff --git a/network/peer/metrics.go b/network/peer/metrics.go index 7547d7a827d4..6747a577abc0 100644 --- a/network/peer/metrics.go +++ b/network/peer/metrics.go @@ -4,12 +4,12 @@ package peer import ( + "errors" "strconv" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/utils" ) const ( @@ -82,7 +82,7 @@ func NewMetrics(registerer prometheus.Registerer) (*Metrics, error) { ioOpLabels, ), } - return m, utils.Err( + return m, errors.Join( 
registerer.Register(m.ClockSkewCount), registerer.Register(m.ClockSkewSum), registerer.Register(m.NumFailedToParse), diff --git a/network/throttling/inbound_resource_throttler.go b/network/throttling/inbound_resource_throttler.go index eb0e939b8d9e..eb915e03232c 100644 --- a/network/throttling/inbound_resource_throttler.go +++ b/network/throttling/inbound_resource_throttler.go @@ -5,6 +5,7 @@ package throttling import ( "context" + "errors" "fmt" "sync" "time" @@ -13,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/tracker" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -80,7 +80,7 @@ func newSystemThrottlerMetrics(namespace string, reg prometheus.Registerer) (*sy Help: "Number of nodes we're waiting to read a message from because their usage is too high", }), } - err := utils.Err( + err := errors.Join( reg.Register(m.totalWaits), reg.Register(m.totalNoWaits), reg.Register(m.awaitingAcquire), diff --git a/network/throttling/outbound_msg_throttler.go b/network/throttling/outbound_msg_throttler.go index b27fe01060dc..4eeeb30cf46e 100644 --- a/network/throttling/outbound_msg_throttler.go +++ b/network/throttling/outbound_msg_throttler.go @@ -4,13 +4,14 @@ package throttling import ( + "errors" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -196,7 +197,7 @@ func (m *outboundMsgThrottlerMetrics) initialize(registerer prometheus.Registere Name: "throttler_outbound_awaiting_release", Help: "Number of messages waiting to be sent", }) - return utils.Err( + return errors.Join( 
registerer.Register(m.acquireSuccesses), registerer.Register(m.acquireFailures), registerer.Register(m.remainingAtLargeBytes), diff --git a/node/node.go b/node/node.go index c7d1b1991e5c..2be24203f5e3 100644 --- a/node/node.go +++ b/node/node.go @@ -1217,7 +1217,7 @@ func (n *Node) initVMs() error { // Register the VMs that Avalanche supports eUpgradeTime := version.GetEUpgradeTime(n.Config.NetworkID) - err := utils.Err( + err := errors.Join( n.VMManager.RegisterFactory(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ Config: platformconfig.Config{ Chains: n.chainManager, diff --git a/snow/engine/avalanche/bootstrap/metrics.go b/snow/engine/avalanche/bootstrap/metrics.go index fdf68f5ecff2..5751389eb924 100644 --- a/snow/engine/avalanche/bootstrap/metrics.go +++ b/snow/engine/avalanche/bootstrap/metrics.go @@ -4,9 +4,9 @@ package bootstrap import ( - "github.com/prometheus/client_golang/prometheus" + "errors" - "github.com/ava-labs/avalanchego/utils" + "github.com/prometheus/client_golang/prometheus" ) type metrics struct { @@ -33,7 +33,7 @@ func (m *metrics) Initialize(registerer prometheus.Registerer) error { Help: "Number of transactions accepted during bootstrapping", }) - return utils.Err( + return errors.Join( registerer.Register(m.numFetchedVts), registerer.Register(m.numAcceptedVts), registerer.Register(m.numFetchedTxs), diff --git a/snow/engine/avalanche/bootstrap/queue/jobs.go b/snow/engine/avalanche/bootstrap/queue/jobs.go index a8955a8da079..4becc8d3224f 100644 --- a/snow/engine/avalanche/bootstrap/queue/jobs.go +++ b/snow/engine/avalanche/bootstrap/queue/jobs.go @@ -5,6 +5,7 @@ package queue import ( "context" + "errors" "fmt" "time" @@ -16,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" 
"github.com/ava-labs/avalanchego/utils/timer" ) @@ -424,7 +424,7 @@ func (jm *JobsWithMissing) cleanRunnableStack(ctx context.Context) error { } } - return utils.Err( + return errors.Join( runnableJobsIter.Error(), jm.Commit(), ) diff --git a/snow/engine/avalanche/bootstrap/queue/state.go b/snow/engine/avalanche/bootstrap/queue/state.go index 76bce7c838c1..9ad87d682453 100644 --- a/snow/engine/avalanche/bootstrap/queue/state.go +++ b/snow/engine/avalanche/bootstrap/queue/state.go @@ -5,6 +5,7 @@ package queue import ( "context" + "errors" "fmt" "github.com/prometheus/client_golang/prometheus" @@ -15,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/database/linkeddb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/set" ) @@ -153,7 +153,7 @@ func (s *state) Clear() error { return err } - return utils.Err( + return errors.Join( runJobsIter.Error(), jobsIter.Error(), depsIter.Error(), diff --git a/snow/engine/avalanche/vertex/codec.go b/snow/engine/avalanche/vertex/codec.go index 3a55f443467e..4a17526f61bd 100644 --- a/snow/engine/avalanche/vertex/codec.go +++ b/snow/engine/avalanche/vertex/codec.go @@ -4,10 +4,11 @@ package vertex import ( + "errors" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/codec/reflectcodec" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" ) @@ -26,7 +27,7 @@ func init() { lc1 := linearcodec.New([]string{reflectcodec.DefaultTagName + "V1"}) Codec = codec.NewManager(maxSize) - err := utils.Err( + err := errors.Join( Codec.RegisterCodec(CodecVersion, lc0), Codec.RegisterCodec(CodecVersionWithStopVtx, lc1), ) diff --git a/snow/engine/common/tracker/peers.go 
b/snow/engine/common/tracker/peers.go index 94ed46764785..543933df43c2 100644 --- a/snow/engine/common/tracker/peers.go +++ b/snow/engine/common/tracker/peers.go @@ -5,13 +5,13 @@ package tracker import ( "context" + "errors" "sync" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -126,7 +126,7 @@ func NewMeteredPeers(reg prometheus.Registerer) (Peers, error) { Name: "num_validators", Help: "Total number of validators", }) - err := utils.Err( + err := errors.Join( reg.Register(percentConnected), reg.Register(totalWeight), reg.Register(numValidators), diff --git a/snow/engine/snowman/bootstrap/metrics.go b/snow/engine/snowman/bootstrap/metrics.go index 7b28b8b969b7..db7dfd772cf2 100644 --- a/snow/engine/snowman/bootstrap/metrics.go +++ b/snow/engine/snowman/bootstrap/metrics.go @@ -4,9 +4,9 @@ package bootstrap import ( - "github.com/prometheus/client_golang/prometheus" + "errors" - "github.com/ava-labs/avalanchego/utils" + "github.com/prometheus/client_golang/prometheus" ) type metrics struct { @@ -25,7 +25,7 @@ func newMetrics(registerer prometheus.Registerer) (*metrics, error) { }), } - err := utils.Err( + err := errors.Join( registerer.Register(m.numFetched), registerer.Register(m.numAccepted), ) diff --git a/snow/networking/benchlist/benchlist.go b/snow/networking/benchlist/benchlist.go index 453395379435..05934ed2fce5 100644 --- a/snow/networking/benchlist/benchlist.go +++ b/snow/networking/benchlist/benchlist.go @@ -4,6 +4,7 @@ package benchlist import ( + "errors" "fmt" "math/rand" "sync" @@ -15,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" 
"github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -130,7 +130,7 @@ func NewBenchlist( maxPortion: maxPortion, } - err := utils.Err( + err := errors.Join( reg.Register(benchlist.numBenched), reg.Register(benchlist.weightBenched), ) diff --git a/snow/networking/handler/message_queue_metrics.go b/snow/networking/handler/message_queue_metrics.go index 827edbf5c162..03600fb65b08 100644 --- a/snow/networking/handler/message_queue_metrics.go +++ b/snow/networking/handler/message_queue_metrics.go @@ -4,9 +4,10 @@ package handler import ( + "errors" + "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/metric" ) @@ -44,7 +45,7 @@ func (m *messageQueueMetrics) initialize( Help: "times a message has been deferred due to excessive CPU usage", }) - return utils.Err( + return errors.Join( metricsRegisterer.Register(m.count), metricsRegisterer.Register(m.nodesWithMessages), metricsRegisterer.Register(m.numExcessiveCPU), diff --git a/snow/networking/handler/metrics.go b/snow/networking/handler/metrics.go index f3a21149f26c..1e143a6c98be 100644 --- a/snow/networking/handler/metrics.go +++ b/snow/networking/handler/metrics.go @@ -4,9 +4,9 @@ package handler import ( - "github.com/prometheus/client_golang/prometheus" + "errors" - "github.com/ava-labs/avalanchego/utils" + "github.com/prometheus/client_golang/prometheus" ) type metrics struct { @@ -44,7 +44,7 @@ func newMetrics(reg prometheus.Registerer) (*metrics, error) { Help: "time spent acquiring the context lock", }), } - return m, utils.Err( + return m, errors.Join( reg.Register(m.expired), reg.Register(m.messages), reg.Register(m.messageHandlingTime), diff --git 
a/snow/networking/router/chain_router_metrics.go b/snow/networking/router/chain_router_metrics.go index 8855acc5ccdf..2c17326618ad 100644 --- a/snow/networking/router/chain_router_metrics.go +++ b/snow/networking/router/chain_router_metrics.go @@ -4,9 +4,9 @@ package router import ( - "github.com/prometheus/client_golang/prometheus" + "errors" - "github.com/ava-labs/avalanchego/utils" + "github.com/prometheus/client_golang/prometheus" ) // routerMetrics about router messages @@ -37,7 +37,7 @@ func newRouterMetrics(registerer prometheus.Registerer) (*routerMetrics, error) }, ) - err := utils.Err( + err := errors.Join( registerer.Register(rMetrics.outstandingRequests), registerer.Register(rMetrics.longestRunningRequest), registerer.Register(rMetrics.droppedRequests), diff --git a/snow/networking/timeout/metrics.go b/snow/networking/timeout/metrics.go index 3f217d5f7ad7..b109e866db3c 100644 --- a/snow/networking/timeout/metrics.go +++ b/snow/networking/timeout/metrics.go @@ -4,6 +4,7 @@ package timeout import ( + "errors" "sync" "time" @@ -12,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" ) const ( @@ -48,7 +48,7 @@ func newTimeoutMetrics(reg prometheus.Registerer) (*timeoutMetrics, error) { ), chainIDToAlias: make(map[ids.ID]string), } - return m, utils.Err( + return m, errors.Join( reg.Register(m.messages), reg.Register(m.messageLatencies), ) diff --git a/snow/networking/tracker/resource_tracker.go b/snow/networking/tracker/resource_tracker.go index d8f5da99192f..1e7e256dfaf9 100644 --- a/snow/networking/tracker/resource_tracker.go +++ b/snow/networking/tracker/resource_tracker.go @@ -4,6 +4,7 @@ package tracker import ( + "errors" "fmt" "sync" "time" @@ -11,7 +12,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" 
- "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" @@ -316,7 +316,7 @@ func newCPUTrackerMetrics(reg prometheus.Registerer) (*trackerMetrics, error) { Help: "Available space remaining (bytes) on the database volume", }), } - err := utils.Err( + err := errors.Join( reg.Register(m.processingTimeMetric), reg.Register(m.cpuMetric), reg.Register(m.diskReadsMetric), diff --git a/utils/bloom/metrics.go b/utils/bloom/metrics.go index 7e33edc5c069..a71d50f067a1 100644 --- a/utils/bloom/metrics.go +++ b/utils/bloom/metrics.go @@ -4,9 +4,9 @@ package bloom import ( - "github.com/prometheus/client_golang/prometheus" + "errors" - "github.com/ava-labs/avalanchego/utils" + "github.com/prometheus/client_golang/prometheus" ) // Metrics is a collection of commonly useful metrics when using a long-lived @@ -50,7 +50,7 @@ func NewMetrics( Help: "Number times the bloom has been reset", }), } - err := utils.Err( + err := errors.Join( registerer.Register(m.Count), registerer.Register(m.NumHashes), registerer.Register(m.NumEntries), diff --git a/utils/error.go b/utils/error.go deleted file mode 100644 index 0a6a9f323e03..000000000000 --- a/utils/error.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package utils - -func Err(errors ...error) error { - for _, err := range errors { - if err != nil { - return err - } - } - return nil -} diff --git a/utils/metric/api_interceptor.go b/utils/metric/api_interceptor.go index 50027fde1478..1d4520e3fd94 100644 --- a/utils/metric/api_interceptor.go +++ b/utils/metric/api_interceptor.go @@ -5,13 +5,12 @@ package metric import ( "context" + "errors" "net/http" "time" "github.com/gorilla/rpc/v2" "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils" ) type APIInterceptor interface { @@ -51,7 +50,7 @@ func NewAPIInterceptor(registerer prometheus.Registerer) (APIInterceptor, error) []string{"method"}, ) - err := utils.Err( + err := errors.Join( registerer.Register(requestDurationCount), registerer.Register(requestDurationSum), registerer.Register(requestErrors), diff --git a/utils/resource/metrics.go b/utils/resource/metrics.go index 42d12f1ccc74..01cdec5e15b9 100644 --- a/utils/resource/metrics.go +++ b/utils/resource/metrics.go @@ -4,9 +4,9 @@ package resource import ( - "github.com/prometheus/client_golang/prometheus" + "errors" - "github.com/ava-labs/avalanchego/utils" + "github.com/prometheus/client_golang/prometheus" ) type metrics struct { @@ -55,7 +55,7 @@ func newMetrics(registerer prometheus.Registerer) (*metrics, error) { []string{"processID"}, ), } - err := utils.Err( + err := errors.Join( registerer.Register(m.numCPUCycles), registerer.Register(m.numDiskReads), registerer.Register(m.numDiskReadBytes), diff --git a/utils/timer/adaptive_timeout_manager.go b/utils/timer/adaptive_timeout_manager.go index 5d8670bb56e2..1ad399e070d6 100644 --- a/utils/timer/adaptive_timeout_manager.go +++ b/utils/timer/adaptive_timeout_manager.go @@ -12,7 +12,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" 
"github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -133,7 +132,7 @@ func NewAdaptiveTimeoutManager( tm.timer = NewTimer(tm.timeout) tm.averager = math.NewAverager(float64(config.InitialTimeout), config.TimeoutHalflife, tm.clock.Time()) - err := utils.Err( + err := errors.Join( reg.Register(tm.networkTimeoutMetric), reg.Register(tm.avgLatency), reg.Register(tm.numTimeouts), diff --git a/vms/avm/block/parser.go b/vms/avm/block/parser.go index bfae841093d1..f5597d8e3f4a 100644 --- a/vms/avm/block/parser.go +++ b/vms/avm/block/parser.go @@ -4,10 +4,10 @@ package block import ( + "errors" "reflect" "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/avm/fxs" @@ -38,7 +38,7 @@ func NewParser(fxs []fxs.Fx) (Parser, error) { c := p.CodecRegistry() gc := p.GenesisCodecRegistry() - err = utils.Err( + err = errors.Join( c.RegisterType(&StandardBlock{}), gc.RegisterType(&StandardBlock{}), ) @@ -60,7 +60,7 @@ func NewCustomParser( c := p.CodecRegistry() gc := p.GenesisCodecRegistry() - err = utils.Err( + err = errors.Join( c.RegisterType(&StandardBlock{}), gc.RegisterType(&StandardBlock{}), ) diff --git a/vms/avm/state/state.go b/vms/avm/state/state.go index 5005eb3dfc14..4230a4f72bdf 100644 --- a/vms/avm/state/state.go +++ b/vms/avm/state/state.go @@ -4,6 +4,7 @@ package state import ( + "errors" "fmt" "time" @@ -15,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/avm/block" 
"github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -408,7 +408,7 @@ func (s *state) CommitBatch() (database.Batch, error) { } func (s *state) Close() error { - return utils.Err( + return errors.Join( s.utxoDB.Close(), s.txDB.Close(), s.blockIDDB.Close(), @@ -419,7 +419,7 @@ func (s *state) Close() error { } func (s *state) write() error { - return utils.Err( + return errors.Join( s.writeUTXOs(), s.writeTxs(), s.writeBlockIDs(), diff --git a/vms/avm/txs/parser.go b/vms/avm/txs/parser.go index c5b7fe19edc0..9dd3d3e4fa65 100644 --- a/vms/avm/txs/parser.go +++ b/vms/avm/txs/parser.go @@ -4,13 +4,13 @@ package txs import ( + "errors" "fmt" "math" "reflect" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/avm/fxs" @@ -60,7 +60,7 @@ func NewCustomParser( gcm := codec.NewManager(math.MaxInt32) cm := codec.NewDefaultManager() - err := utils.Err( + err := errors.Join( c.RegisterType(&BaseTx{}), c.RegisterType(&CreateAssetTx{}), c.RegisterType(&OperationTx{}), diff --git a/vms/avm/vm.go b/vms/avm/vm.go index 284243693349..e83e7cd9ea7a 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -26,7 +26,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/set" @@ -320,7 +319,7 @@ func (vm *VM) Shutdown(context.Context) error { vm.onShutdownCtxCancel() vm.awaitShutdown.Wait() - return 
utils.Err( + return errors.Join( vm.state.Close(), vm.baseDB.Close(), ) diff --git a/vms/components/keystore/codec.go b/vms/components/keystore/codec.go index 4e5a01db6dd0..1837cc576229 100644 --- a/vms/components/keystore/codec.go +++ b/vms/components/keystore/codec.go @@ -4,11 +4,11 @@ package keystore import ( + "errors" "math" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" ) const CodecVersion = 0 @@ -24,7 +24,7 @@ func init() { lc := linearcodec.NewDefault() LegacyCodec = codec.NewManager(math.MaxInt32) - err := utils.Err( + err := errors.Join( Codec.RegisterCodec(CodecVersion, c), LegacyCodec.RegisterCodec(CodecVersion, lc), ) diff --git a/vms/example/xsvm/execute/tx.go b/vms/example/xsvm/execute/tx.go index f3f6ad504de4..2755cd83f3c9 100644 --- a/vms/example/xsvm/execute/tx.go +++ b/vms/example/xsvm/execute/tx.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/example/xsvm/state" @@ -56,7 +55,7 @@ func (t *Tx) Transfer(tf *tx.Transfer) error { return errWrongChainID } - return utils.Err( + return errors.Join( state.IncrementNonce(t.Database, t.Sender, tf.Nonce), state.DecreaseBalance(t.Database, t.Sender, tf.ChainID, t.TransferFee), state.DecreaseBalance(t.Database, t.Sender, tf.AssetID, tf.Amount), diff --git a/vms/example/xsvm/tx/codec.go b/vms/example/xsvm/tx/codec.go index 4ba775abb3f4..bbe4e2a9db11 100644 --- a/vms/example/xsvm/tx/codec.go +++ b/vms/example/xsvm/tx/codec.go @@ -4,11 +4,11 @@ package tx import ( + "errors" "math" "github.com/ava-labs/avalanchego/codec" 
"github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" ) const CodecVersion = 0 @@ -19,7 +19,7 @@ func init() { c := linearcodec.NewDefault() Codec = codec.NewManager(math.MaxInt32) - err := utils.Err( + err := errors.Join( c.RegisterType(&Transfer{}), c.RegisterType(&Export{}), c.RegisterType(&Import{}), diff --git a/vms/nftfx/fx.go b/vms/nftfx/fx.go index 66ea9460b56b..e00cb6fa5a0d 100644 --- a/vms/nftfx/fx.go +++ b/vms/nftfx/fx.go @@ -7,7 +7,6 @@ import ( "bytes" "errors" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -34,7 +33,7 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { log.Debug("initializing nft fx") c := fx.VM.CodecRegistry() - return utils.Err( + return errors.Join( c.RegisterType(&MintOutput{}), c.RegisterType(&TransferOutput{}), c.RegisterType(&MintOperation{}), diff --git a/vms/platformvm/block/codec.go b/vms/platformvm/block/codec.go index f0f66a414811..16797459e525 100644 --- a/vms/platformvm/block/codec.go +++ b/vms/platformvm/block/codec.go @@ -4,11 +4,11 @@ package block import ( + "errors" "math" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -55,7 +55,7 @@ func init() { // subpackage-level codecs were introduced, each handling serialization of // specific types. 
func RegisterApricotBlockTypes(targetCodec codec.Registry) error { - return utils.Err( + return errors.Join( targetCodec.RegisterType(&ApricotProposalBlock{}), targetCodec.RegisterType(&ApricotAbortBlock{}), targetCodec.RegisterType(&ApricotCommitBlock{}), @@ -65,7 +65,7 @@ func RegisterApricotBlockTypes(targetCodec codec.Registry) error { } func RegisterBanffBlockTypes(targetCodec codec.Registry) error { - return utils.Err( + return errors.Join( targetCodec.RegisterType(&BanffProposalBlock{}), targetCodec.RegisterType(&BanffAbortBlock{}), targetCodec.RegisterType(&BanffCommitBlock{}), diff --git a/vms/platformvm/state/metadata_codec.go b/vms/platformvm/state/metadata_codec.go index f2f5478a89d3..637059997d2c 100644 --- a/vms/platformvm/state/metadata_codec.go +++ b/vms/platformvm/state/metadata_codec.go @@ -4,11 +4,11 @@ package state import ( + "errors" "math" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" ) const ( @@ -26,7 +26,7 @@ func init() { c1 := linearcodec.New([]string{CodecVersion0Tag, CodecVersion1Tag}) MetadataCodec = codec.NewManager(math.MaxInt32) - err := utils.Err( + err := errors.Join( MetadataCodec.RegisterCodec(CodecVersion0, c0), MetadataCodec.RegisterCodec(CodecVersion1, c1), ) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 2c090422e06d..74b71dadf8a2 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -26,7 +26,6 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/hashing" @@ -1272,7 +1271,7 @@ func (s *state) syncGenesis(genesisBlk 
block.Block, genesis *genesis.Genesis) er // Load pulls data previously stored on disk that is expected to be in memory. func (s *state) load() error { - return utils.Err( + return errors.Join( s.loadMetadata(), s.loadCurrentValidators(), s.loadPendingValidators(), @@ -1493,7 +1492,7 @@ func (s *state) loadCurrentValidators() error { } } - return utils.Err( + return errors.Join( validatorIt.Error(), subnetValidatorIt.Error(), delegatorIt.Error(), @@ -1577,7 +1576,7 @@ func (s *state) loadPendingValidators() error { } } - return utils.Err( + return errors.Join( validatorIt.Error(), subnetValidatorIt.Error(), delegatorIt.Error(), @@ -1627,7 +1626,7 @@ func (s *state) write(updateValidators bool, height uint64) error { codecVersion = CodecVersion0 } - return utils.Err( + return errors.Join( s.writeBlocks(), s.writeCurrentStakers(updateValidators, height, codecVersion), s.writePendingStakers(), @@ -1645,7 +1644,7 @@ func (s *state) write(updateValidators bool, height uint64) error { } func (s *state) Close() error { - return utils.Err( + return errors.Join( s.pendingSubnetValidatorBaseDB.Close(), s.pendingSubnetDelegatorBaseDB.Close(), s.pendingDelegatorBaseDB.Close(), @@ -2383,7 +2382,7 @@ func (s *state) ReindexBlocks(lock sync.Locker, log logging.Logger) error { // attempt to commit to disk while a block is concurrently being // accepted. 
lock.Lock() - err := utils.Err( + err := errors.Join( s.Commit(), blockIterator.Error(), ) diff --git a/vms/platformvm/txs/codec.go b/vms/platformvm/txs/codec.go index a93477af7074..809fb61e2899 100644 --- a/vms/platformvm/txs/codec.go +++ b/vms/platformvm/txs/codec.go @@ -4,11 +4,11 @@ package txs import ( + "errors" "math" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" @@ -104,7 +104,7 @@ func RegisterUnsignedTxsTypes(targetCodec linearcodec.Codec) error { } func RegisterDUnsignedTxsTypes(targetCodec linearcodec.Codec) error { - return utils.Err( + return errors.Join( targetCodec.RegisterType(&TransferSubnetOwnershipTx{}), targetCodec.RegisterType(&BaseTx{}), ) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index efbfe0fa5453..203688d23136 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -413,7 +413,7 @@ func (vm *VM) Shutdown(context.Context) error { } } - return utils.Err( + return errors.Join( vm.state.Close(), vm.db.Close(), ) diff --git a/vms/platformvm/warp/codec.go b/vms/platformvm/warp/codec.go index 8d2193827346..6ab7c2d8e446 100644 --- a/vms/platformvm/warp/codec.go +++ b/vms/platformvm/warp/codec.go @@ -4,11 +4,11 @@ package warp import ( + "errors" "math" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" ) const CodecVersion = 0 @@ -19,7 +19,7 @@ func init() { Codec = codec.NewManager(math.MaxInt) lc := linearcodec.NewDefault() - err := utils.Err( + err := errors.Join( lc.RegisterType(&BitSetSignature{}), Codec.RegisterCodec(CodecVersion, lc), ) diff --git a/vms/platformvm/warp/payload/codec.go 
b/vms/platformvm/warp/payload/codec.go index b89db089d454..4b7cd7270d52 100644 --- a/vms/platformvm/warp/payload/codec.go +++ b/vms/platformvm/warp/payload/codec.go @@ -4,9 +4,10 @@ package payload import ( + "errors" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" ) @@ -22,7 +23,7 @@ func init() { Codec = codec.NewManager(MaxMessageSize) lc := linearcodec.NewDefault() - err := utils.Err( + err := errors.Join( lc.RegisterType(&Hash{}), lc.RegisterType(&AddressedCall{}), Codec.RegisterCodec(CodecVersion, lc), diff --git a/vms/propertyfx/fx.go b/vms/propertyfx/fx.go index 24a3dff171cb..e6c3528fe229 100644 --- a/vms/propertyfx/fx.go +++ b/vms/propertyfx/fx.go @@ -6,7 +6,6 @@ package propertyfx import ( "errors" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -32,7 +31,7 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { log.Debug("initializing nft fx") c := fx.VM.CodecRegistry() - return utils.Err( + return errors.Join( c.RegisterType(&MintOutput{}), c.RegisterType(&OwnedOutput{}), c.RegisterType(&MintOperation{}), diff --git a/vms/proposervm/block/codec.go b/vms/proposervm/block/codec.go index a00ad7de2506..bd6099ded242 100644 --- a/vms/proposervm/block/codec.go +++ b/vms/proposervm/block/codec.go @@ -4,11 +4,11 @@ package block import ( + "errors" "math" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" ) const CodecVersion = 0 @@ -21,7 +21,7 @@ func init() { // See: [constants.DefaultMaxMessageSize] Codec = codec.NewManager(math.MaxInt) - err := utils.Err( + err := errors.Join( lc.RegisterType(&statelessBlock{}), lc.RegisterType(&option{}), 
Codec.RegisterCodec(CodecVersion, lc), diff --git a/vms/proposervm/vm.go b/vms/proposervm/vm.go index f5916fcf9f42..2baf8064e4e3 100644 --- a/vms/proposervm/vm.go +++ b/vms/proposervm/vm.go @@ -23,7 +23,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -234,7 +233,7 @@ func (vm *VM) Initialize( Buckets: []float64{0.5, 1.5, 2.5}, }) - return utils.Err( + return errors.Join( vm.Config.Registerer.Register(vm.proposerBuildSlotGauge), vm.Config.Registerer.Register(vm.acceptedBlocksSlotHistogram), ) diff --git a/vms/secp256k1fx/fx.go b/vms/secp256k1fx/fx.go index 8c1cfa53bf9a..b5457eb99dec 100644 --- a/vms/secp256k1fx/fx.go +++ b/vms/secp256k1fx/fx.go @@ -9,7 +9,6 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -60,7 +59,7 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { }, } c := fx.VM.CodecRegistry() - return utils.Err( + return errors.Join( c.RegisterType(&TransferInput{}), c.RegisterType(&MintOutput{}), c.RegisterType(&TransferOutput{}), diff --git a/vms/txs/mempool/metrics.go b/vms/txs/mempool/metrics.go index 7ad316082b7f..b60f849ac9cd 100644 --- a/vms/txs/mempool/metrics.go +++ b/vms/txs/mempool/metrics.go @@ -4,9 +4,9 @@ package mempool import ( - "github.com/prometheus/client_golang/prometheus" + "errors" - "github.com/ava-labs/avalanchego/utils" + 
"github.com/prometheus/client_golang/prometheus" ) var _ Metrics = (*metrics)(nil) @@ -30,7 +30,7 @@ func NewMetrics(namespace string, registerer prometheus.Registerer) (*metrics, e }), } - err := utils.Err( + err := errors.Join( registerer.Register(m.numTxs), registerer.Register(m.bytesAvailableMetric), ) diff --git a/x/merkledb/metrics.go b/x/merkledb/metrics.go index b9517ce9cd59..6266462913f3 100644 --- a/x/merkledb/metrics.go +++ b/x/merkledb/metrics.go @@ -4,11 +4,10 @@ package merkledb import ( + "errors" "sync" "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils" ) const ( @@ -116,7 +115,7 @@ func newMetrics(namespace string, reg prometheus.Registerer) (metrics, error) { Help: "cumulative number of in-memory lookups performed", }, lookupLabels), } - err := utils.Err( + err := errors.Join( reg.Register(m.hashes), reg.Register(m.io), reg.Register(m.lookup), diff --git a/x/sync/metrics.go b/x/sync/metrics.go index fb27e6b45ffb..0ece8dddd3fb 100644 --- a/x/sync/metrics.go +++ b/x/sync/metrics.go @@ -4,11 +4,10 @@ package sync import ( + "errors" "sync" "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils" ) var ( @@ -74,7 +73,7 @@ func NewMetrics(namespace string, reg prometheus.Registerer) (SyncMetrics, error Help: "cumulative amount of proof requests that were successful", }), } - err := utils.Err( + err := errors.Join( reg.Register(m.requestsFailed), reg.Register(m.requestsMade), reg.Register(m.requestsSucceeded), From 65c653d5624f5560c1fcb96291eef2fc4c8d1904 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 3 Jul 2024 18:10:06 -0400 Subject: [PATCH 089/102] Update versions for v1.11.9 (#3163) --- CONTRIBUTING.md | 2 +- README.md | 2 +- RELEASES.md | 60 ++++++++++++++++++++++++++++++++++++++ go.mod | 4 +-- go.sum | 4 +-- version/compatibility.json | 3 +- version/constants.go | 2 +- 7 files changed, 69 insertions(+), 8 deletions(-) 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7b57a5e6886f..5e9dcb437b53 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ To start developing on AvalancheGo, you'll need a few things installed. -- Golang version >= 1.21.11 +- Golang version >= 1.21.12 - gcc - g++ diff --git a/README.md b/README.md index e6763982bce8..2117a4316fac 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ The minimum recommended hardware specification for nodes connected to Mainnet is If you plan to build AvalancheGo from source, you will also need the following software: -- [Go](https://golang.org/doc/install) version >= 1.21.11 +- [Go](https://golang.org/doc/install) version >= 1.21.12 - [gcc](https://gcc.gnu.org/) - g++ diff --git a/RELEASES.md b/RELEASES.md index e616850e816c..a70ad214e1ff 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,65 @@ # Release Notes +## [v1.11.9](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.9) + +This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is unchanged at `35` and is compatible with versions `v1.11.3-v1.11.8`. 
+ +### APIs + +- Updated health metrics to use labels rather than namespaces +- Added consensus poll termination metrics + +### Configs + +- Added `--version-json` flag to output version information in json format + +### Fixes + +- Fixed incorrect WARN log that could previously be emitted during start on nodes with slower disks +- Fixed incorrect ERROR log that could previously be emitted if a peer tracking a subnet connects during shutdown +- Fixed ledger dependency on erased commit +- Fixed protobuf dependency to resolve compilation issues in some cases +- Fixed C-chain filename logging + +### What's Changed + +- Error driven snowflake multi counter by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/3092 +- [antithesis] Add ci jobs to trigger test runs by @marun in https://github.com/ava-labs/avalanchego/pull/3076 +- bump ledger-avalanche dependency to current main branch by @felipemadero in https://github.com/ava-labs/avalanchego/pull/3115 +- [antithesis] Fix image publication job by quoting default tag value by @marun in https://github.com/ava-labs/avalanchego/pull/3112 +- [e2e] Fix excessively verbose output from virtuous test by @marun in https://github.com/ava-labs/avalanchego/pull/3116 +- Remove .Status() from .IsPreferred() by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3111 +- Add early termination metrics case by case by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/3093 +- Update C-chain wallet context by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3118 +- Standardize wallet tx acceptance polling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3110 +- [antithesis] Remove assertions incompatible with fault injection by @marun in https://github.com/ava-labs/avalanchego/pull/3104 +- Use health labels by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3122 +- 
Remove `Decided` from the `Consensus` interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3123 +- Remove .Status() from .Accepted() by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3124 +- Refactor `event.Blocker` into `job.Scheduler` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3125 +- Remove block lookup from `deliver` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3130 +- [chains/atomic] Remove a nested if statement by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3135 +- [vms/platformvm] Minor grammer fixes in `state` struct code comments by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3136 +- bump protobuf (fixes some build issues) by @darioush in https://github.com/ava-labs/avalanchego/pull/3142 +- Emit version in JSON format for --json-version by @marun in https://github.com/ava-labs/avalanchego/pull/3129 +- Repackaged NextBlockTime and GetNextStakerChangeTime by @abi87 in https://github.com/ava-labs/avalanchego/pull/3134 +- [vms/platformvm] Cleanup execution config tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3137 +- [tmpnet] Enable bootstrap of subnets with disjoint validator sets by @marun in https://github.com/ava-labs/avalanchego/pull/3138 +- Simplify dependency registration by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3139 +- Replace `wasIssued` with `shouldIssueBlock` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3131 +- Remove parent lookup from issue by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3132 +- Remove status usage from consensus by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3140 +- Fix bootstrapping warn log by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/3156 +- chore: fix 
some comment by @hattizai in https://github.com/ava-labs/avalanchego/pull/3144 +- [ci] Add actionlint job by @marun in https://github.com/ava-labs/avalanchego/pull/3160 +- check router is closing in requests by @ceyonur in https://github.com/ava-labs/avalanchego/pull/3157 +- Use `ids.Empty` instead of `ids.ID{}` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3166 +- Replace usage of utils.Err with errors.Join by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/3167 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.8...v1.11.9 + ## [v1.11.8](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.8) This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. diff --git a/go.mod b/go.mod index cf4ca86ee324..3c251d2fc54c 100644 --- a/go.mod +++ b/go.mod @@ -4,13 +4,13 @@ module github.com/ava-labs/avalanchego // CONTRIBUTING.md // README.md // go.mod (here) -go 1.21.11 +go 1.21.12 require ( github.com/DataDog/zstd v1.5.2 github.com/NYTimes/gziphandler v1.1.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/coreth v0.13.5-rc.0 + github.com/ava-labs/coreth v0.13.6-rc.1 github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 diff --git a/go.sum b/go.sum index f969c7512b4f..49e67f0213d6 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= 
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/coreth v0.13.5-rc.0 h1:PJQbR9o2RrW3j9ba4r1glXnmM2PNAP3xR569+gMcBd0= -github.com/ava-labs/coreth v0.13.5-rc.0/go.mod h1:cm5c12xo5NiTgtbmeduv8i2nYdzgkczz9Wm3yiwwTRU= +github.com/ava-labs/coreth v0.13.6-rc.1 h1:gRXRokmu0WOlPqyx+mTLWB655e8/w++u6qFcq9Mo7qA= +github.com/ava-labs/coreth v0.13.6-rc.1/go.mod h1:vm9T8qzP7RLo/jR2MKkliPfaiGgWeEpu/PG6fvvPmog= github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95 h1:dOVbtdnZL++pENdTCNZ1nu41eYDQkTML4sWebDnnq8c= github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/version/compatibility.json b/version/compatibility.json index a1596351f373..cdcd18acd1fd 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -5,7 +5,8 @@ "v1.11.5", "v1.11.6", "v1.11.7", - "v1.11.8" + "v1.11.8", + "v1.11.9" ], "34": [ "v1.11.2" diff --git a/version/constants.go b/version/constants.go index 2899b37fac6b..a65d2a083ab2 100644 --- a/version/constants.go +++ b/version/constants.go @@ -26,7 +26,7 @@ var ( Current = &Semantic{ Major: 1, Minor: 11, - Patch: 8, + Patch: 9, } CurrentApp = &Application{ Name: Client, From 891ba52d172c81a15e703870eebf9f5bb158d9c2 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 3 Jul 2024 20:05:46 -0400 Subject: [PATCH 090/102] Refactor rpcchainvm metrics registration (#3170) --- node/node.go | 7 ++++ vms/registry/vm_getter.go | 3 ++ vms/rpcchainvm/batched_vm_test.go | 4 +-- vms/rpcchainvm/factory.go | 29 ++++++++------- vms/rpcchainvm/state_syncable_vm_test.go | 33 ++++++++--------- vms/rpcchainvm/vm_client.go | 45 ++++++++++++++---------- 
vms/rpcchainvm/with_context_vm_test.go | 4 +-- 7 files changed, 75 insertions(+), 50 deletions(-) diff --git a/node/node.go b/node/node.go index 2be24203f5e3..fe574b17ea52 100644 --- a/node/node.go +++ b/node/node.go @@ -102,6 +102,7 @@ const ( requestsNamespace = constants.PlatformName + metric.NamespaceSeparator + "requests" resourceTrackerNamespace = constants.PlatformName + metric.NamespaceSeparator + "resource_tracker" responsesNamespace = constants.PlatformName + metric.NamespaceSeparator + "responses" + rpcchainvmNamespace = constants.PlatformName + metric.NamespaceSeparator + "rpcchainvm" systemResourcesNamespace = constants.PlatformName + metric.NamespaceSeparator + "system_resources" ) @@ -1262,6 +1263,11 @@ func (n *Node) initVMs() error { // initialize vm runtime manager n.runtimeManager = runtime.NewManager() + rpcchainvmMetricsGatherer := metrics.NewLabelGatherer(chains.ChainLabel) + if err := n.MetricsGatherer.Register(rpcchainvmNamespace, rpcchainvmMetricsGatherer); err != nil { + return err + } + // initialize the vm registry n.VMRegistry = registry.NewVMRegistry(registry.VMRegistryConfig{ VMGetter: registry.NewVMGetter(registry.VMGetterConfig{ @@ -1270,6 +1276,7 @@ func (n *Node) initVMs() error { PluginDirectory: n.Config.PluginDir, CPUTracker: n.resourceManager, RuntimeTracker: n.runtimeManager, + MetricsGatherer: rpcchainvmMetricsGatherer, }), VMManager: n.VMManager, }) diff --git a/vms/registry/vm_getter.go b/vms/registry/vm_getter.go index 826624744e38..eaaa8eebac7b 100644 --- a/vms/registry/vm_getter.go +++ b/vms/registry/vm_getter.go @@ -8,6 +8,7 @@ import ( "fmt" "path/filepath" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/filesystem" "github.com/ava-labs/avalanchego/utils/resource" @@ -40,6 +41,7 @@ type VMGetterConfig struct { PluginDirectory string CPUTracker resource.ProcessTracker RuntimeTracker runtime.Tracker + 
MetricsGatherer metrics.MultiGatherer } type vmGetter struct { @@ -103,6 +105,7 @@ func (getter *vmGetter) Get() (map[ids.ID]vms.Factory, map[ids.ID]vms.Factory, e filepath.Join(getter.config.PluginDirectory, file.Name()), getter.config.CPUTracker, getter.config.RuntimeTracker, + getter.config.MetricsGatherer, ) } return registeredVMs, unregisteredVMs, nil diff --git a/vms/rpcchainvm/batched_vm_test.go b/vms/rpcchainvm/batched_vm_test.go index b24c0699e597..9d6a602283a3 100644 --- a/vms/rpcchainvm/batched_vm_test.go +++ b/vms/rpcchainvm/batched_vm_test.go @@ -81,8 +81,8 @@ func TestBatchedParseBlockCaching(t *testing.T) { testKey := batchedParseBlockCachingTestKey // Create and start the plugin - vm, stopper := buildClientHelper(require, testKey) - defer stopper.Stop(context.Background()) + vm := buildClientHelper(require, testKey) + defer vm.runtime.Stop(context.Background()) ctx := snowtest.Context(t, snowtest.CChainID) diff --git a/vms/rpcchainvm/factory.go b/vms/rpcchainvm/factory.go index d61c41d11af8..75f9fe97bf69 100644 --- a/vms/rpcchainvm/factory.go +++ b/vms/rpcchainvm/factory.go @@ -7,6 +7,7 @@ import ( "context" "fmt" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/vms" @@ -18,16 +19,23 @@ import ( var _ vms.Factory = (*factory)(nil) type factory struct { - path string - processTracker resource.ProcessTracker - runtimeTracker runtime.Tracker + path string + processTracker resource.ProcessTracker + runtimeTracker runtime.Tracker + metricsGatherer metrics.MultiGatherer } -func NewFactory(path string, processTracker resource.ProcessTracker, runtimeTracker runtime.Tracker) vms.Factory { +func NewFactory( + path string, + processTracker resource.ProcessTracker, + runtimeTracker runtime.Tracker, + metricsGatherer metrics.MultiGatherer, +) vms.Factory { return &factory{ - path: path, - processTracker: 
processTracker, - runtimeTracker: runtimeTracker, + path: path, + processTracker: processTracker, + runtimeTracker: runtimeTracker, + metricsGatherer: metricsGatherer, } } @@ -59,10 +67,7 @@ func (f *factory) New(log logging.Logger) (interface{}, error) { return nil, err } - vm := NewClient(clientConn) - vm.SetProcess(stopper, status.Pid, f.processTracker) - + f.processTracker.TrackProcess(status.Pid) f.runtimeTracker.TrackRuntime(stopper) - - return vm, nil + return NewClient(clientConn, stopper, status.Pid, f.processTracker, f.metricsGatherer), nil } diff --git a/vms/rpcchainvm/state_syncable_vm_test.go b/vms/rpcchainvm/state_syncable_vm_test.go index 504f628fc90a..9e77daa3799a 100644 --- a/vms/rpcchainvm/state_syncable_vm_test.go +++ b/vms/rpcchainvm/state_syncable_vm_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" @@ -261,7 +262,7 @@ func lastAcceptedBlockPostStateSummaryAcceptTestPlugin(t *testing.T, loadExpecta return ssVM } -func buildClientHelper(require *require.Assertions, testKey string) (*VMClient, runtime.Stopper) { +func buildClientHelper(require *require.Assertions, testKey string) *VMClient { process := helperProcess(testKey) log := logging.NewLogger( @@ -292,7 +293,7 @@ func buildClientHelper(require *require.Assertions, testKey string) (*VMClient, clientConn, err := grpcutils.Dial(status.Addr) require.NoError(err) - return NewClient(clientConn), stopper + return NewClient(clientConn, stopper, status.Pid, nil, metrics.NewPrefixGatherer()) } func TestStateSyncEnabled(t *testing.T) { @@ -300,8 +301,8 @@ func TestStateSyncEnabled(t *testing.T) { testKey := stateSyncEnabledTestKey // Create and start the plugin - vm, stopper := buildClientHelper(require, testKey) - defer 
stopper.Stop(context.Background()) + vm := buildClientHelper(require, testKey) + defer vm.runtime.Stop(context.Background()) // test state sync not implemented // Note that enabled == false is returned rather than @@ -331,8 +332,8 @@ func TestGetOngoingSyncStateSummary(t *testing.T) { testKey := getOngoingSyncStateSummaryTestKey // Create and start the plugin - vm, stopper := buildClientHelper(require, testKey) - defer stopper.Stop(context.Background()) + vm := buildClientHelper(require, testKey) + defer vm.runtime.Stop(context.Background()) // test unimplemented case; this is just a guard _, err := vm.GetOngoingSyncStateSummary(context.Background()) @@ -356,8 +357,8 @@ func TestGetLastStateSummary(t *testing.T) { testKey := getLastStateSummaryTestKey // Create and start the plugin - vm, stopper := buildClientHelper(require, testKey) - defer stopper.Stop(context.Background()) + vm := buildClientHelper(require, testKey) + defer vm.runtime.Stop(context.Background()) // test unimplemented case; this is just a guard _, err := vm.GetLastStateSummary(context.Background()) @@ -381,8 +382,8 @@ func TestParseStateSummary(t *testing.T) { testKey := parseStateSummaryTestKey // Create and start the plugin - vm, stopper := buildClientHelper(require, testKey) - defer stopper.Stop(context.Background()) + vm := buildClientHelper(require, testKey) + defer vm.runtime.Stop(context.Background()) // test unimplemented case; this is just a guard _, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) @@ -410,8 +411,8 @@ func TestGetStateSummary(t *testing.T) { testKey := getStateSummaryTestKey // Create and start the plugin - vm, stopper := buildClientHelper(require, testKey) - defer stopper.Stop(context.Background()) + vm := buildClientHelper(require, testKey) + defer vm.runtime.Stop(context.Background()) // test unimplemented case; this is just a guard _, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) @@ -435,8 +436,8 @@ func 
TestAcceptStateSummary(t *testing.T) { testKey := acceptStateSummaryTestKey // Create and start the plugin - vm, stopper := buildClientHelper(require, testKey) - defer stopper.Stop(context.Background()) + vm := buildClientHelper(require, testKey) + defer vm.runtime.Stop(context.Background()) // retrieve the summary first summary, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) @@ -465,8 +466,8 @@ func TestLastAcceptedBlockPostStateSummaryAccept(t *testing.T) { testKey := lastAcceptedBlockPostStateSummaryAcceptTestKey // Create and start the plugin - vm, stopper := buildClientHelper(require, testKey) - defer stopper.Stop(context.Background()) + vm := buildClientHelper(require, testKey) + defer vm.runtime.Stop(context.Background()) // Step 1: initialize VM and check initial LastAcceptedBlock ctx := snowtest.Context(t, snowtest.CChainID) diff --git a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go index 6e6417725f11..d5e115ca0233 100644 --- a/vms/rpcchainvm/vm_client.go +++ b/vms/rpcchainvm/vm_client.go @@ -85,10 +85,11 @@ var ( // VMClient is an implementation of a VM that talks over RPC. 
type VMClient struct { *chain.State - client vmpb.VMClient - runtime runtime.Stopper - pid int - processTracker resource.ProcessTracker + client vmpb.VMClient + runtime runtime.Stopper + pid int + processTracker resource.ProcessTracker + metricsGatherer metrics.MultiGatherer messenger *messenger.Server keystore *gkeystore.Server @@ -105,21 +106,23 @@ type VMClient struct { } // NewClient returns a VM connected to a remote VM -func NewClient(clientConn *grpc.ClientConn) *VMClient { +func NewClient( + clientConn *grpc.ClientConn, + runtime runtime.Stopper, + pid int, + processTracker resource.ProcessTracker, + metricsGatherer metrics.MultiGatherer, +) *VMClient { return &VMClient{ - client: vmpb.NewVMClient(clientConn), - conns: []*grpc.ClientConn{clientConn}, + client: vmpb.NewVMClient(clientConn), + runtime: runtime, + pid: pid, + processTracker: processTracker, + metricsGatherer: metricsGatherer, + conns: []*grpc.ClientConn{clientConn}, } } -// SetProcess gives ownership of the server process to the client. 
-func (vm *VMClient) SetProcess(runtime runtime.Stopper, pid int, processTracker resource.ProcessTracker) { - vm.runtime = runtime - vm.processTracker = processTracker - vm.pid = pid - processTracker.TrackProcess(vm.pid) -} - func (vm *VMClient) Initialize( ctx context.Context, chainCtx *snow.Context, @@ -135,10 +138,16 @@ func (vm *VMClient) Initialize( return errUnsupportedFXs } + primaryAlias, err := chainCtx.BCLookup.PrimaryAlias(chainCtx.ChainID) + if err != nil { + // If fetching the alias fails, we default to the chain's ID + primaryAlias = chainCtx.ChainID.String() + } + // Register metrics serverReg, err := metrics.MakeAndRegister( - chainCtx.Metrics, - "rpcchainvm", + vm.metricsGatherer, + primaryAlias, ) if err != nil { return err @@ -148,7 +157,7 @@ func (vm *VMClient) Initialize( return err } - if err := chainCtx.Metrics.Register("plugin", vm); err != nil { + if err := chainCtx.Metrics.Register("", vm); err != nil { return err } diff --git a/vms/rpcchainvm/with_context_vm_test.go b/vms/rpcchainvm/with_context_vm_test.go index f87a6cba5f0b..3c256392832f 100644 --- a/vms/rpcchainvm/with_context_vm_test.go +++ b/vms/rpcchainvm/with_context_vm_test.go @@ -94,8 +94,8 @@ func TestContextVMSummary(t *testing.T) { testKey := contextTestKey // Create and start the plugin - vm, stopper := buildClientHelper(require, testKey) - defer stopper.Stop(context.Background()) + vm := buildClientHelper(require, testKey) + defer vm.runtime.Stop(context.Background()) ctx := snowtest.Context(t, snowtest.CChainID) From 0bc46174889b81bcb79a33818b0a192c58a7f816 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 3 Jul 2024 20:50:34 -0400 Subject: [PATCH 091/102] Add example reward calculator usage (#3171) --- vms/platformvm/reward/example_test.go | 43 +++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 vms/platformvm/reward/example_test.go diff --git a/vms/platformvm/reward/example_test.go b/vms/platformvm/reward/example_test.go new file mode 
100644 index 000000000000..74cccc143161 --- /dev/null +++ b/vms/platformvm/reward/example_test.go @@ -0,0 +1,43 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package reward + +import ( + "fmt" + "time" + + "github.com/ava-labs/avalanchego/utils/units" +) + +func ExampleNewCalculator() { + const ( + day = 24 * time.Hour + week = 7 * day + stakingDuration = 4 * week + + stakeAmount = 100_000 * units.Avax // 100k AVAX + + // The current supply can be fetched with the platform.getCurrentSupply API + currentSupply = 447_903_489_576_595_361 * units.NanoAvax // ~448m AVAX + ) + var ( + mainnetRewardConfig = Config{ + MaxConsumptionRate: .12 * PercentDenominator, + MinConsumptionRate: .10 * PercentDenominator, + MintingPeriod: 365 * 24 * time.Hour, + SupplyCap: 720 * units.MegaAvax, + } + mainnetCalculator = NewCalculator(mainnetRewardConfig) + ) + + potentialReward := mainnetCalculator.Calculate(stakingDuration, stakeAmount, currentSupply) + + fmt.Printf("Staking %d nAVAX for %s with the current supply of %d nAVAX would have a potential reward of %d nAVAX", + stakeAmount, + stakingDuration, + currentSupply, + potentialReward, + ) + // Output: Staking 100000000000000 nAVAX for 672h0m0s with the current supply of 447903489576595361 nAVAX would have a potential reward of 473168956104 nAVAX +} From 45a30f9868714d0afea39d3d948ec1dbbfadfc1c Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Thu, 4 Jul 2024 14:46:57 -0400 Subject: [PATCH 092/102] Send AppErrors from p2p SDK (#2753) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Stephen Buttolph --- go.mod | 2 +- go.sum | 4 +- network/p2p/error.go | 33 ++++++++ network/p2p/gossip/handler.go | 16 ++-- network/p2p/handler.go | 18 ++--- network/p2p/handler_test.go | 3 +- network/p2p/network_test.go | 108 +++++++++++++++++++++++--- network/p2p/router.go | 12 ++- 
network/p2p/throttler_handler.go | 12 +-- network/p2p/throttler_handler_test.go | 3 +- vms/avm/network/gossip.go | 3 +- vms/platformvm/network/gossip.go | 3 +- vms/platformvm/vm_test.go | 3 + 13 files changed, 174 insertions(+), 46 deletions(-) create mode 100644 network/p2p/error.go diff --git a/go.mod b/go.mod index 3c251d2fc54c..4eef73478aa4 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/DataDog/zstd v1.5.2 github.com/NYTimes/gziphandler v1.1.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/coreth v0.13.6-rc.1 + github.com/ava-labs/coreth v0.13.6-rc.1.0.20240702201359-ba2ce5367874 github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 diff --git a/go.sum b/go.sum index 49e67f0213d6..0642e3c0e5fa 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/coreth v0.13.6-rc.1 h1:gRXRokmu0WOlPqyx+mTLWB655e8/w++u6qFcq9Mo7qA= -github.com/ava-labs/coreth v0.13.6-rc.1/go.mod h1:vm9T8qzP7RLo/jR2MKkliPfaiGgWeEpu/PG6fvvPmog= +github.com/ava-labs/coreth v0.13.6-rc.1.0.20240702201359-ba2ce5367874 h1:aTDg0jvO07EvUvBYebmLO25bffe1DAaZZPPL0ooGhIA= +github.com/ava-labs/coreth v0.13.6-rc.1.0.20240702201359-ba2ce5367874/go.mod h1:VhNDxZBsqZQQaUTmIkzdyY8UicIsoTDXlRmPaPL9lkA= github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95 
h1:dOVbtdnZL++pENdTCNZ1nu41eYDQkTML4sWebDnnq8c= github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/network/p2p/error.go b/network/p2p/error.go new file mode 100644 index 000000000000..07207319a041 --- /dev/null +++ b/network/p2p/error.go @@ -0,0 +1,33 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p2p + +import "github.com/ava-labs/avalanchego/snow/engine/common" + +var ( + // ErrUnexpected should be used to indicate that a request failed due to a + // generic error + ErrUnexpected = &common.AppError{ + Code: -1, + Message: "unexpected error", + } + // ErrUnregisteredHandler should be used to indicate that a request failed + // due to it not matching a registered handler + ErrUnregisteredHandler = &common.AppError{ + Code: -2, + Message: "unregistered handler", + } + // ErrNotValidator should be used to indicate that a request failed due to + // the requesting peer not being a validator + ErrNotValidator = &common.AppError{ + Code: -3, + Message: "not a validator", + } + // ErrThrottled should be used to indicate that a request failed due to the + // requesting peer exceeding a rate limit + ErrThrottled = &common.AppError{ + Code: -4, + Message: "throttled", + } +) diff --git a/network/p2p/gossip/handler.go b/network/p2p/gossip/handler.go index 7f5f7b380ed7..1205a5f2b7df 100644 --- a/network/p2p/gossip/handler.go +++ b/network/p2p/gossip/handler.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/logging" ) 
@@ -43,10 +44,10 @@ type Handler[T Gossipable] struct { targetResponseSize int } -func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, error) { +func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { filter, salt, err := ParseAppRequest(requestBytes) if err != nil { - return nil, err + return nil, p2p.ErrUnexpected } responseSize := 0 @@ -73,14 +74,19 @@ func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, req return responseSize <= h.targetResponseSize }) if err != nil { - return nil, err + return nil, p2p.ErrUnexpected } if err := h.metrics.observeMessage(sentPullLabels, len(gossipBytes), responseSize); err != nil { - return nil, err + return nil, p2p.ErrUnexpected } - return MarshalAppResponse(gossipBytes) + response, err := MarshalAppResponse(gossipBytes) + if err != nil { + return nil, p2p.ErrUnexpected + } + + return response, nil } func (h Handler[_]) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes []byte) { diff --git a/network/p2p/handler.go b/network/p2p/handler.go index ed005fb761c0..3afeb3eb9a46 100644 --- a/network/p2p/handler.go +++ b/network/p2p/handler.go @@ -5,7 +5,6 @@ package p2p import ( "context" - "errors" "time" "go.uber.org/zap" @@ -17,8 +16,6 @@ import ( ) var ( - ErrNotValidator = errors.New("not a validator") - _ Handler = (*NoOpHandler)(nil) _ Handler = (*TestHandler)(nil) _ Handler = (*ValidatorHandler)(nil) @@ -33,13 +30,14 @@ type Handler interface { gossipBytes []byte, ) // AppRequest is called when handling an AppRequest message. - // Returns the bytes for the response corresponding to [requestBytes] + // Sends a response with the response corresponding to [requestBytes] or + // an application-defined error. 
AppRequest( ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte, - ) ([]byte, error) + ) ([]byte, *common.AppError) // CrossChainAppRequest is called when handling a CrossChainAppRequest // message. // Returns the bytes for the response corresponding to [requestBytes] @@ -56,7 +54,7 @@ type NoOpHandler struct{} func (NoOpHandler) AppGossip(context.Context, ids.NodeID, []byte) {} -func (NoOpHandler) AppRequest(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { +func (NoOpHandler) AppRequest(context.Context, ids.NodeID, time.Time, []byte) ([]byte, *common.AppError) { return nil, nil } @@ -95,7 +93,7 @@ func (v ValidatorHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, goss v.handler.AppGossip(ctx, nodeID, gossipBytes) } -func (v ValidatorHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { +func (v ValidatorHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { if !v.validatorSet.Has(ctx, nodeID) { return nil, ErrNotValidator } @@ -128,7 +126,7 @@ func (r *responder) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID zap.Binary("message", request), zap.Error(err), ) - return nil + return r.sender.SendAppError(ctx, nodeID, requestID, err.Code, err.Message) } return r.sender.SendAppResponse(ctx, nodeID, requestID, appResponse) @@ -155,7 +153,7 @@ func (r *responder) CrossChainAppRequest(ctx context.Context, chainID ids.ID, re type TestHandler struct { AppGossipF func(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) - AppRequestF func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) + AppRequestF func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) CrossChainAppRequestF func(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) 
([]byte, error) } @@ -167,7 +165,7 @@ func (t TestHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipByt t.AppGossipF(ctx, nodeID, gossipBytes) } -func (t TestHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { +func (t TestHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { if t.AppRequestF == nil { return nil, nil } diff --git a/network/p2p/handler_test.go b/network/p2p/handler_test.go index 0633b70f00a8..f933215ca6ee 100644 --- a/network/p2p/handler_test.go +++ b/network/p2p/handler_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) @@ -79,7 +80,7 @@ func TestValidatorHandlerAppRequest(t *testing.T) { name string validatorSet ValidatorSet nodeID ids.NodeID - expected error + expected *common.AppError }{ { name: "message dropped", diff --git a/network/p2p/network_test.go b/network/p2p/network_test.go index 5339a6eeb315..5346a9255a00 100644 --- a/network/p2p/network_test.go +++ b/network/p2p/network_test.go @@ -43,7 +43,7 @@ func TestMessageRouting(t *testing.T) { require.Equal(wantNodeID, nodeID) require.Equal(wantMsg, msg) }, - AppRequestF: func(_ context.Context, nodeID ids.NodeID, _ time.Time, msg []byte) ([]byte, error) { + AppRequestF: func(_ context.Context, nodeID ids.NodeID, _ time.Time, msg []byte) ([]byte, *common.AppError) { appRequestCalled = true require.Equal(wantNodeID, nodeID) require.Equal(wantMsg, msg) @@ -352,7 +352,7 @@ func TestCrossChainAppRequestFailed(t *testing.T) { } // Messages for unregistered handlers should be dropped gracefully -func TestMessageForUnregisteredHandler(t *testing.T) { +func TestAppGossipMessageForUnregisteredHandler(t 
*testing.T) { tests := []struct { name string msg []byte @@ -379,26 +379,110 @@ func TestMessageForUnregisteredHandler(t *testing.T) { AppGossipF: func(context.Context, ids.NodeID, []byte) { require.Fail("should not be called") }, - AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { - require.Fail("should not be called") - return nil, nil - }, - CrossChainAppRequestF: func(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + } + network, err := NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.AddHandler(handlerID, handler)) + require.NoError(network.AppGossip(ctx, ids.EmptyNodeID, tt.msg)) + }) + } +} + +// An unregistered handler should gracefully drop messages by responding +// to the requester with a common.AppError +func TestAppRequestMessageForUnregisteredHandler(t *testing.T) { + tests := []struct { + name string + msg []byte + }{ + { + name: "nil", + msg: nil, + }, + { + name: "empty", + msg: []byte{}, + }, + { + name: "non-empty", + msg: []byte("foobar"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctx := context.Background() + handler := &TestHandler{ + AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, *common.AppError) { require.Fail("should not be called") return nil, nil }, } - network, err := NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + + wantNodeID := ids.GenerateTestNodeID() + wantRequestID := uint32(111) + + done := make(chan struct{}) + sender := &common.SenderTest{} + sender.SendAppErrorF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error { + defer close(done) + + require.Equal(wantNodeID, nodeID) + require.Equal(wantRequestID, requestID) + require.Equal(ErrUnregisteredHandler.Code, errorCode) + require.Equal(ErrUnregisteredHandler.Message, errorMessage) + + return nil + } + 
network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") require.NoError(err) require.NoError(network.AddHandler(handlerID, handler)) - require.NoError(network.AppRequest(ctx, ids.EmptyNodeID, 0, time.Time{}, tt.msg)) - require.NoError(network.AppGossip(ctx, ids.EmptyNodeID, tt.msg)) - require.NoError(network.CrossChainAppRequest(ctx, ids.Empty, 0, time.Time{}, tt.msg)) + require.NoError(network.AppRequest(ctx, wantNodeID, wantRequestID, time.Time{}, tt.msg)) + <-done }) } } +// A handler that errors should send an AppError to the requesting peer +func TestAppError(t *testing.T) { + require := require.New(t) + ctx := context.Background() + appError := &common.AppError{ + Code: 123, + Message: "foo", + } + handler := &TestHandler{ + AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, *common.AppError) { + return nil, appError + }, + } + + wantNodeID := ids.GenerateTestNodeID() + wantRequestID := uint32(111) + + done := make(chan struct{}) + sender := &common.SenderTest{} + sender.SendAppErrorF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error { + defer close(done) + + require.Equal(wantNodeID, nodeID) + require.Equal(wantRequestID, requestID) + require.Equal(appError.Code, errorCode) + require.Equal(appError.Message, errorMessage) + + return nil + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.AddHandler(handlerID, handler)) + msg := PrefixMessage(ProtocolPrefix(handlerID), []byte("message")) + + require.NoError(network.AppRequest(ctx, wantNodeID, wantRequestID, time.Time{}, msg)) + <-done +} + // A response or timeout for a request we never made should return an error func TestResponseForUnrequestedRequest(t *testing.T) { tests := []struct { @@ -427,7 +511,7 @@ func TestResponseForUnrequestedRequest(t *testing.T) { AppGossipF: func(context.Context, ids.NodeID, []byte) { 
require.Fail("should not be called") }, - AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { + AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, *common.AppError) { require.Fail("should not be called") return nil, nil }, diff --git a/network/p2p/router.go b/network/p2p/router.go index 6d4c7efe4d47..8f099ddb0b64 100644 --- a/network/p2p/router.go +++ b/network/p2p/router.go @@ -128,14 +128,18 @@ func (r *router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID ui start := time.Now() parsedMsg, handler, handlerID, ok := r.parse(request) if !ok { - r.log.Debug("failed to process message", + r.log.Debug("received message for unregistered handler", zap.Stringer("messageOp", message.AppRequestOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Time("deadline", deadline), zap.Binary("message", request), ) - return nil + + // Send an error back to the requesting peer. Invalid requests that we + // cannot parse a handler id for are handled the same way as requests + // for which we do not have a registered handler. 
+ return r.sender.SendAppError(ctx, nodeID, requestID, ErrUnregisteredHandler.Code, ErrUnregisteredHandler.Message) } // call the corresponding handler and send back a response to nodeID @@ -209,7 +213,7 @@ func (r *router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte start := time.Now() parsedMsg, handler, handlerID, ok := r.parse(gossip) if !ok { - r.log.Debug("failed to process message", + r.log.Debug("received message for unregistered handler", zap.Stringer("messageOp", message.AppGossipOp), zap.Stringer("nodeID", nodeID), zap.Binary("message", gossip), @@ -244,7 +248,7 @@ func (r *router) CrossChainAppRequest( start := time.Now() parsedMsg, handler, handlerID, ok := r.parse(msg) if !ok { - r.log.Debug("failed to process message", + r.log.Debug("received message for unregistered handler", zap.Stringer("messageOp", message.CrossChainAppRequestOp), zap.Stringer("chainID", chainID), zap.Uint32("requestID", requestID), diff --git a/network/p2p/throttler_handler.go b/network/p2p/throttler_handler.go index df0200482ef6..2718f733223c 100644 --- a/network/p2p/throttler_handler.go +++ b/network/p2p/throttler_handler.go @@ -5,20 +5,16 @@ package p2p import ( "context" - "errors" - "fmt" "time" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" ) -var ( - ErrThrottled = errors.New("throttled") - _ Handler = (*ThrottlerHandler)(nil) -) +var _ Handler = (*ThrottlerHandler)(nil) func NewThrottlerHandler(handler Handler, throttler Throttler, log logging.Logger) *ThrottlerHandler { return &ThrottlerHandler{ @@ -46,9 +42,9 @@ func (t ThrottlerHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, goss t.handler.AppGossip(ctx, nodeID, gossipBytes) } -func (t ThrottlerHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { +func (t ThrottlerHandler) AppRequest(ctx 
context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { if !t.throttler.Handle(nodeID) { - return nil, fmt.Errorf("dropping message from %s: %w", nodeID, ErrThrottled) + return nil, ErrThrottled } return t.handler.AppRequest(ctx, nodeID, deadline, requestBytes) diff --git a/network/p2p/throttler_handler_test.go b/network/p2p/throttler_handler_test.go index 1f5a07069d8e..52dd964f1013 100644 --- a/network/p2p/throttler_handler_test.go +++ b/network/p2p/throttler_handler_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -57,7 +58,7 @@ func TestThrottlerHandlerAppRequest(t *testing.T) { tests := []struct { name string Throttler Throttler - expectedErr error + expectedErr *common.AppError }{ { name: "not throttled", diff --git a/vms/avm/network/gossip.go b/vms/avm/network/gossip.go index 6ea5f1448485..131cc51688fa 100644 --- a/vms/avm/network/gossip.go +++ b/vms/avm/network/gossip.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/txs/mempool" @@ -51,7 +52,7 @@ func (t txGossipHandler) AppRequest( nodeID ids.NodeID, deadline time.Time, requestBytes []byte, -) ([]byte, error) { +) ([]byte, *common.AppError) { return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) } diff --git a/vms/platformvm/network/gossip.go b/vms/platformvm/network/gossip.go index c8cfefceed48..7e6e7adc341c 100644 --- a/vms/platformvm/network/gossip.go +++ 
b/vms/platformvm/network/gossip.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/txs/mempool" @@ -51,7 +52,7 @@ func (t txGossipHandler) AppRequest( nodeID ids.NodeID, deadline time.Time, requestBytes []byte, -) ([]byte, error) { +) ([]byte, *common.AppError) { return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) } diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 24fa707a8e32..9153ee6548bf 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -291,6 +291,9 @@ func defaultVM(t *testing.T, f fork) (*VM, *txstest.WalletFactory, database.Data appSender.SendAppGossipF = func(context.Context, common.SendConfig, []byte) error { return nil } + appSender.SendAppErrorF = func(context.Context, ids.NodeID, uint32, int32, string) error { + return nil + } dynamicConfigBytes := []byte(`{"network":{"max-validator-set-staleness":0}}`) require.NoError(vm.Initialize( From 2af3890f79d49aacd2de4c090433fdf614398dc9 Mon Sep 17 00:00:00 2001 From: Arran Schlosberg <519948+ARR4N@users.noreply.github.com> Date: Mon, 8 Jul 2024 17:41:21 +0100 Subject: [PATCH 093/102] build(tests): require `//go:build test` tag if importing test packages outside of `_test.go` files (#3173) --- .golangci.yml | 5 ++++ .vscode/settings.json | 11 ++++++++ cache/test_cacher.go | 2 ++ chains/atomic/test_shared_memory.go | 2 ++ codec/test_codec.go | 2 ++ database/benchmark_database.go | 2 ++ database/test_database.go | 2 ++ ids/test_aliases.go | 2 ++ scripts/build_fuzz.sh | 2 +- scripts/build_test.sh | 2 +- scripts/lint.sh | 27 ++++++++++++++++++- scripts/tests.e2e.existing.sh | 2 +- 
scripts/tests.e2e.sh | 4 +-- scripts/tests.upgrade.sh | 2 +- snow/consensus/snowball/test_snowflake.go | 2 ++ .../avalanche/bootstrap/queue/test_job.go | 2 ++ .../avalanche/bootstrap/queue/test_parser.go | 2 ++ snow/engine/avalanche/vertex/test_builder.go | 2 ++ snow/engine/avalanche/vertex/test_manager.go | 2 ++ snow/engine/avalanche/vertex/test_parser.go | 2 ++ snow/engine/avalanche/vertex/test_storage.go | 2 ++ snow/engine/avalanche/vertex/test_vm.go | 2 ++ snow/engine/common/test_bootstrap_tracker.go | 2 ++ snow/engine/common/test_bootstrapper.go | 2 ++ snow/engine/common/test_engine.go | 2 ++ snow/engine/common/test_sender.go | 2 ++ snow/engine/common/test_timer.go | 2 ++ snow/engine/common/test_vm.go | 2 ++ snow/engine/snowman/block/test_batched_vm.go | 2 ++ .../snowman/block/test_state_summary.go | 2 ++ .../snowman/block/test_state_syncable_vm.go | 2 ++ snow/engine/snowman/block/test_vm.go | 2 ++ snow/networking/benchlist/test_benchable.go | 2 ++ .../networking/sender/test_external_sender.go | 2 ++ snow/snowtest/snowtest.go | 2 ++ snow/validators/test_state.go | 3 +++ tests/e2e/banff/suites.go | 2 ++ tests/e2e/c/dynamic_fees.go | 2 ++ tests/e2e/c/interchain_workflow.go | 2 ++ tests/e2e/faultinjection/duplicate_node_id.go | 2 ++ tests/e2e/p/interchain_workflow.go | 2 ++ tests/e2e/p/permissionless_subnets.go | 2 ++ tests/e2e/p/staking_rewards.go | 2 ++ tests/e2e/p/validator_sets.go | 2 ++ tests/e2e/p/workflow.go | 2 ++ tests/e2e/vms/xsvm.go | 2 ++ tests/e2e/x/interchain_workflow.go | 2 ++ tests/e2e/x/transfer/virtuous.go | 2 ++ tests/fixture/e2e/env.go | 2 ++ tests/fixture/e2e/helpers.go | 2 ++ vms/platformvm/warp/test_signer.go | 2 ++ wallet/subnet/primary/common/test_utxos.go | 2 ++ 52 files changed, 137 insertions(+), 7 deletions(-) create mode 100644 .vscode/settings.json diff --git a/.golangci.yml b/.golangci.yml index a1991abd29aa..1809a5fa6609 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -15,6 +15,11 @@ run: # By default, it isn't set. 
modules-download-mode: readonly + # Include non-test files tagged as test-only. + # Context: https://github.com/ava-labs/avalanchego/pull/3173 + build-tags: + - test + output: # Make issues output unique by line. # Default: true diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000000..0abd38ef2982 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,11 @@ +{ + "gopls": { + "build.buildFlags": [ + // Context: https://github.com/ava-labs/avalanchego/pull/3173 + // Without this tag, the language server won't build the test-only + // code in non-_test.go files. + "--tags='test'", + ], + }, + "go.testTags": "test", +} \ No newline at end of file diff --git a/cache/test_cacher.go b/cache/test_cacher.go index 2e85502e4a55..35b1ad9c1711 100644 --- a/cache/test_cacher.go +++ b/cache/test_cacher.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package cache import ( diff --git a/chains/atomic/test_shared_memory.go b/chains/atomic/test_shared_memory.go index 82b1cbeff3a5..f1c7b44ae30c 100644 --- a/chains/atomic/test_shared_memory.go +++ b/chains/atomic/test_shared_memory.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package atomic import ( diff --git a/codec/test_codec.go b/codec/test_codec.go index 2dc8b3e2add9..974168dd4829 100644 --- a/codec/test_codec.go +++ b/codec/test_codec.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package codec import ( diff --git a/database/benchmark_database.go b/database/benchmark_database.go index 43af10db1c2b..479035c126cb 100644 --- a/database/benchmark_database.go +++ b/database/benchmark_database.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. +//go:build test + package database import ( diff --git a/database/test_database.go b/database/test_database.go index e35a98dca36f..4e5bc174ae85 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package database import ( diff --git a/ids/test_aliases.go b/ids/test_aliases.go index ce9991f5f737..3943fce16023 100644 --- a/ids/test_aliases.go +++ b/ids/test_aliases.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package ids import "github.com/stretchr/testify/require" diff --git a/scripts/build_fuzz.sh b/scripts/build_fuzz.sh index 49378e2e0878..520b244bc631 100755 --- a/scripts/build_fuzz.sh +++ b/scripts/build_fuzz.sh @@ -28,7 +28,7 @@ do echo "Fuzzing $func in $file" parentDir=$(dirname "$file") # If any of the fuzz tests fail, return exit code 1 - if ! go test "$parentDir" -run="$func" -fuzz="$func" -fuzztime="${fuzzTime}"s; then + if ! go test -tags test "$parentDir" -run="$func" -fuzz="$func" -fuzztime="${fuzzTime}"s; then failed=true fi done diff --git a/scripts/build_test.sh b/scripts/build_test.sh index c0c9b72e3230..4a7cbd04f746 100755 --- a/scripts/build_test.sh +++ b/scripts/build_test.sh @@ -18,4 +18,4 @@ fi TEST_TARGETS="$(eval "go list ./... 
${EXCLUDED_TARGETS}")" # shellcheck disable=SC2086 -go test -shuffle=on -race -timeout="${TIMEOUT:-120s}" -coverprofile="coverage.out" -covermode="atomic" ${TEST_TARGETS} +go test -tags test -shuffle=on -race -timeout="${TIMEOUT:-120s}" -coverprofile="coverage.out" -covermode="atomic" ${TEST_TARGETS} diff --git a/scripts/lint.sh b/scripts/lint.sh index 9fb23ae325be..7f2111d6f96e 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -29,7 +29,7 @@ fi # by default, "./scripts/lint.sh" runs all lint tests # to run only "license_header" test # TESTS='license_header' ./scripts/lint.sh -TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_no_error_inline_func"} +TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_no_error_inline_func import_testing_only_in_tests"} function test_golangci_lint { go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.58.1 @@ -86,6 +86,31 @@ function test_interface_compliance_nil { fi } +function test_import_testing_only_in_tests { + ROOT=$( git rev-parse --show-toplevel ) + NON_TEST_GO_FILES=$( find "${ROOT}" -iname '*.go' ! -iname '*_test.go'); + + IMPORT_TESTING=$( echo "${NON_TEST_GO_FILES}" | xargs grep -lP '^\s*(import\s+)?"testing"'); + IMPORT_TESTIFY=$( echo "${NON_TEST_GO_FILES}" | xargs grep -l '"github.com/stretchr/testify'); + # TODO(arr4n): send a PR to add support for build tags in `mockgen` and then enable this. 
+ # IMPORT_GOMOCK=$( echo "${NON_TEST_GO_FILES}" | xargs grep -l '"go.uber.org/mock'); + HAVE_TEST_LOGIC=$( printf "%s\n%s" "${IMPORT_TESTING}" "${IMPORT_TESTIFY}" ); + + TAGGED_AS_TEST=$( echo "${NON_TEST_GO_FILES}" | xargs grep -lP '^\/\/go:build\s+(.+(,|\s+))?test[,\s]?'); + + # -3 suppresses files that have test logic and have the "test" build tag + # -2 suppresses files that are tagged despite not having detectable test logic + UNTAGGED=$( comm -23 <( echo "${HAVE_TEST_LOGIC}" | sort -u ) <( echo "${TAGGED_AS_TEST}" | sort -u ) ); + if [ -z "${UNTAGGED}" ]; + then + return 0; + fi + + echo "Non-test Go files importing test-only packages MUST have '//go:build test' tag:"; + echo "${UNTAGGED}"; + return 1; +} + function run { local test="${1}" shift 1 diff --git a/scripts/tests.e2e.existing.sh b/scripts/tests.e2e.existing.sh index 4b28fc1ad271..dbe41ec003d1 100755 --- a/scripts/tests.e2e.existing.sh +++ b/scripts/tests.e2e.existing.sh @@ -22,7 +22,7 @@ function print_separator { function cleanup { print_separator echo "cleaning up reusable network" - ginkgo -v ./tests/e2e/e2e.test -- --stop-network + ginkgo -v --tags test ./tests/e2e/e2e.test -- --stop-network } trap cleanup EXIT diff --git a/scripts/tests.e2e.sh b/scripts/tests.e2e.sh index 564116b40fda..efc1889af5ac 100755 --- a/scripts/tests.e2e.sh +++ b/scripts/tests.e2e.sh @@ -23,7 +23,7 @@ source ./scripts/constants.sh echo "building e2e.test" # to install the ginkgo binary (required for test build and run) go install -v github.com/onsi/ginkgo/v2/ginkgo@v2.13.1 -ACK_GINKGO_RC=true ginkgo build ./tests/e2e +ACK_GINKGO_RC=true ginkgo build --tags test ./tests/e2e ./tests/e2e/e2e.test --help # Enable subnet testing by building xsvm @@ -57,4 +57,4 @@ fi ################################# # - Execute in random order to identify unwanted dependency -ginkgo ${GINKGO_ARGS} -v --randomize-all ./tests/e2e/e2e.test -- "${E2E_ARGS[@]}" "${@}" +ginkgo ${GINKGO_ARGS} -v --tags test --randomize-all 
./tests/e2e/e2e.test -- "${E2E_ARGS[@]}" "${@}" diff --git a/scripts/tests.upgrade.sh b/scripts/tests.upgrade.sh index 8cc158d87d49..1adab23248b9 100755 --- a/scripts/tests.upgrade.sh +++ b/scripts/tests.upgrade.sh @@ -66,7 +66,7 @@ source ./scripts/constants.sh echo "building upgrade.test" # to install the ginkgo binary (required for test build and run) go install -v github.com/onsi/ginkgo/v2/ginkgo@v2.13.1 -ACK_GINKGO_RC=true ginkgo build ./tests/upgrade +ACK_GINKGO_RC=true ginkgo build --tags test ./tests/upgrade ./tests/upgrade/upgrade.test --help ################################# diff --git a/snow/consensus/snowball/test_snowflake.go b/snow/consensus/snowball/test_snowflake.go index 78ce95b27e3d..8688ee82c09b 100644 --- a/snow/consensus/snowball/test_snowflake.go +++ b/snow/consensus/snowball/test_snowflake.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package snowball import "testing" diff --git a/snow/engine/avalanche/bootstrap/queue/test_job.go b/snow/engine/avalanche/bootstrap/queue/test_job.go index 91a370b96d81..8884ee0a2357 100644 --- a/snow/engine/avalanche/bootstrap/queue/test_job.go +++ b/snow/engine/avalanche/bootstrap/queue/test_job.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package queue import ( diff --git a/snow/engine/avalanche/bootstrap/queue/test_parser.go b/snow/engine/avalanche/bootstrap/queue/test_parser.go index 1cc1cfd2973f..2787973d267e 100644 --- a/snow/engine/avalanche/bootstrap/queue/test_parser.go +++ b/snow/engine/avalanche/bootstrap/queue/test_parser.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
+//go:build test + package queue import ( diff --git a/snow/engine/avalanche/vertex/test_builder.go b/snow/engine/avalanche/vertex/test_builder.go index 534629372249..90d603fc10a9 100644 --- a/snow/engine/avalanche/vertex/test_builder.go +++ b/snow/engine/avalanche/vertex/test_builder.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package vertex import ( diff --git a/snow/engine/avalanche/vertex/test_manager.go b/snow/engine/avalanche/vertex/test_manager.go index 6954161cdd46..22bb6ce696eb 100644 --- a/snow/engine/avalanche/vertex/test_manager.go +++ b/snow/engine/avalanche/vertex/test_manager.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package vertex import "testing" diff --git a/snow/engine/avalanche/vertex/test_parser.go b/snow/engine/avalanche/vertex/test_parser.go index 2ee10add6090..d3c2b0b8d8a2 100644 --- a/snow/engine/avalanche/vertex/test_parser.go +++ b/snow/engine/avalanche/vertex/test_parser.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package vertex import ( diff --git a/snow/engine/avalanche/vertex/test_storage.go b/snow/engine/avalanche/vertex/test_storage.go index 8e0b8bc1e84d..3e00dbdda879 100644 --- a/snow/engine/avalanche/vertex/test_storage.go +++ b/snow/engine/avalanche/vertex/test_storage.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package vertex import ( diff --git a/snow/engine/avalanche/vertex/test_vm.go b/snow/engine/avalanche/vertex/test_vm.go index ee17c8b13ae0..49948b81d516 100644 --- a/snow/engine/avalanche/vertex/test_vm.go +++ b/snow/engine/avalanche/vertex/test_vm.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package vertex import ( diff --git a/snow/engine/common/test_bootstrap_tracker.go b/snow/engine/common/test_bootstrap_tracker.go index 2e940f1a43b1..59aea61c5549 100644 --- a/snow/engine/common/test_bootstrap_tracker.go +++ b/snow/engine/common/test_bootstrap_tracker.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package common import ( diff --git a/snow/engine/common/test_bootstrapper.go b/snow/engine/common/test_bootstrapper.go index 259fcb07fb3e..ae1aef57abdf 100644 --- a/snow/engine/common/test_bootstrapper.go +++ b/snow/engine/common/test_bootstrapper.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package common import ( diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index e07352d43713..80470fa0509c 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package common import ( diff --git a/snow/engine/common/test_sender.go b/snow/engine/common/test_sender.go index e3cb44165c54..9f4e5689751d 100644 --- a/snow/engine/common/test_sender.go +++ b/snow/engine/common/test_sender.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package common import ( diff --git a/snow/engine/common/test_timer.go b/snow/engine/common/test_timer.go index 6da0d9251712..c189dd15c237 100644 --- a/snow/engine/common/test_timer.go +++ b/snow/engine/common/test_timer.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
+//go:build test + package common import ( diff --git a/snow/engine/common/test_vm.go b/snow/engine/common/test_vm.go index 828b49f5e1fe..bbad058e13ff 100644 --- a/snow/engine/common/test_vm.go +++ b/snow/engine/common/test_vm.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package common import ( diff --git a/snow/engine/snowman/block/test_batched_vm.go b/snow/engine/snowman/block/test_batched_vm.go index e5d654ec4a87..92e766b86137 100644 --- a/snow/engine/snowman/block/test_batched_vm.go +++ b/snow/engine/snowman/block/test_batched_vm.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package block import ( diff --git a/snow/engine/snowman/block/test_state_summary.go b/snow/engine/snowman/block/test_state_summary.go index 7287cff10120..5921acb3e8dc 100644 --- a/snow/engine/snowman/block/test_state_summary.go +++ b/snow/engine/snowman/block/test_state_summary.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package block import ( diff --git a/snow/engine/snowman/block/test_state_syncable_vm.go b/snow/engine/snowman/block/test_state_syncable_vm.go index f1eeb9606642..2bf5fe425941 100644 --- a/snow/engine/snowman/block/test_state_syncable_vm.go +++ b/snow/engine/snowman/block/test_state_syncable_vm.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package block import ( diff --git a/snow/engine/snowman/block/test_vm.go b/snow/engine/snowman/block/test_vm.go index 503d3f9d4851..cdbeabacc4f1 100644 --- a/snow/engine/snowman/block/test_vm.go +++ b/snow/engine/snowman/block/test_vm.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. +//go:build test + package block import ( diff --git a/snow/networking/benchlist/test_benchable.go b/snow/networking/benchlist/test_benchable.go index dabfab564829..57a611a3765a 100644 --- a/snow/networking/benchlist/test_benchable.go +++ b/snow/networking/benchlist/test_benchable.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package benchlist import ( diff --git a/snow/networking/sender/test_external_sender.go b/snow/networking/sender/test_external_sender.go index 3d5e688492b9..da91edb54db1 100644 --- a/snow/networking/sender/test_external_sender.go +++ b/snow/networking/sender/test_external_sender.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package sender import ( diff --git a/snow/snowtest/snowtest.go b/snow/snowtest/snowtest.go index 3cacc8e873bf..be08ae75b21e 100644 --- a/snow/snowtest/snowtest.go +++ b/snow/snowtest/snowtest.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package snowtest import ( diff --git a/snow/validators/test_state.go b/snow/validators/test_state.go index ee4102cf7194..378f3e8a8780 100644 --- a/snow/validators/test_state.go +++ b/snow/validators/test_state.go @@ -1,6 +1,9 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +// TODO: https://github.com/ava-labs/avalanchego/issues/3174 +//go:build test || !test + package validators import ( diff --git a/tests/e2e/banff/suites.go b/tests/e2e/banff/suites.go index b6da324c98ea..1f61eaa7ad92 100644 --- a/tests/e2e/banff/suites.go +++ b/tests/e2e/banff/suites.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
+//go:build test + // Implements tests for the banff network upgrade. package banff diff --git a/tests/e2e/c/dynamic_fees.go b/tests/e2e/c/dynamic_fees.go index c3dda77b985c..8a65ca910667 100644 --- a/tests/e2e/c/dynamic_fees.go +++ b/tests/e2e/c/dynamic_fees.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package c import ( diff --git a/tests/e2e/c/interchain_workflow.go b/tests/e2e/c/interchain_workflow.go index bfb342818a5f..89330ccd68a7 100644 --- a/tests/e2e/c/interchain_workflow.go +++ b/tests/e2e/c/interchain_workflow.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package c import ( diff --git a/tests/e2e/faultinjection/duplicate_node_id.go b/tests/e2e/faultinjection/duplicate_node_id.go index 288583c7e09b..13daafad2b86 100644 --- a/tests/e2e/faultinjection/duplicate_node_id.go +++ b/tests/e2e/faultinjection/duplicate_node_id.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package faultinjection import ( diff --git a/tests/e2e/p/interchain_workflow.go b/tests/e2e/p/interchain_workflow.go index 548c82ac1211..c74a7d6757eb 100644 --- a/tests/e2e/p/interchain_workflow.go +++ b/tests/e2e/p/interchain_workflow.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package p import ( diff --git a/tests/e2e/p/permissionless_subnets.go b/tests/e2e/p/permissionless_subnets.go index dc92bdd60d5c..4e91569e9256 100644 --- a/tests/e2e/p/permissionless_subnets.go +++ b/tests/e2e/p/permissionless_subnets.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
+//go:build test + package p import ( diff --git a/tests/e2e/p/staking_rewards.go b/tests/e2e/p/staking_rewards.go index e988cf43c2c4..7e8178c53a32 100644 --- a/tests/e2e/p/staking_rewards.go +++ b/tests/e2e/p/staking_rewards.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package p import ( diff --git a/tests/e2e/p/validator_sets.go b/tests/e2e/p/validator_sets.go index a3f3e1e9f075..6ecbe42d1452 100644 --- a/tests/e2e/p/validator_sets.go +++ b/tests/e2e/p/validator_sets.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package p import ( diff --git a/tests/e2e/p/workflow.go b/tests/e2e/p/workflow.go index 3708c6b82c0a..c4e597d27263 100644 --- a/tests/e2e/p/workflow.go +++ b/tests/e2e/p/workflow.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package p import ( diff --git a/tests/e2e/vms/xsvm.go b/tests/e2e/vms/xsvm.go index 5d3557acd405..b7ac0dc0b0b9 100644 --- a/tests/e2e/vms/xsvm.go +++ b/tests/e2e/vms/xsvm.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package vms import ( diff --git a/tests/e2e/x/interchain_workflow.go b/tests/e2e/x/interchain_workflow.go index ecc52f41f032..c096b6e39228 100644 --- a/tests/e2e/x/interchain_workflow.go +++ b/tests/e2e/x/interchain_workflow.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
+//go:build test + package x import ( diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index 58a0351ba123..5e6ed2a90b98 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + // Implements X-chain transfer tests. package transfer diff --git a/tests/fixture/e2e/env.go b/tests/fixture/e2e/env.go index 05fbbd97ac86..f56b9e9bce03 100644 --- a/tests/fixture/e2e/env.go +++ b/tests/fixture/e2e/env.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package e2e import ( diff --git a/tests/fixture/e2e/helpers.go b/tests/fixture/e2e/helpers.go index 6f6e5382dc7a..9bc773cbd6c1 100644 --- a/tests/fixture/e2e/helpers.go +++ b/tests/fixture/e2e/helpers.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package e2e import ( diff --git a/vms/platformvm/warp/test_signer.go b/vms/platformvm/warp/test_signer.go index e30423edf1ed..3cd3802af3f9 100644 --- a/vms/platformvm/warp/test_signer.go +++ b/vms/platformvm/warp/test_signer.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//go:build test + package warp import ( diff --git a/wallet/subnet/primary/common/test_utxos.go b/wallet/subnet/primary/common/test_utxos.go index 094c57d53705..0ab6b6decb8f 100644 --- a/wallet/subnet/primary/common/test_utxos.go +++ b/wallet/subnet/primary/common/test_utxos.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
+//go:build test + package common import ( From 4e0a7c30f40e860d73f8c9b2f26f5d624fd309d8 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 11 Jul 2024 06:31:54 -0400 Subject: [PATCH 094/102] Include VM path in plugin version error (#3178) Co-authored-by: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> --- vms/rpcchainvm/runtime/subprocess/initializer.go | 8 ++++++-- vms/rpcchainvm/runtime/subprocess/runtime.go | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/vms/rpcchainvm/runtime/subprocess/initializer.go b/vms/rpcchainvm/runtime/subprocess/initializer.go index bc8d4e41c63a..a503a507dd19 100644 --- a/vms/rpcchainvm/runtime/subprocess/initializer.go +++ b/vms/rpcchainvm/runtime/subprocess/initializer.go @@ -16,6 +16,8 @@ var _ runtime.Initializer = (*initializer)(nil) // Subprocess VM Runtime intializer. type initializer struct { + path string + once sync.Once // Address of the RPC Chain VM server vmAddr string @@ -25,8 +27,9 @@ type initializer struct { initialized chan struct{} } -func newInitializer() *initializer { +func newInitializer(path string) *initializer { return &initializer{ + path: path, initialized: make(chan struct{}), } } @@ -34,10 +37,11 @@ func newInitializer() *initializer { func (i *initializer) Initialize(_ context.Context, protocolVersion uint, vmAddr string) error { i.once.Do(func() { if version.RPCChainVMProtocol != protocolVersion { - i.err = fmt.Errorf("%w. AvalancheGo version %s implements RPCChainVM protocol version %d. The VM implements RPCChainVM protocol version %d. Please make sure that there is an exact match of the protocol versions. This can be achieved by updating your VM or running an older/newer version of AvalancheGo. Please be advised that some virtual machines may not yet support the latest RPCChainVM protocol version", + i.err = fmt.Errorf("%w. AvalancheGo version %s implements RPCChainVM protocol version %d. The VM located at %s implements RPCChainVM protocol version %d. 
Please make sure that there is an exact match of the protocol versions. This can be achieved by updating your VM or running an older/newer version of AvalancheGo. Please be advised that some virtual machines may not yet support the latest RPCChainVM protocol version", runtime.ErrProtocolVersionMismatch, version.Current, version.RPCChainVMProtocol, + i.path, protocolVersion, ) } diff --git a/vms/rpcchainvm/runtime/subprocess/runtime.go b/vms/rpcchainvm/runtime/subprocess/runtime.go index 2cd92a00b04e..5623d9be3bd1 100644 --- a/vms/rpcchainvm/runtime/subprocess/runtime.go +++ b/vms/rpcchainvm/runtime/subprocess/runtime.go @@ -64,7 +64,7 @@ func Bootstrap( return nil, nil, fmt.Errorf("%w: stderr and stdout required", runtime.ErrInvalidConfig) } - intitializer := newInitializer() + intitializer := newInitializer(cmd.Path) server := grpcutils.NewServer() defer server.GracefulStop() From d71e91bb42b2e41cf7743777cf0244fd02e83a68 Mon Sep 17 00:00:00 2001 From: marun Date: Thu, 11 Jul 2024 04:49:55 -0700 Subject: [PATCH 095/102] [ci] Simplify ci monitoring with custom actions (#3161) --- .../run-monitored-tmpnet-cmd/action.yml | 72 +++++++++ .../notify-metrics-availability.sh | 0 .../actions/upload-tmpnet-artifact/action.yml | 20 +++ .github/workflows/ci.yml | 140 ++++-------------- 4 files changed, 117 insertions(+), 115 deletions(-) create mode 100644 .github/actions/run-monitored-tmpnet-cmd/action.yml rename .github/{workflows => actions/run-monitored-tmpnet-cmd}/notify-metrics-availability.sh (100%) create mode 100644 .github/actions/upload-tmpnet-artifact/action.yml diff --git a/.github/actions/run-monitored-tmpnet-cmd/action.yml b/.github/actions/run-monitored-tmpnet-cmd/action.yml new file mode 100644 index 000000000000..7b230e31f5f3 --- /dev/null +++ b/.github/actions/run-monitored-tmpnet-cmd/action.yml @@ -0,0 +1,72 @@ +name: 'Run the provided command in an environment configured to monitor tmpnet networks' +description: 'Run the provided command in an environment 
configured to monitor tmpnet networks' + +inputs: + run: + description: "the bash command to run" + required: true + url_encoded_repo: + # TODO(marun) Process github.repository into this url-encoded form + default: 'ava-labs%2Favalanchego' + filter_by_owner: + default: '' + prometheus_id: + required: true + prometheus_password: + required: true + loki_id: + required: true + loki_password: + required: true + # The following inputs need never be provided by the caller. They + # default to context values that the action's steps are unable to + # acccess directly. + repository: + default: ${{ github.repository }} + workflow: + default: ${{ github.workflow }} + run_id: + default: ${{ github.run_id }} + run_number: + default: ${{ github.run_number }} + run_attempt: + default: ${{ github.run_attempt }} + job: + default: ${{ github.job }} + +runs: + using: composite + steps: + - name: Start prometheus + # Only run for the original repo; a forked repo won't have access to the monitoring credentials + if: (inputs.prometheus_id != '') + shell: bash + run: bash -x ./scripts/run_prometheus.sh + env: + PROMETHEUS_ID: ${{ inputs.prometheus_id }} + PROMETHEUS_PASSWORD: ${{ inputs.prometheus_password }} + - name: Start promtail + if: (inputs.prometheus_id != '') + shell: bash + run: bash -x ./scripts/run_promtail.sh + env: + LOKI_ID: ${{ inputs.loki_id }} + LOKI_PASSWORD: ${{ inputs.loki_password }} + - name: Notify of metrics availability + if: (inputs.prometheus_id != '') + shell: bash + run: .github/actions/run-monitored-tmpnet-cmd/notify-metrics-availability.sh + env: + GRAFANA_URL: https://grafana-experimental.avax-dev.network/d/kBQpRdWnk/avalanche-main-dashboard?orgId=1&refresh=10s&var-filter=is_ephemeral_node%7C%3D%7Cfalse&var-filter=gh_repo%7C%3D%7C${{ inputs.url_encoded_repo }}&var-filter=gh_run_id%7C%3D%7C${{ inputs.run_id }}&var-filter=gh_run_attempt%7C%3D%7C${{ inputs.run_attempt }} + GH_JOB_ID: ${{ inputs.job }} + FILTER_BY_OWNER: ${{ inputs.filter_by_owner }} + - 
name: Run command + shell: bash + run: ${{ inputs.run }} + env: + GH_REPO: ${{ inputs.repository }} + GH_WORKFLOW: ${{ inputs.workflow }} + GH_RUN_ID: ${{ inputs.run_id }} + GH_RUN_NUMBER: ${{ inputs.run_number }} + GH_RUN_ATTEMPT: ${{ inputs.run_attempt }} + GH_JOB_ID: ${{ inputs.job }} diff --git a/.github/workflows/notify-metrics-availability.sh b/.github/actions/run-monitored-tmpnet-cmd/notify-metrics-availability.sh similarity index 100% rename from .github/workflows/notify-metrics-availability.sh rename to .github/actions/run-monitored-tmpnet-cmd/notify-metrics-availability.sh diff --git a/.github/actions/upload-tmpnet-artifact/action.yml b/.github/actions/upload-tmpnet-artifact/action.yml new file mode 100644 index 000000000000..8912cda11ed2 --- /dev/null +++ b/.github/actions/upload-tmpnet-artifact/action.yml @@ -0,0 +1,20 @@ +name: 'Upload an artifact of tmpnet data' +description: 'Upload an artifact of data in the ~/.tmpnet path' + +inputs: + name: + description: "the name of the artifact to upload" + required: true + +runs: + using: composite + steps: + - name: Upload tmpnet data + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.name }} + path: | + ~/.tmpnet/networks + ~/.tmpnet/prometheus/prometheus.log + ~/.tmpnet/promtail/promtail.log + if-no-files-found: error diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 68ee2585a3b7..e80f83272efa 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,9 +19,6 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true -env: - grafana_url: https://grafana-experimental.avax-dev.network/d/kBQpRdWnk/avalanche-main-dashboard?orgId=1&refresh=10s&var-filter=is_ephemeral_node%7C%3D%7Cfalse&var-filter=gh_repo%7C%3D%7Cava-labs%2Favalanchego&var-filter=gh_run_id%7C%3D%7C${{ github.run_id }}&var-filter=gh_run_attempt%7C%3D%7C${{ github.run_attempt }} - jobs: Unit: runs-on: ${{ matrix.os }} @@ -59,49 +56,19 @@ jobs: - name: Build AvalancheGo 
Binary shell: bash run: ./scripts/build.sh -r - - name: Start prometheus - # Only run for the original repo; a forked repo won't have access to the monitoring credentials - if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) - shell: bash - run: bash -x ./scripts/run_prometheus.sh - env: - PROMETHEUS_ID: ${{ secrets.PROMETHEUS_ID }} - PROMETHEUS_PASSWORD: ${{ secrets.PROMETHEUS_PASSWORD }} - - name: Start promtail - if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) - shell: bash - run: bash -x ./scripts/run_promtail.sh - env: - LOKI_ID: ${{ secrets.LOKI_ID }} - LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }} - - name: Notify of metrics availability - if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) - shell: bash - run: .github/workflows/notify-metrics-availability.sh - env: - GRAFANA_URL: ${{ env.grafana_url }} - GH_JOB_ID: ${{ github.job }} - FILTER_BY_OWNER: avalanchego-e2e - name: Run e2e tests - shell: bash - run: E2E_SERIAL=1 ./scripts/tests.e2e.sh --delay-network-shutdown - env: - GH_REPO: ${{ github.repository }} - GH_WORKFLOW: ${{ github.workflow }} - GH_RUN_ID: ${{ github.run_id }} - GH_RUN_NUMBER: ${{ github.run_number }} - GH_RUN_ATTEMPT: ${{ github.run_attempt }} - GH_JOB_ID: ${{ github.job }} + uses: ./.github/actions/run-monitored-tmpnet-cmd + with: + run: E2E_SERIAL=1 ./scripts/tests.e2e.sh --delay-network-shutdown + prometheus_id: ${{ secrets.PROMETHEUS_ID || '' }} + prometheus_password: ${{ secrets.PROMETHEUS_PASSWORD || '' }} + loki_id: ${{ secrets.LOKI_ID || '' }} + loki_password: ${{ secrets.LOKI_PASSWORD || '' }} - name: Upload tmpnet network dir - uses: actions/upload-artifact@v4 + uses: ./.github/actions/upload-tmpnet-artifact if: always() with: name: e2e-tmpnet-data - path: | - ~/.tmpnet/networks - ~/.tmpnet/prometheus/prometheus.log - 
~/.tmpnet/promtail/promtail.log - if-no-files-found: error e2e_existing_network: runs-on: ubuntu-latest steps: @@ -110,48 +77,19 @@ jobs: - name: Build AvalancheGo Binary shell: bash run: ./scripts/build.sh -r - - name: Start prometheus - # Only run for the original repo; a forked repo won't have access to the monitoring credentials - if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) - shell: bash - run: bash -x ./scripts/run_prometheus.sh - env: - PROMETHEUS_ID: ${{ secrets.PROMETHEUS_ID }} - PROMETHEUS_PASSWORD: ${{ secrets.PROMETHEUS_PASSWORD }} - - name: Start promtail - if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) - shell: bash - run: bash -x ./scripts/run_promtail.sh - env: - LOKI_ID: ${{ secrets.LOKI_ID }} - LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }} - - name: Notify of metrics availability - if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) - shell: bash - run: .github/workflows/notify-metrics-availability.sh - env: - GRAFANA_URL: ${{ env.grafana_url }} - GH_JOB_ID: ${{ github.job }} - name: Run e2e tests with existing network - shell: bash - run: E2E_SERIAL=1 ./scripts/tests.e2e.existing.sh --delay-network-shutdown - env: - GH_REPO: ${{ github.repository }} - GH_WORKFLOW: ${{ github.workflow }} - GH_RUN_ID: ${{ github.run_id }} - GH_RUN_NUMBER: ${{ github.run_number }} - GH_RUN_ATTEMPT: ${{ github.run_attempt }} - GH_JOB_ID: ${{ github.job }} + uses: ./.github/actions/run-monitored-tmpnet-cmd + with: + run: E2E_SERIAL=1 ./scripts/tests.e2e.existing.sh --delay-network-shutdown + prometheus_id: ${{ secrets.PROMETHEUS_ID || '' }} + prometheus_password: ${{ secrets.PROMETHEUS_PASSWORD || '' }} + loki_id: ${{ secrets.LOKI_ID || '' }} + loki_password: ${{ secrets.LOKI_PASSWORD || '' }} - name: Upload tmpnet network dir - uses: actions/upload-artifact@v4 + uses: 
./.github/actions/upload-tmpnet-artifact if: always() with: name: e2e-existing-network-tmpnet-data - path: | - ~/.tmpnet/networks - ~/.tmpnet/prometheus/prometheus.log - ~/.tmpnet/promtail/promtail.log - if-no-files-found: error Upgrade: runs-on: ubuntu-latest steps: @@ -160,48 +98,20 @@ jobs: - name: Build AvalancheGo Binary shell: bash run: ./scripts/build.sh - - name: Start prometheus - # Only run for the original repo; a forked repo won't have access to the monitoring credentials - if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) - shell: bash - run: bash -x ./scripts/run_prometheus.sh - env: - PROMETHEUS_ID: ${{ secrets.PROMETHEUS_ID }} - PROMETHEUS_PASSWORD: ${{ secrets.PROMETHEUS_PASSWORD }} - - name: Start promtail - if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) - shell: bash - run: bash -x ./scripts/run_promtail.sh - env: - LOKI_ID: ${{ secrets.LOKI_ID }} - LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }} - - name: Notify of metrics availability - if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) - shell: bash - run: .github/workflows/notify-metrics-availability.sh - env: - GRAFANA_URL: ${{ env.grafana_url }} - GH_JOB_ID: ${{ github.job }} - name: Run e2e tests - shell: bash - run: ./scripts/tests.upgrade.sh - env: - GH_REPO: ${{ github.repository }} - GH_WORKFLOW: ${{ github.workflow }} - GH_RUN_ID: ${{ github.run_id }} - GH_RUN_NUMBER: ${{ github.run_number }} - GH_RUN_ATTEMPT: ${{ github.run_attempt }} - GH_JOB_ID: ${{ github.job }} + uses: ./.github/actions/run-monitored-tmpnet-cmd + with: + run: ./scripts/tests.upgrade.sh + filter_by_owner: avalanchego-e2e + prometheus_id: ${{ secrets.PROMETHEUS_ID || '' }} + prometheus_password: ${{ secrets.PROMETHEUS_PASSWORD || '' }} + loki_id: ${{ secrets.LOKI_ID || '' }} + loki_password: ${{ secrets.LOKI_PASSWORD || '' }} - 
name: Upload tmpnet network dir - uses: actions/upload-artifact@v4 + uses: ./.github/actions/upload-tmpnet-artifact if: always() with: name: upgrade-tmpnet-data - path: | - ~/.tmpnet/networks - ~/.tmpnet/prometheus/prometheus.log - ~/.tmpnet/promtail/promtail.log - if-no-files-found: error Lint: runs-on: ubuntu-latest steps: From 57fe51dd59a223422d68acfb48f4bc3892cad782 Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 Jul 2024 04:23:06 -0400 Subject: [PATCH 096/102] [vms/avm] Replace `strings.Replace` with `fmt.Sprintf` in tests (#3177) --- vms/avm/service_test.go | 257 +++++++++++++++++----------------------- 1 file changed, 106 insertions(+), 151 deletions(-) diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index d9dbb8db6f12..4648506b2a5b 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -5,7 +5,7 @@ package avm import ( "encoding/json" - "strings" + "fmt" "testing" "time" @@ -538,10 +538,13 @@ func TestServiceGetTxJSON_BaseTx(t *testing.T) { replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - expectedReplyTxString := `{ + sigStr, err := formatting.Encode(formatting.HexNC, newTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString := fmt.Sprintf(`{ "unsignedTx": { "networkID": 10, - "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "blockchainID": %q, "outputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -589,21 +592,13 @@ func TestServiceGetTxJSON_BaseTx(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %q ] } } ], - "id": "PLACEHOLDER_TX_ID" -}` - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", newTx.ID().String(), 1) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", 
newTx.Unsigned.(*txs.BaseTx).BlockchainID.String(), 1) - - sigStr, err := formatting.Encode(formatting.HexNC, newTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) - require.NoError(err) - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + "id": %q +}`, newTx.Unsigned.(*txs.BaseTx).BlockchainID, sigStr, newTx.ID()) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -630,10 +625,13 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - expectedReplyTxString := `{ + sigStr, err := formatting.Encode(formatting.HexNC, newTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString := fmt.Sprintf(`{ "unsignedTx": { "networkID": 10, - "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "blockchainID": %q, "outputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -684,21 +682,13 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %q ] } } ], - "id": "PLACEHOLDER_TX_ID" -}` - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", newTx.ID().String(), 1) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", newTx.Unsigned.(*txs.ExportTx).BlockchainID.String(), 1) - - sigStr, err := formatting.Encode(formatting.HexNC, newTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) - require.NoError(err) - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + "id": %q +}`, newTx.Unsigned.(*txs.ExportTx).BlockchainID, sigStr, newTx.ID()) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -775,10 +765,13 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { replyTxBytes, err := 
json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - expectedReplyTxString := `{ + sigStr, err := formatting.Encode(formatting.HexNC, createAssetTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString := fmt.Sprintf(`{ "unsignedTx": { "networkID": 10, - "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "blockchainID": %q, "outputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -882,21 +875,13 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %q ] } } ], - "id": "PLACEHOLDER_TX_ID" -}` - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", createAssetTx.ID().String(), 1) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", createAssetTx.Unsigned.(*txs.CreateAssetTx).BlockchainID.String(), 1) - - sigStr, err := formatting.Encode(formatting.HexNC, createAssetTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) - require.NoError(err) - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + "id": %q +}`, createAssetTx.Unsigned.(*txs.CreateAssetTx).BlockchainID, sigStr, createAssetTx.ID().String()) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -951,10 +936,14 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - expectedReplyTxString := `{ + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[1].Credential.(*nftfx.Credential).Sigs[0][:]) + require.NoError(err) + + args := []any{mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID, sigStr, mintNFTTx.ID(), createAssetTx.ID()} + expectedReplyTxString := fmt.Sprintf(`{ "unsignedTx": { "networkID": 10, - "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + 
"blockchainID": %[1]q, "outputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -986,10 +975,10 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { "memo": "0x", "operations": [ { - "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "assetID": %[4]q, "inputIDs": [ { - "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "txID": %[4]q, "outputIndex": 1 } ], @@ -1020,7 +1009,7 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } }, @@ -1028,22 +1017,13 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } } ], - "id": "PLACEHOLDER_TX_ID" -}` - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 2) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - - sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[1].Credential.(*nftfx.Credential).Sigs[0][:]) - require.NoError(err) - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) + "id": %[3]q +}`, args...) 
require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1101,10 +1081,14 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - expectedReplyTxString := `{ + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[1].Credential.(*nftfx.Credential).Sigs[0][:]) + require.NoError(err) + + args := []any{mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID, sigStr, mintNFTTx.ID(), createAssetTx.ID()} + expectedReplyTxString := fmt.Sprintf(`{ "unsignedTx": { "networkID": 10, - "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "blockchainID": %[1]q, "outputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -1136,10 +1120,10 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "memo": "0x", "operations": [ { - "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "assetID": %[4]q, "inputIDs": [ { - "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "txID": %[4]q, "outputIndex": 1 } ], @@ -1164,10 +1148,10 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { } }, { - "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "assetID": %[4]q, "inputIDs": [ { - "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "txID": %[4]q, "outputIndex": 2 } ], @@ -1198,7 +1182,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } }, @@ -1206,7 +1190,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } }, @@ -1214,22 +1198,13 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", "credential": { "signatures": [ - 
"PLACEHOLDER_SIGNATURE" + %[2]q ] } } ], - "id": "PLACEHOLDER_TX_ID" -}` - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 4) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - - sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[1].Credential.(*nftfx.Credential).Sigs[0][:]) - require.NoError(err) - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 3) + "id": %[3]q +}`, args...) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1281,10 +1256,14 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - expectedReplyTxString := `{ + sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + args := []any{mintSecpOpTx.Unsigned.(*txs.OperationTx).BlockchainID, sigStr, mintSecpOpTx.ID(), createAssetTx.ID()} + expectedReplyTxString := fmt.Sprintf(`{ "unsignedTx": { "networkID": 10, - "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "blockchainID": %[1]q, "outputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -1316,10 +1295,10 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { "memo": "0x", "operations": [ { - "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "assetID": %[4]q, "inputIDs": [ { - "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "txID": %[4]q, "outputIndex": 1 } ], @@ -1354,7 +1333,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - 
"PLACEHOLDER_SIGNATURE" + %[2]q ] } }, @@ -1362,22 +1341,13 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } } ], - "id": "PLACEHOLDER_TX_ID" -}` - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 2) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintSecpOpTx.ID().String(), 1) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintSecpOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - - sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) - require.NoError(err) - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) + "id": %[3]q +}`, args...) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1433,10 +1403,14 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - expectedReplyTxString := `{ + sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + args := []any{mintSecpOpTx.Unsigned.(*txs.OperationTx).BlockchainID, sigStr, mintSecpOpTx.ID(), createAssetTx.ID()} + expectedReplyTxString := fmt.Sprintf(`{ "unsignedTx": { "networkID": 10, - "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "blockchainID": %[1]q, "outputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -1468,10 +1442,10 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "memo": "0x", "operations": [ { - "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "assetID": %[4]q, "inputIDs": [ { - "txID": 
"PLACEHOLDER_CREATE_ASSET_TX_ID", + "txID": %[4]q, "outputIndex": 1 } ], @@ -1500,10 +1474,10 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { } }, { - "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "assetID": %[4]q, "inputIDs": [ { - "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "txID": %[4]q, "outputIndex": 2 } ], @@ -1538,7 +1512,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } }, @@ -1546,7 +1520,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } }, @@ -1554,22 +1528,13 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } } ], - "id": "PLACEHOLDER_TX_ID" -}` - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 4) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintSecpOpTx.ID().String(), 1) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintSecpOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - - sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) - require.NoError(err) - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 3) + "id": %[3]q +}`, args...) 
require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1616,10 +1581,14 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - expectedReplyTxString := `{ + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[1].Credential.(*propertyfx.Credential).Sigs[0][:]) + require.NoError(err) + + args := []any{mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID, sigStr, mintPropertyFxOpTx.ID(), createAssetTx.ID()} + expectedReplyTxString := fmt.Sprintf(`{ "unsignedTx": { "networkID": 10, - "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "blockchainID": %[1]q, "outputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -1651,10 +1620,10 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { "memo": "0x", "operations": [ { - "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "assetID": %[4]q, "inputIDs": [ { - "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "txID": %[4]q, "outputIndex": 1 } ], @@ -1686,7 +1655,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } }, @@ -1694,22 +1663,13 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } } ], - "id": "PLACEHOLDER_TX_ID" -}` - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 2) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - - 
sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[1].Credential.(*propertyfx.Credential).Sigs[0][:]) - require.NoError(err) - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) + "id": %[3]q +}`, args...) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1763,10 +1723,14 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - expectedReplyTxString := `{ + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[1].Credential.(*propertyfx.Credential).Sigs[0][:]) + require.NoError(err) + + args := []any{mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID, sigStr, mintPropertyFxOpTx.ID(), createAssetTx.ID()} + expectedReplyTxString := fmt.Sprintf(`{ "unsignedTx": { "networkID": 10, - "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "blockchainID": %[1]q, "outputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -1798,10 +1762,10 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "memo": "0x", "operations": [ { - "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "assetID": %[4]q, "inputIDs": [ { - "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "txID": %[4]q, "outputIndex": 1 } ], @@ -1827,10 +1791,10 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) } }, { - "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "assetID": %[4]q, "inputIDs": [ { - "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "txID": %[4]q, "outputIndex": 2 } ], @@ -1862,7 +1826,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } }, @@ -1870,7 +1834,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "fxID": 
"rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } }, @@ -1878,22 +1842,13 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", "credential": { "signatures": [ - "PLACEHOLDER_SIGNATURE" + %[2]q ] } } ], - "id": "PLACEHOLDER_TX_ID" -}` - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 4) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - - sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[1].Credential.(*propertyfx.Credential).Sigs[0][:]) - require.NoError(err) - - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 3) + "id": %[3]q +}`, args...) 
require.Equal(expectedReplyTxString, string(replyTxBytes)) } From 8e0d84f32f1051da41009945f33441e590ca8362 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 12 Jul 2024 11:21:08 +0200 Subject: [PATCH 097/102] restructured calculator API --- vms/avm/txs/executor/semantic_verifier.go | 9 ++-- vms/avm/txs/fees/calculator.go | 47 ++----------------- vms/avm/txs/fees/static_calculator.go | 57 +++++++++++++++++++++++ 3 files changed, 66 insertions(+), 47 deletions(-) create mode 100644 vms/avm/txs/fees/static_calculator.go diff --git a/vms/avm/txs/executor/semantic_verifier.go b/vms/avm/txs/executor/semantic_verifier.go index 97692ee8c1b4..b5451e35b191 100644 --- a/vms/avm/txs/executor/semantic_verifier.go +++ b/vms/avm/txs/executor/semantic_verifier.go @@ -131,13 +131,14 @@ func (v *SemanticVerifier) verifyBaseTx( importedIns []*avax.TransferableInput, exportedOuts []*avax.TransferableOutput, ) error { - feeCalculator := fees.NewStaticCalculator(v.Config) - if err := tx.Visit(feeCalculator); err != nil { + feeCalculator := fees.NewStaticCalculator(v.Backend.Config) + fee, err := feeCalculator.CalculateFee(&txs.Tx{Unsigned: tx}) + if err != nil { return err } - err := avax.VerifyTx( - feeCalculator.Fee, + err = avax.VerifyTx( + fee, v.FeeAssetID, [][]*avax.TransferableInput{tx.Ins, importedIns}, [][]*avax.TransferableOutput{tx.Outs, exportedOuts}, diff --git a/vms/avm/txs/fees/calculator.go b/vms/avm/txs/fees/calculator.go index d5339d72998a..334f0d492d23 100644 --- a/vms/avm/txs/fees/calculator.go +++ b/vms/avm/txs/fees/calculator.go @@ -3,48 +3,9 @@ package fees -import ( - "github.com/ava-labs/avalanchego/vms/avm/config" - "github.com/ava-labs/avalanchego/vms/avm/txs" -) +import "github.com/ava-labs/avalanchego/vms/avm/txs" -var _ txs.Visitor = (*Calculator)(nil) - -type Calculator struct { - // Pre E update inputs - config *config.Config - - // outputs of visitor execution - Fee uint64 -} - -func NewStaticCalculator(cfg 
*config.Config) *Calculator { - return &Calculator{ - config: cfg, - } -} - -func (fc *Calculator) BaseTx(*txs.BaseTx) error { - fc.Fee = fc.config.TxFee - return nil -} - -func (fc *Calculator) CreateAssetTx(*txs.CreateAssetTx) error { - fc.Fee = fc.config.CreateAssetTxFee - return nil -} - -func (fc *Calculator) OperationTx(*txs.OperationTx) error { - fc.Fee = fc.config.TxFee - return nil -} - -func (fc *Calculator) ImportTx(*txs.ImportTx) error { - fc.Fee = fc.config.TxFee - return nil -} - -func (fc *Calculator) ExportTx(*txs.ExportTx) error { - fc.Fee = fc.config.TxFee - return nil +// Calculator is the interfaces that any fee Calculator must implement +type Calculator interface { + CalculateFee(tx *txs.Tx) (uint64, error) } diff --git a/vms/avm/txs/fees/static_calculator.go b/vms/avm/txs/fees/static_calculator.go new file mode 100644 index 000000000000..fe7071318385 --- /dev/null +++ b/vms/avm/txs/fees/static_calculator.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package fees + +import ( + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +var ( + _ Calculator = (*staticCalculator)(nil) + _ txs.Visitor = (*staticCalculator)(nil) +) + +func NewStaticCalculator(c *config.Config) Calculator { + return &staticCalculator{staticCfg: c} +} + +func (c *staticCalculator) CalculateFee(tx *txs.Tx) (uint64, error) { + c.fee = 0 // zero fee among different calculateFee invocations (unlike gas which gets cumulated) + err := tx.Unsigned.Visit(c) + return c.fee, err +} + +type staticCalculator struct { + // inputs + staticCfg *config.Config + + // outputs of visitor execution + fee uint64 +} + +func (c *staticCalculator) BaseTx(*txs.BaseTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *staticCalculator) CreateAssetTx(*txs.CreateAssetTx) error { + c.fee = c.staticCfg.CreateAssetTxFee + return nil +} + +func (c *staticCalculator) OperationTx(*txs.OperationTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *staticCalculator) ImportTx(*txs.ImportTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *staticCalculator) ExportTx(*txs.ExportTx) error { + c.fee = c.staticCfg.TxFee + return nil +} From c2d6e869d8d806c7a44a9a82506eb5187f79f79b Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 12 Jul 2024 11:36:54 +0200 Subject: [PATCH 098/102] repackaged avm static fee config --- node/node.go | 9 ++++++--- vms/avm/block/executor/block_test.go | 9 ++++++--- vms/avm/config/config.go | 12 ++++++------ vms/avm/environment_test.go | 9 ++++++--- vms/avm/txs/executor/semantic_verifier.go | 2 +- vms/avm/txs/executor/syntactic_verifier_test.go | 9 ++++++--- vms/avm/txs/fees/static_calculator.go | 9 +++------ vms/avm/txs/fees/static_config.go | 12 ++++++++++++ 8 files changed, 46 insertions(+), 25 deletions(-) create mode 100644 vms/avm/txs/fees/static_config.go diff --git a/node/node.go b/node/node.go index fe574b17ea52..9562bd8f98bf 
100644 --- a/node/node.go +++ b/node/node.go @@ -75,6 +75,7 @@ import ( "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/avm" + "github.com/ava-labs/avalanchego/vms/avm/txs/fees" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" @@ -1249,9 +1250,11 @@ func (n *Node) initVMs() error { }), n.VMManager.RegisterFactory(context.TODO(), constants.AVMID, &avm.Factory{ Config: avmconfig.Config{ - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, - EUpgradeTime: eUpgradeTime, + StaticConfig: fees.StaticConfig{ + TxFee: n.Config.TxFee, + CreateAssetTxFee: n.Config.CreateAssetTxFee, + }, + EUpgradeTime: eUpgradeTime, }, }), n.VMManager.RegisterFactory(context.TODO(), constants.EVMID, &coreth.Factory{}), diff --git a/vms/avm/block/executor/block_test.go b/vms/avm/block/executor/block_test.go index 8e72f4c21ebe..23b5c761204b 100644 --- a/vms/avm/block/executor/block_test.go +++ b/vms/avm/block/executor/block_test.go @@ -27,6 +27,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" + "github.com/ava-labs/avalanchego/vms/avm/txs/fees" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" ) @@ -1046,9 +1047,11 @@ func defaultTestBackend(bootstrapped bool, sharedMemory atomic.SharedMemory) *ex Log: logging.NoLog{}, }, Config: &config.Config{ - EUpgradeTime: mockable.MaxTime, - TxFee: 0, - CreateAssetTxFee: 0, + StaticConfig: fees.StaticConfig{ + TxFee: 0, + CreateAssetTxFee: 0, + }, + EUpgradeTime: mockable.MaxTime, }, } } diff --git a/vms/avm/config/config.go b/vms/avm/config/config.go index 0d026eb05dbb..357cbfaf560f 100644 --- a/vms/avm/config/config.go +++ 
b/vms/avm/config/config.go @@ -3,15 +3,15 @@ package config -import "time" +import ( + "time" + + "github.com/ava-labs/avalanchego/vms/avm/txs/fees" +) // Struct collecting all the foundational parameters of the AVM type Config struct { - // Fee that is burned by every non-asset creating transaction - TxFee uint64 - - // Fee that must be burned by every asset creating transaction - CreateAssetTxFee uint64 + fees.StaticConfig // Time of the E network upgrade EUpgradeTime time.Time diff --git a/vms/avm/environment_test.go b/vms/avm/environment_test.go index d4375aa092d1..18457b4216e0 100644 --- a/vms/avm/environment_test.go +++ b/vms/avm/environment_test.go @@ -31,6 +31,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/fees" "github.com/ava-labs/avalanchego/vms/avm/txs/txstest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/nftfx" @@ -230,9 +231,11 @@ func setup(tb testing.TB, c *envConfig) *environment { func staticConfig(tb testing.TB, f fork) config.Config { c := config.Config{ - TxFee: testTxFee, - CreateAssetTxFee: testTxFee, - EUpgradeTime: mockable.MaxTime, + StaticConfig: fees.StaticConfig{ + TxFee: testTxFee, + CreateAssetTxFee: testTxFee, + }, + EUpgradeTime: mockable.MaxTime, } switch f { diff --git a/vms/avm/txs/executor/semantic_verifier.go b/vms/avm/txs/executor/semantic_verifier.go index b5451e35b191..eebcd0012373 100644 --- a/vms/avm/txs/executor/semantic_verifier.go +++ b/vms/avm/txs/executor/semantic_verifier.go @@ -131,7 +131,7 @@ func (v *SemanticVerifier) verifyBaseTx( importedIns []*avax.TransferableInput, exportedOuts []*avax.TransferableOutput, ) error { - feeCalculator := fees.NewStaticCalculator(v.Backend.Config) + feeCalculator := fees.NewStaticCalculator(v.Backend.Config.StaticConfig) 
fee, err := feeCalculator.CalculateFee(&txs.Tx{Unsigned: tx}) if err != nil { return err diff --git a/vms/avm/txs/executor/syntactic_verifier_test.go b/vms/avm/txs/executor/syntactic_verifier_test.go index ff566c69c035..74f182ad1f5e 100644 --- a/vms/avm/txs/executor/syntactic_verifier_test.go +++ b/vms/avm/txs/executor/syntactic_verifier_test.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/fees" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -25,9 +26,11 @@ import ( var ( keys = secp256k1.TestKeys() feeConfig = config.Config{ - TxFee: 2, - CreateAssetTxFee: 3, - EUpgradeTime: mockable.MaxTime, + StaticConfig: fees.StaticConfig{ + TxFee: 2, + CreateAssetTxFee: 3, + }, + EUpgradeTime: mockable.MaxTime, } ) diff --git a/vms/avm/txs/fees/static_calculator.go b/vms/avm/txs/fees/static_calculator.go index fe7071318385..2f1b8d4828b7 100644 --- a/vms/avm/txs/fees/static_calculator.go +++ b/vms/avm/txs/fees/static_calculator.go @@ -3,17 +3,14 @@ package fees -import ( - "github.com/ava-labs/avalanchego/vms/avm/config" - "github.com/ava-labs/avalanchego/vms/avm/txs" -) +import "github.com/ava-labs/avalanchego/vms/avm/txs" var ( _ Calculator = (*staticCalculator)(nil) _ txs.Visitor = (*staticCalculator)(nil) ) -func NewStaticCalculator(c *config.Config) Calculator { +func NewStaticCalculator(c StaticConfig) Calculator { return &staticCalculator{staticCfg: c} } @@ -25,7 +22,7 @@ func (c *staticCalculator) CalculateFee(tx *txs.Tx) (uint64, error) { type staticCalculator struct { // inputs - staticCfg *config.Config + staticCfg StaticConfig // outputs of visitor execution fee uint64 diff --git 
a/vms/avm/txs/fees/static_config.go b/vms/avm/txs/fees/static_config.go new file mode 100644 index 000000000000..f5b8da043358 --- /dev/null +++ b/vms/avm/txs/fees/static_config.go @@ -0,0 +1,12 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package fees + +type StaticConfig struct { + // Fee that is burned by every non-asset creating transaction + TxFee uint64 + + // Fee that must be burned by every asset creating transaction + CreateAssetTxFee uint64 +} From 018988ebcd8447764fdd49db0152d63743551f23 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 12 Jul 2024 11:46:03 +0200 Subject: [PATCH 099/102] renamed fees package to fee --- node/node.go | 4 ++-- vms/avm/block/executor/block_test.go | 4 ++-- vms/avm/config/config.go | 4 ++-- vms/avm/environment_test.go | 4 ++-- vms/avm/txs/executor/semantic_verifier.go | 4 ++-- vms/avm/txs/executor/syntactic_verifier_test.go | 4 ++-- vms/avm/txs/{fees => fee}/calculator.go | 2 +- vms/avm/txs/{fees => fee}/static_calculator.go | 2 +- vms/avm/txs/{fees => fee}/static_config.go | 2 +- 9 files changed, 15 insertions(+), 15 deletions(-) rename vms/avm/txs/{fees => fee}/calculator.go (95%) rename vms/avm/txs/{fees => fee}/static_calculator.go (98%) rename vms/avm/txs/{fees => fee}/static_config.go (95%) diff --git a/node/node.go b/node/node.go index 9562bd8f98bf..b4d513dba190 100644 --- a/node/node.go +++ b/node/node.go @@ -75,7 +75,7 @@ import ( "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/avm" - "github.com/ava-labs/avalanchego/vms/avm/txs/fees" + "github.com/ava-labs/avalanchego/vms/avm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" @@ -1250,7 +1250,7 @@ func (n *Node) initVMs() error { }), 
n.VMManager.RegisterFactory(context.TODO(), constants.AVMID, &avm.Factory{ Config: avmconfig.Config{ - StaticConfig: fees.StaticConfig{ + StaticConfig: fee.StaticConfig{ TxFee: n.Config.TxFee, CreateAssetTxFee: n.Config.CreateAssetTxFee, }, diff --git a/vms/avm/block/executor/block_test.go b/vms/avm/block/executor/block_test.go index 23b5c761204b..f835123e60e9 100644 --- a/vms/avm/block/executor/block_test.go +++ b/vms/avm/block/executor/block_test.go @@ -27,7 +27,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" - "github.com/ava-labs/avalanchego/vms/avm/txs/fees" + "github.com/ava-labs/avalanchego/vms/avm/txs/fee" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" ) @@ -1047,7 +1047,7 @@ func defaultTestBackend(bootstrapped bool, sharedMemory atomic.SharedMemory) *ex Log: logging.NoLog{}, }, Config: &config.Config{ - StaticConfig: fees.StaticConfig{ + StaticConfig: fee.StaticConfig{ TxFee: 0, CreateAssetTxFee: 0, }, diff --git a/vms/avm/config/config.go b/vms/avm/config/config.go index 357cbfaf560f..cb8267204c8f 100644 --- a/vms/avm/config/config.go +++ b/vms/avm/config/config.go @@ -6,12 +6,12 @@ package config import ( "time" - "github.com/ava-labs/avalanchego/vms/avm/txs/fees" + "github.com/ava-labs/avalanchego/vms/avm/txs/fee" ) // Struct collecting all the foundational parameters of the AVM type Config struct { - fees.StaticConfig + fee.StaticConfig // Time of the E network upgrade EUpgradeTime time.Time diff --git a/vms/avm/environment_test.go b/vms/avm/environment_test.go index 18457b4216e0..05f6967423c3 100644 --- a/vms/avm/environment_test.go +++ b/vms/avm/environment_test.go @@ -31,7 +31,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" - 
"github.com/ava-labs/avalanchego/vms/avm/txs/fees" + "github.com/ava-labs/avalanchego/vms/avm/txs/fee" "github.com/ava-labs/avalanchego/vms/avm/txs/txstest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/nftfx" @@ -231,7 +231,7 @@ func setup(tb testing.TB, c *envConfig) *environment { func staticConfig(tb testing.TB, f fork) config.Config { c := config.Config{ - StaticConfig: fees.StaticConfig{ + StaticConfig: fee.StaticConfig{ TxFee: testTxFee, CreateAssetTxFee: testTxFee, }, diff --git a/vms/avm/txs/executor/semantic_verifier.go b/vms/avm/txs/executor/semantic_verifier.go index eebcd0012373..68a1b9ab4ab5 100644 --- a/vms/avm/txs/executor/semantic_verifier.go +++ b/vms/avm/txs/executor/semantic_verifier.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/fees" + "github.com/ava-labs/avalanchego/vms/avm/txs/fee" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" ) @@ -131,7 +131,7 @@ func (v *SemanticVerifier) verifyBaseTx( importedIns []*avax.TransferableInput, exportedOuts []*avax.TransferableOutput, ) error { - feeCalculator := fees.NewStaticCalculator(v.Backend.Config.StaticConfig) + feeCalculator := fee.NewStaticCalculator(v.Backend.Config.StaticConfig) fee, err := feeCalculator.CalculateFee(&txs.Tx{Unsigned: tx}) if err != nil { return err diff --git a/vms/avm/txs/executor/syntactic_verifier_test.go b/vms/avm/txs/executor/syntactic_verifier_test.go index 74f182ad1f5e..b473b4515e97 100644 --- a/vms/avm/txs/executor/syntactic_verifier_test.go +++ b/vms/avm/txs/executor/syntactic_verifier_test.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/config" 
"github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/fees" + "github.com/ava-labs/avalanchego/vms/avm/txs/fee" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -26,7 +26,7 @@ import ( var ( keys = secp256k1.TestKeys() feeConfig = config.Config{ - StaticConfig: fees.StaticConfig{ + StaticConfig: fee.StaticConfig{ TxFee: 2, CreateAssetTxFee: 3, }, diff --git a/vms/avm/txs/fees/calculator.go b/vms/avm/txs/fee/calculator.go similarity index 95% rename from vms/avm/txs/fees/calculator.go rename to vms/avm/txs/fee/calculator.go index 334f0d492d23..5acc94b05f69 100644 --- a/vms/avm/txs/fees/calculator.go +++ b/vms/avm/txs/fee/calculator.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package fees +package fee import "github.com/ava-labs/avalanchego/vms/avm/txs" diff --git a/vms/avm/txs/fees/static_calculator.go b/vms/avm/txs/fee/static_calculator.go similarity index 98% rename from vms/avm/txs/fees/static_calculator.go rename to vms/avm/txs/fee/static_calculator.go index 2f1b8d4828b7..fe46a0caaf8f 100644 --- a/vms/avm/txs/fees/static_calculator.go +++ b/vms/avm/txs/fee/static_calculator.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package fees +package fee import "github.com/ava-labs/avalanchego/vms/avm/txs" diff --git a/vms/avm/txs/fees/static_config.go b/vms/avm/txs/fee/static_config.go similarity index 95% rename from vms/avm/txs/fees/static_config.go rename to vms/avm/txs/fee/static_config.go index f5b8da043358..a25fea8e95bc 100644 --- a/vms/avm/txs/fees/static_config.go +++ b/vms/avm/txs/fee/static_config.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package fees +package fee type StaticConfig struct { // Fee that is burned by every non-asset creating transaction From d890cbc7b65a85ff2538d552d33021256b216677 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 12 Jul 2024 12:15:40 +0200 Subject: [PATCH 100/102] added tx fees uts --- vms/avm/txs/fee/calculator_test.go | 93 ++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 vms/avm/txs/fee/calculator_test.go diff --git a/vms/avm/txs/fee/calculator_test.go b/vms/avm/txs/fee/calculator_test.go new file mode 100644 index 000000000000..59e4a291c19e --- /dev/null +++ b/vms/avm/txs/fee/calculator_test.go @@ -0,0 +1,93 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package fee + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +func TestTxFees(t *testing.T) { + feeTestsDefaultCfg := StaticConfig{ + TxFee: 1 * units.Avax, + CreateAssetTxFee: 2 * units.Avax, + } + + // chain times needed to have specific upgrades active + eUpgradeTime := time.Unix(1713945427, 0) + preEUpgradeTime := eUpgradeTime.Add(-1 * time.Second) + + tests := []struct { + name string + chainTime time.Time + unsignedTx func() txs.UnsignedTx + expected uint64 + }{ + { + name: "BaseTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: baseTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "CreateAssetTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: createAssetTx, + expected: feeTestsDefaultCfg.CreateAssetTxFee, + }, + { + name: "OperationTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: operationTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "ImportTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: importTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "ExportTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: exportTx, + expected: feeTestsDefaultCfg.TxFee, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + uTx := tt.unsignedTx() + fc := NewStaticCalculator(feeTestsDefaultCfg) + haveFee, err := fc.CalculateFee(&txs.Tx{Unsigned: uTx}) + require.NoError(t, err) + require.Equal(t, tt.expected, haveFee) + }) + } +} + +func baseTx() txs.UnsignedTx { + return &txs.BaseTx{} +} + +func createAssetTx() txs.UnsignedTx { + return &txs.CreateAssetTx{} +} + +func operationTx() txs.UnsignedTx { + return &txs.OperationTx{} +} + +func importTx() txs.UnsignedTx { + return &txs.ImportTx{} +} + +func exportTx() txs.UnsignedTx { + return &txs.ExportTx{} +} From 7f79fd15275945d3bc6039c82398022bca203849 Mon Sep 17 
00:00:00 2001 From: Alberto Benegiamo Date: Fri, 12 Jul 2024 16:01:25 +0200 Subject: [PATCH 101/102] reverted fee checks to synctactic verifier --- vms/avm/txs/executor/semantic_verifier.go | 78 +- .../txs/executor/semantic_verifier_test.go | 2022 +++-------------- vms/avm/txs/executor/syntactic_verifier.go | 36 +- .../txs/executor/syntactic_verifier_test.go | 1125 ++++++++- 4 files changed, 1466 insertions(+), 1795 deletions(-) diff --git a/vms/avm/txs/executor/semantic_verifier.go b/vms/avm/txs/executor/semantic_verifier.go index 68a1b9ab4ab5..946346bc0646 100644 --- a/vms/avm/txs/executor/semantic_verifier.go +++ b/vms/avm/txs/executor/semantic_verifier.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/fee" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" ) @@ -32,15 +31,36 @@ type SemanticVerifier struct { } func (v *SemanticVerifier) BaseTx(tx *txs.BaseTx) error { - return v.verifyBaseTx(tx, nil, nil) + for i, in := range tx.Ins { + // Note: Verification of the length of [t.tx.Creds] happens during + // syntactic verification, which happens before semantic verification. 
+ cred := v.Tx.Creds[i].Credential + if err := v.verifyTransfer(tx, in, cred); err != nil { + return err + } + } + + for _, out := range tx.Outs { + fxIndex, err := v.getFx(out.Out) + if err != nil { + return err + } + + assetID := out.AssetID() + if err := v.verifyFxUsage(fxIndex, assetID); err != nil { + return err + } + } + + return nil } func (v *SemanticVerifier) CreateAssetTx(tx *txs.CreateAssetTx) error { - return v.verifyBaseTx(&tx.BaseTx, nil, nil) + return v.BaseTx(&tx.BaseTx) } func (v *SemanticVerifier) OperationTx(tx *txs.OperationTx) error { - if err := v.verifyBaseTx(&tx.BaseTx, nil, nil); err != nil { + if err := v.BaseTx(&tx.BaseTx); err != nil { return err } @@ -61,7 +81,7 @@ func (v *SemanticVerifier) OperationTx(tx *txs.OperationTx) error { } func (v *SemanticVerifier) ImportTx(tx *txs.ImportTx) error { - if err := v.verifyBaseTx(&tx.BaseTx, tx.ImportedIns, nil); err != nil { + if err := v.BaseTx(&tx.BaseTx); err != nil { return err } @@ -102,7 +122,7 @@ func (v *SemanticVerifier) ImportTx(tx *txs.ImportTx) error { } func (v *SemanticVerifier) ExportTx(tx *txs.ExportTx) error { - if err := v.verifyBaseTx(&tx.BaseTx, nil, tx.ExportedOuts); err != nil { + if err := v.BaseTx(&tx.BaseTx); err != nil { return err } @@ -126,52 +146,6 @@ func (v *SemanticVerifier) ExportTx(tx *txs.ExportTx) error { return nil } -func (v *SemanticVerifier) verifyBaseTx( - tx *txs.BaseTx, - importedIns []*avax.TransferableInput, - exportedOuts []*avax.TransferableOutput, -) error { - feeCalculator := fee.NewStaticCalculator(v.Backend.Config.StaticConfig) - fee, err := feeCalculator.CalculateFee(&txs.Tx{Unsigned: tx}) - if err != nil { - return err - } - - err = avax.VerifyTx( - fee, - v.FeeAssetID, - [][]*avax.TransferableInput{tx.Ins, importedIns}, - [][]*avax.TransferableOutput{tx.Outs, exportedOuts}, - v.Codec, - ) - if err != nil { - return err - } - - for i, in := range tx.Ins { - // Note: Verification of the length of [t.tx.Creds] happens during - // syntactic 
verification, which happens before semantic verification. - cred := v.Tx.Creds[i].Credential - if err := v.verifyTransfer(tx, in, cred); err != nil { - return err - } - } - - for _, out := range tx.Outs { - fxIndex, err := v.getFx(out.Out) - if err != nil { - return err - } - - assetID := out.AssetID() - if err := v.verifyFxUsage(fxIndex, assetID); err != nil { - return err - } - } - - return nil -} - func (v *SemanticVerifier) verifyTransfer( tx txs.UnsignedTx, in *avax.TransferableInput, diff --git a/vms/avm/txs/executor/semantic_verifier_test.go b/vms/avm/txs/executor/semantic_verifier_test.go index 7f67c7710725..db89e1e5a5e9 100644 --- a/vms/avm/txs/executor/semantic_verifier_test.go +++ b/vms/avm/txs/executor/semantic_verifier_test.go @@ -4,7 +4,6 @@ package executor import ( - "math" "reflect" "testing" @@ -22,95 +21,60 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/nftfx" - "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - safemath "github.com/ava-labs/avalanchego/utils/math" ) func TestSemanticVerifierBaseTx(t *testing.T) { ctx := snowtest.Context(t, snowtest.XChainID) - // UTXO to be spent - inputTxID := ids.GenerateTestID() + typeToFxIndex := make(map[reflect.Type]int) + secpFx := &secp256k1fx.Fx{} + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + secpFx, + }, + ) + require.NoError(t, 
err) + + codec := parser.Codec() + txID := ids.GenerateTestID() utxoID := avax.UTXOID{ - TxID: inputTxID, - OutputIndex: 0, + TxID: txID, + OutputIndex: 2, } - - feeAssetID := ids.GenerateTestID() asset := avax.Asset{ - ID: feeAssetID, - } - outputOwners := secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - } - utxoAmount := 100 + feeConfig.TxFee + 50 - utxoOut := secp256k1fx.TransferOutput{ - Amt: utxoAmount, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - UTXOID: utxoID, - Asset: asset, - Out: &utxoOut, + ID: ids.GenerateTestID(), } - - // Input spending the UTXO - inputSigners := secp256k1fx.Input{ - SigIndices: []uint32{0}, + inputSigner := secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, } fxInput := secp256k1fx.TransferInput{ - Amt: utxoAmount, - Input: inputSigners, + Amt: 12345, + Input: inputSigner, } input := avax.TransferableInput{ UTXOID: utxoID, Asset: asset, In: &fxInput, } - - // Output produced by BaseTx - fxOutput := secp256k1fx.TransferOutput{ - Amt: 100, - OutputOwners: outputOwners, - } - output := avax.TransferableOutput{ - Asset: asset, - Out: &fxOutput, - } - - // BaseTx - baseTx := avax.BaseTx{ - Outs: []*avax.TransferableOutput{ - &output, - }, - Ins: []*avax.TransferableInput{ - &input, + baseTx := txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{ + &input, + }, }, } - // Backend - typeToFxIndex := make(map[reflect.Type]int) - secpFx := &secp256k1fx.Fx{} - parser, err := txs.NewCustomParser( - typeToFxIndex, - new(mockable.Clock), - logging.NoWarn{}, - []fxs.Fx{ - secpFx, - }, - ) - require.NoError(t, err) - codec := parser.Codec() backend := &Backend{ Ctx: ctx, Config: &feeConfig, @@ -122,11 +86,26 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, TypeToFxIndex: typeToFxIndex, Codec: codec, - FeeAssetID: feeAssetID, + FeeAssetID: ids.GenerateTestID(), Bootstrapped: true, } require.NoError(t, secpFx.Bootstrapped()) + outputOwners := 
secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, + } + output := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &output, + } unsignedCreateAssetTx := txs.CreateAssetTx{ States: []*txs.InitialState{{ FxIndex: 0, @@ -148,15 +127,13 @@ func TestSemanticVerifierBaseTx(t *testing.T) { state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, + Unsigned: &baseTx, } require.NoError(tx.SignSECP256K1Fx( codec, @@ -169,90 +146,21 @@ func TestSemanticVerifierBaseTx(t *testing.T) { err: nil, }, { - name: "invalid output", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: 0, - OutputOwners: outputOwners, - } - - baseTx := baseTx - baseTx.Outs = []*avax.TransferableOutput{ - &output, - } - - tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: secp256k1fx.ErrNoValueOutput, - }, - { - name: "unsorted outputs", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } + name: "assetID mismatch", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - outputs := 
[]*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] + utxo := utxo + utxo.Asset.ID = ids.GenerateTestID() - baseTx := baseTx - baseTx.Outs = outputs + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: avax.ErrOutputsNotSorted, - }, - { - name: "invalid input", - stateFunc: func(*gomock.Controller) state.Chain { - return nil + return state }, txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 0, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, + tx := &txs.Tx{ + Unsigned: &baseTx, } - - tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -261,97 +169,29 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: secp256k1fx.ErrNoValueInput, + err: errAssetIDMismatch, }, { - name: "duplicate inputs", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - &input, - } + name: "not allowed input feature extension", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - }, - )) - return tx - }, - err: avax.ErrInputsNotSortedUnique, - }, - { - name: "input overflow", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - input0 := input - input0.In = &secp256k1fx.TransferInput{ - Amt: 
1, - Input: inputSigners, - } + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil - input1 := input - input1.UTXOID.OutputIndex++ - input1.In = &secp256k1fx.TransferInput{ - Amt: math.MaxUint64, - Input: inputSigners, + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, } - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input0, - &input1, - } - avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) - tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: safemath.ErrOverflow, - }, - { - name: "output overflow", - stateFunc: func(*gomock.Controller) state.Chain { - return nil + return state }, txFunc: func(require *require.Assertions) *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, + tx := &txs.Tx{ + Unsigned: &baseTx, } - avax.SortTransferableOutputs(outputs, codec) - - baseTx := baseTx - baseTx.Outs = outputs - - tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -360,98 +200,45 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: safemath.ErrOverflow, + err: errIncompatibleFx, }, { - name: "barely sufficient funds", + name: "invalid signature", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - utxoAmount := 100 + feeConfig.TxFee - utxoOut := secp256k1fx.TransferOutput{ - Amt: utxoAmount, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - 
UTXOID: utxoID, - Asset: asset, - Out: &utxoOut, - } - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, + tx := &txs.Tx{ + Unsigned: &baseTx, } - - tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ - {keys[0]}, + {keys[1]}, }, )) return tx }, - err: nil, + err: secp256k1fx.ErrWrongSig, }, { - name: "insufficient funds", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } + name: "missing UTXO", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - } + state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) - tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: avax.ErrInsufficientFunds, - }, - { - name: "barely insufficient funds", - stateFunc: func(*gomock.Controller) state.Chain { - return nil + return state }, txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee - 1, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, + tx := &txs.Tx{ + Unsigned: &baseTx, } - - tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} 
require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -460,25 +247,27 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: avax.ErrInsufficientFunds, + err: database.ErrNotFound, }, { - name: "assetID mismatch", + name: "invalid UTXO amount", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) + output := output + output.Amt-- + utxo := utxo - utxo.Asset.ID = ids.GenerateTestID() + utxo.Out = &output state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, + Unsigned: &baseTx, } require.NoError(tx.SignSECP256K1Fx( codec, @@ -488,10 +277,10 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: errAssetIDMismatch, + err: secp256k1fx.ErrMismatchedAmounts, }, { - name: "not allowed input feature extension", + name: "not allowed output feature extension", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) @@ -502,67 +291,71 @@ func TestSemanticVerifierBaseTx(t *testing.T) { Unsigned: &unsignedCreateAssetTx, } - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, + baseTx := baseTx + baseTx.Ins = nil + baseTx.Outs = []*avax.TransferableOutput{ + { + Asset: asset, + Out: &output, }, } + tx := &txs.Tx{ + Unsigned: &baseTx, + } require.NoError(tx.SignSECP256K1Fx( codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, + [][]*secp256k1.PrivateKey{}, )) return tx }, err: errIncompatibleFx, }, { - name: "invalid signature", + name: "unknown asset", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) 
state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) return state }, txFunc: func(require *require.Assertions) *txs.Tx { tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, + Unsigned: &baseTx, } require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ - {keys[1]}, + {keys[0]}, }, )) return tx }, - err: secp256k1fx.ErrWrongSig, + err: database.ErrNotFound, }, { - name: "missing UTXO", + name: "not an asset", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) + tx := txs.Tx{ + Unsigned: &baseTx, + } + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&tx, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, + Unsigned: &baseTx, } require.NoError(tx.SignSECP256K1Fx( codec, @@ -572,130 +365,7 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: database.ErrNotFound, - }, - { - name: "invalid UTXO amount", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - utxoOut := utxoOut - utxoOut.Amt-- - - utxo := utxo - utxo.Out = &utxoOut - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) - - return state - }, - txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, - } - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: secp256k1fx.ErrMismatchedAmounts, - }, - { - name: "not allowed output feature extension", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - 
unsignedCreateAssetTx := unsignedCreateAssetTx - unsignedCreateAssetTx.States = nil - - createAssetTx := txs.Tx{ - Unsigned: &unsignedCreateAssetTx, - } - - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - - return state - }, - txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, - } - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: errIncompatibleFx, - }, - { - name: "unknown asset", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) - - return state - }, - txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, - } - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: database.ErrNotFound, - }, - { - name: "not an asset", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - tx := txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, - } - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&tx, nil) - - return state - }, - txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, - } - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: errNotAnAsset, + err: errNotAnAsset, }, } for _, test := range tests { @@ -719,84 +389,53 @@ func TestSemanticVerifierBaseTx(t *testing.T) { func TestSemanticVerifierExportTx(t *testing.T) { ctx := snowtest.Context(t, snowtest.XChainID) - // UTXO to be spent - inputTxID := ids.GenerateTestID() + 
typeToFxIndex := make(map[reflect.Type]int) + secpFx := &secp256k1fx.Fx{} + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + secpFx, + }, + ) + require.NoError(t, err) + + codec := parser.Codec() + txID := ids.GenerateTestID() utxoID := avax.UTXOID{ - TxID: inputTxID, - OutputIndex: 0, + TxID: txID, + OutputIndex: 2, } - - feeAssetID := ids.GenerateTestID() asset := avax.Asset{ - ID: feeAssetID, - } - outputOwners := secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - } - utxoAmount := 100 + feeConfig.TxFee + 50 - utxoOut := secp256k1fx.TransferOutput{ - Amt: utxoAmount, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - UTXOID: utxoID, - Asset: asset, - Out: &utxoOut, + ID: ids.GenerateTestID(), } - - // Input spending the UTXO - inputSigners := secp256k1fx.Input{ - SigIndices: []uint32{0}, + inputSigner := secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, } fxInput := secp256k1fx.TransferInput{ - Amt: utxoAmount, - Input: inputSigners, + Amt: 12345, + Input: inputSigner, } input := avax.TransferableInput{ UTXOID: utxoID, Asset: asset, In: &fxInput, } - - // Output produced by BaseTx - fxOutput := secp256k1fx.TransferOutput{ - Amt: 100, - OutputOwners: outputOwners, - } - output := avax.TransferableOutput{ - Asset: asset, - Out: &fxOutput, - } - - baseTx := avax.BaseTx{ - Outs: []*avax.TransferableOutput{ - &output, - }, - Ins: []*avax.TransferableInput{ - &input, + baseTx := txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{ + &input, + }, }, } - exportTx := txs.ExportTx{ - BaseTx: txs.BaseTx{ - BaseTx: baseTx, - }, + BaseTx: baseTx, DestinationChain: ctx.CChainID, } - typeToFxIndex := make(map[reflect.Type]int) - secpFx := &secp256k1fx.Fx{} - parser, err := txs.NewCustomParser( - typeToFxIndex, - new(mockable.Clock), - logging.NoWarn{}, - []fxs.Fx{ - secpFx, - }, - ) - require.NoError(t, err) - codec := parser.Codec() backend := 
&Backend{ Ctx: ctx, Config: &feeConfig, @@ -808,11 +447,26 @@ func TestSemanticVerifierExportTx(t *testing.T) { }, TypeToFxIndex: typeToFxIndex, Codec: codec, - FeeAssetID: feeAssetID, + FeeAssetID: ids.GenerateTestID(), Bootstrapped: true, } require.NoError(t, secpFx.Bootstrapped()) + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, + } + output := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &output, + } unsignedCreateAssetTx := txs.CreateAssetTx{ States: []*txs.InitialState{{ FxIndex: 0, @@ -834,7 +488,7 @@ func TestSemanticVerifierExportTx(t *testing.T) { state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state }, @@ -853,25 +507,21 @@ func TestSemanticVerifierExportTx(t *testing.T) { err: nil, }, { - name: "invalid output", - stateFunc: func(*gomock.Controller) state.Chain { - return nil + name: "assetID mismatch", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + utxo := utxo + utxo.Asset.ID = ids.GenerateTestID() + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + + return state }, txFunc: func(require *require.Assertions) *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: 0, - OutputOwners: outputOwners, - } - - baseTx := baseTx - baseTx.Outs = []*avax.TransferableOutput{ - &output, + tx := &txs.Tx{ + Unsigned: &exportTx, } - - eTx := exportTx - eTx.BaseTx.BaseTx = baseTx - tx := &txs.Tx{Unsigned: &eTx} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -880,39 +530,29 @@ func TestSemanticVerifierExportTx(t *testing.T) { )) return tx }, - err: secp256k1fx.ErrNoValueOutput, + err: errAssetIDMismatch, }, { - name: 
"unsorted outputs", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } + name: "not allowed input feature extension", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil - outputs := []*avax.TransferableOutput{ - &output0, - &output1, + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] - baseTx := baseTx - baseTx.Outs = outputs + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) - eTx := exportTx - eTx.BaseTx.BaseTx = baseTx - tx := &txs.Tx{Unsigned: &eTx} + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -921,66 +561,45 @@ func TestSemanticVerifierExportTx(t *testing.T) { )) return tx }, - err: avax.ErrOutputsNotSorted, + err: errIncompatibleFx, }, { - name: "unsorted exported outputs", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } + name: "invalid signature", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + 
state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) - outputs := []*avax.TransferableOutput{ - &output0, - &output1, + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] - - utx := exportTx - utx.ExportedOuts = outputs - tx := &txs.Tx{Unsigned: &utx} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ - {keys[0]}, + {keys[1]}, }, )) return tx }, - err: avax.ErrOutputsNotSorted, + err: secp256k1fx.ErrWrongSig, }, { - name: "invalid input", - stateFunc: func(*gomock.Controller) state.Chain { - return nil + name: "missing UTXO", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) + + return state }, txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 0, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, + tx := &txs.Tx{ + Unsigned: &exportTx, } - - eTx := exportTx - eTx.BaseTx.BaseTx = baseTx - tx := &txs.Tx{Unsigned: &eTx} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -989,329 +608,14 @@ func TestSemanticVerifierExportTx(t *testing.T) { )) return tx }, - err: secp256k1fx.ErrNoValueInput, - }, - { - name: "duplicate inputs", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - &input, - } - - eTx := exportTx - eTx.BaseTx.BaseTx = baseTx - tx := &txs.Tx{Unsigned: &eTx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - }, - )) - return tx - }, - err: avax.ErrInputsNotSortedUnique, - }, - { - name: "input overflow", - stateFunc: 
func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - input0 := input - input0.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - input1 := input - input1.UTXOID.OutputIndex++ - input1.In = &secp256k1fx.TransferInput{ - Amt: math.MaxUint64, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input0, - &input1, - } - avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) - - eTx := exportTx - eTx.BaseTx.BaseTx = baseTx - tx := &txs.Tx{Unsigned: &eTx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: safemath.ErrOverflow, - }, - { - name: "output overflow", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - - baseTx := baseTx - baseTx.Outs = outputs - - eTx := exportTx - eTx.BaseTx.BaseTx = baseTx - tx := &txs.Tx{Unsigned: &eTx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: safemath.ErrOverflow, - }, - { - name: "barely sufficient funds", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - utxoAmount := 100 + feeConfig.TxFee - utxoOut := secp256k1fx.TransferOutput{ - Amt: utxoAmount, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - UTXOID: utxoID, - Asset: asset, - Out: &utxoOut, - } - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - 
state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) - - return state - }, - txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - } - - eTx := exportTx - eTx.BaseTx.BaseTx = baseTx - tx := &txs.Tx{Unsigned: &eTx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: nil, - }, - { - name: "insufficient funds", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - } - - eTx := exportTx - eTx.BaseTx.BaseTx = baseTx - tx := &txs.Tx{Unsigned: &eTx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: avax.ErrInsufficientFunds, - }, - { - name: "barely insufficient funds", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee - 1, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - } - - eTx := exportTx - eTx.BaseTx.BaseTx = baseTx - tx := &txs.Tx{Unsigned: &eTx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: avax.ErrInsufficientFunds, - }, - { - name: "assetID mismatch", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - utxo := utxo - utxo.Asset.ID = ids.GenerateTestID() - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - - return state - }, - 
txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &exportTx, - } - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: errAssetIDMismatch, - }, - { - name: "not allowed input feature extension", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - unsignedCreateAssetTx := unsignedCreateAssetTx - unsignedCreateAssetTx.States = nil - - createAssetTx := txs.Tx{ - Unsigned: &unsignedCreateAssetTx, - } - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) - - return state - }, - txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &exportTx, - } - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: errIncompatibleFx, - }, - { - name: "invalid signature", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) - - return state - }, - txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &exportTx, - } - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[1]}, - }, - )) - return tx - }, - err: secp256k1fx.ErrWrongSig, - }, - { - name: "missing UTXO", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) - - return state - }, - txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &exportTx, - } - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - err: database.ErrNotFound, + err: database.ErrNotFound, }, { name: "invalid UTXO amount", stateFunc: 
func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - output := utxoOut + output := output output.Amt-- utxo := utxo @@ -1349,19 +653,24 @@ func TestSemanticVerifierExportTx(t *testing.T) { } state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { + exportTx := exportTx + exportTx.Ins = nil + exportTx.ExportedOuts = []*avax.TransferableOutput{ + { + Asset: asset, + Out: &output, + }, + } tx := &txs.Tx{ Unsigned: &exportTx, } require.NoError(tx.SignSECP256K1Fx( codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, + [][]*secp256k1.PrivateKey{}, )) return tx }, @@ -1397,7 +706,7 @@ func TestSemanticVerifierExportTx(t *testing.T) { state := state.NewMockChain(ctrl) tx := txs.Tx{ - Unsigned: &exportTx, + Unsigned: &baseTx, } state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) @@ -1448,86 +757,53 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ids.GenerateTestID(), nil) ctx.ValidatorState = validatorState - // UTXO to be spent - inputTxID := ids.GenerateTestID() + typeToFxIndex := make(map[reflect.Type]int) + secpFx := &secp256k1fx.Fx{} + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + secpFx, + }, + ) + require.NoError(err) + + codec := parser.Codec() + txID := ids.GenerateTestID() utxoID := avax.UTXOID{ - TxID: inputTxID, - OutputIndex: 0, + TxID: txID, + OutputIndex: 2, } - - feeAssetID := ids.GenerateTestID() asset := avax.Asset{ - ID: feeAssetID, - } - outputOwners := secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - } - utxoAmount := 100 + feeConfig.TxFee - utxoOut := secp256k1fx.TransferOutput{ - Amt: utxoAmount, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - UTXOID: utxoID, - 
Asset: asset, - Out: &utxoOut, + ID: ids.GenerateTestID(), } - - // Input spending the UTXO - inputSigners := secp256k1fx.Input{ - SigIndices: []uint32{0}, + inputSigner := secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, } fxInput := secp256k1fx.TransferInput{ - Amt: utxoAmount, - Input: inputSigners, + Amt: 12345, + Input: inputSigner, } input := avax.TransferableInput{ UTXOID: utxoID, Asset: asset, In: &fxInput, } - - // Output produced by BaseTx - fxOutput := secp256k1fx.TransferOutput{ - Amt: 100, - OutputOwners: outputOwners, - } - output := avax.TransferableOutput{ - Asset: asset, - Out: &fxOutput, - } - - // BaseTx - baseTx := avax.BaseTx{ - Outs: []*avax.TransferableOutput{ - &output, - }, - Ins: []*avax.TransferableInput{ - &input, + baseTx := txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{ + &input, + }, }, } - exportTx := txs.ExportTx{ - BaseTx: txs.BaseTx{ - BaseTx: baseTx, - }, + BaseTx: baseTx, DestinationChain: ctx.CChainID, } - typeToFxIndex := make(map[reflect.Type]int) - secpFx := &secp256k1fx.Fx{} - parser, err := txs.NewCustomParser( - typeToFxIndex, - new(mockable.Clock), - logging.NoWarn{}, - []fxs.Fx{ - secpFx, - }, - ) - require.NoError(err) - - codec := parser.Codec() backend := &Backend{ Ctx: ctx, Config: &feeConfig, @@ -1539,11 +815,26 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { }, TypeToFxIndex: typeToFxIndex, Codec: codec, - FeeAssetID: feeAssetID, + FeeAssetID: ids.GenerateTestID(), Bootstrapped: true, } require.NoError(secpFx.Bootstrapped()) + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, + } + output := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &output, + } unsignedCreateAssetTx := txs.CreateAssetTx{ States: []*txs.InitialState{{ FxIndex: 0, @@ -1556,7 +847,7 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { 
state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) tx := &txs.Tx{ Unsigned: &exportTx, @@ -1593,84 +884,50 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, ) require.NoError(t, err) - codec := parser.Codec() - // UTXOs to be spent - utxoAmount := 100 + feeConfig.TxFee - inputTxID := ids.GenerateTestID() + codec := parser.Codec() utxoID := avax.UTXOID{ - TxID: inputTxID, - OutputIndex: 0, + TxID: ids.GenerateTestID(), + OutputIndex: 2, } - feeAssetID := ids.GenerateTestID() asset := avax.Asset{ - ID: feeAssetID, + ID: ids.GenerateTestID(), } outputOwners := secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - } - utxoOut := secp256k1fx.TransferOutput{ - Amt: utxoAmount, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - UTXOID: utxoID, - Asset: asset, - Out: &utxoOut, - } - - // Input spending the UTXO - inputSigners := secp256k1fx.Input{ - SigIndices: []uint32{0}, - } - fxInput := secp256k1fx.TransferInput{ - Amt: utxoAmount, - Input: inputSigners, - } - input := avax.TransferableInput{ - UTXOID: utxoID, - Asset: asset, - In: &fxInput, - } - - // Output produced by BaseTx - fxOutput := secp256k1fx.TransferOutput{ - Amt: 100, - OutputOwners: outputOwners, - } - output := avax.TransferableOutput{ - Asset: asset, - Out: &fxOutput, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, } - - baseTx := avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: ctx.ChainID, - Outs: []*avax.TransferableOutput{ - &output, + baseTx := txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: ctx.ChainID, + Outs: []*avax.TransferableOutput{{ + Asset: asset, + Out: &secp256k1fx.TransferOutput{ + Amt: 1000, + OutputOwners: outputOwners, + }, + }}, }, - // no inputs here, only imported ones } - - importedInput := 
avax.TransferableInput{ + input := avax.TransferableInput{ UTXOID: utxoID, Asset: asset, In: &secp256k1fx.TransferInput{ - Amt: utxoAmount, + Amt: 12345, Input: secp256k1fx.Input{ SigIndices: []uint32{0}, }, }, } unsignedImportTx := txs.ImportTx{ - BaseTx: txs.BaseTx{ - BaseTx: baseTx, - }, + BaseTx: baseTx, SourceChain: ctx.CChainID, ImportedIns: []*avax.TransferableInput{ - &importedInput, + &input, }, } importTx := &txs.Tx{ @@ -1680,7 +937,6 @@ func TestSemanticVerifierImportTx(t *testing.T) { codec, [][]*secp256k1.PrivateKey{ {keys[0]}, - {keys[0]}, }, )) @@ -1695,11 +951,20 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, TypeToFxIndex: typeToFxIndex, Codec: codec, - FeeAssetID: feeAssetID, + FeeAssetID: ids.GenerateTestID(), Bootstrapped: true, } require.NoError(t, fx.Bootstrapped()) + output := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &output, + } utxoBytes, err := codec.Marshal(txs.CodecVersion, utxo) require.NoError(t, err) @@ -1740,237 +1005,6 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, expectedErr: nil, }, - { - name: "invalid output", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: 0, - OutputOwners: outputOwners, - } - - utx := unsignedImportTx - utx.Outs = []*avax.TransferableOutput{ - &output, - } - - tx := &txs.Tx{Unsigned: &utx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - expectedErr: secp256k1fx.ErrNoValueOutput, - }, - { - name: "unsorted outputs", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = 
&secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] - - utx := unsignedImportTx - utx.Outs = outputs - tx := &txs.Tx{Unsigned: &utx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - expectedErr: avax.ErrOutputsNotSorted, - }, - { - name: "invalid input", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 0, - Input: inputSigners, - } - - utx := unsignedImportTx - utx.Ins = []*avax.TransferableInput{ - &input, - } - tx := &txs.Tx{Unsigned: &utx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - }, - )) - return tx - }, - expectedErr: secp256k1fx.ErrNoValueInput, - }, - { - name: "duplicate inputs", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - utx := unsignedImportTx - utx.Ins = []*avax.TransferableInput{ - &input, - &input, - } - tx := &txs.Tx{Unsigned: &utx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - {keys[0]}, - })) - return tx - }, - expectedErr: avax.ErrInputsNotSortedUnique, - }, - { - name: "duplicate imported inputs", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - utx := unsignedImportTx - utx.ImportedIns = []*avax.TransferableInput{ - &input, - &input, - } - tx := &txs.Tx{Unsigned: &utx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - })) - return tx - }, - expectedErr: avax.ErrInputsNotSortedUnique, - }, - { - name: "input overflow", - stateFunc: 
func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - input0 := input - input0.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - input1 := input - input1.UTXOID.OutputIndex++ - input1.In = &secp256k1fx.TransferInput{ - Amt: math.MaxUint64, - Input: inputSigners, - } - - utx := unsignedImportTx - utx.Ins = []*avax.TransferableInput{ - &input0, - &input1, - } - avax.SortTransferableInputsWithSigners(utx.Ins, make([][]*secp256k1.PrivateKey, 2)) - tx := &txs.Tx{Unsigned: &utx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - })) - return tx - }, - expectedErr: safemath.ErrOverflow, - }, - { - name: "output overflow", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output, - } - avax.SortTransferableOutputs(outputs, codec) - - utx := unsignedImportTx - utx.Outs = outputs - tx := &txs.Tx{Unsigned: &utx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - })) - return tx - }, - expectedErr: safemath.ErrOverflow, - }, - { - name: "insufficient funds", - stateFunc: func(*gomock.Controller) state.Chain { - return nil - }, - txFunc: func(require *require.Assertions) *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - utx := unsignedImportTx - utx.ImportedIns = []*avax.TransferableInput{ - &input, - } - tx := &txs.Tx{Unsigned: &utx} - require.NoError(tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - })) - return tx - }, - expectedErr: avax.ErrInsufficientFunds, - }, { name: "not allowed input feature extension", stateFunc: func(ctrl *gomock.Controller) 
state.Chain { @@ -2058,9 +1092,7 @@ func TestSemanticVerifierImportTx(t *testing.T) { stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) tx := txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: baseTx, - }, + Unsigned: &baseTx, } state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() state.EXPECT().GetTx(asset.ID).Return(&tx, nil) @@ -2088,451 +1120,3 @@ func TestSemanticVerifierImportTx(t *testing.T) { }) } } - -func TestSemanticVerifierOperationTx(t *testing.T) { - ctx := snowtest.Context(t, snowtest.XChainID) - - var ( - secpFx = &secp256k1fx.Fx{} - nftFx = &nftfx.Fx{} - propertyFx = &propertyfx.Fx{} - ) - - typeToFxIndex := make(map[reflect.Type]int) - parser, err := txs.NewCustomParser( - typeToFxIndex, - new(mockable.Clock), - logging.NoWarn{}, - []fxs.Fx{ - secpFx, - nftFx, - propertyFx, - }, - ) - - require.NoError(t, err) - codec := parser.Codec() - - feeAssetID := ids.GenerateTestID() - feeAsset := avax.Asset{ - ID: feeAssetID, - } - - outputOwners := secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - keys[0].Address(), - }, - } - - utxoAmount := 20 * units.KiloAvax - utxoID := avax.UTXOID{ - TxID: ids.GenerateTestID(), - OutputIndex: 1, - } - utxo := &avax.UTXO{ - UTXOID: utxoID, - Asset: feeAsset, - Out: &secp256k1fx.TransferOutput{ - Amt: 20 * units.KiloAvax, - OutputOwners: outputOwners, - }, - } - - opUTXOID := avax.UTXOID{ - TxID: ids.GenerateTestID(), - OutputIndex: 1, - } - opUTXO := &avax.UTXO{ - UTXOID: opUTXOID, - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.MintOutput{ - OutputOwners: outputOwners, - }, - } - - unsignedCreateAssetTx := txs.CreateAssetTx{ - States: []*txs.InitialState{{ - FxIndex: 0, - }}, - } - createAssetTx := &txs.Tx{ - Unsigned: &unsignedCreateAssetTx, - } - - opTxInSigner := secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - } - opTxIn := avax.TransferableInput{ - UTXOID: utxoID, - Asset: feeAsset, - In: &secp256k1fx.TransferInput{ - Amt: utxoAmount, 
- Input: opTxInSigner, - }, - } - opTxOut := avax.TransferableOutput{ - Asset: avax.Asset{ID: feeAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: units.NanoAvax, - OutputOwners: outputOwners, - }, - } - unsignedOperationTx := txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{&opTxIn}, - Outs: []*avax.TransferableOutput{&opTxOut}, - }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: assetID}, - UTXOIDs: []*avax.UTXOID{ - &opUTXOID, - }, - Op: &secp256k1fx.MintOperation{ - MintInput: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - MintOutput: secp256k1fx.MintOutput{ - OutputOwners: outputOwners, - }, - TransferOutput: secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - }, - }, - }}, - } - - operationTx := txs.Tx{Unsigned: &unsignedOperationTx} - require.NoError(t, operationTx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - }, - )) - - backend := &Backend{ - Ctx: ctx, - Config: &feeConfig, - Fxs: []*fxs.ParsedFx{ - { - ID: secp256k1fx.ID, - Fx: secpFx, - }, - { - ID: nftfx.ID, - Fx: nftFx, - }, - { - ID: propertyfx.ID, - Fx: propertyFx, - }, - }, - Codec: codec, - TypeToFxIndex: typeToFxIndex, - FeeAssetID: feeAssetID, - } - require.NoError(t, secpFx.Bootstrapped()) - require.NoError(t, nftFx.Bootstrapped()) - require.NoError(t, propertyFx.Bootstrapped()) - - tests := []struct { - name string - stateFunc func(*gomock.Controller) state.Chain - txFunc func(*require.Assertions) *txs.Tx - expectedErr error - }{ - { - name: "valid", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXO(utxoID.InputID()).Return(utxo, nil).AnyTimes() - state.EXPECT().GetUTXO(opUTXO.InputID()).Return(opUTXO, nil).AnyTimes() - state.EXPECT().GetTx(feeAssetID).Return(createAssetTx, nil).AnyTimes() - return state - }, - txFunc: func(*require.Assertions) *txs.Tx { - 
return &operationTx - }, - expectedErr: nil, - }, - { - name: "invalid output", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - return state - }, - txFunc: func(*require.Assertions) *txs.Tx { - output := opTxOut - output.Out = &secp256k1fx.TransferOutput{ - Amt: 0, - OutputOwners: outputOwners, - } - - unsignedTx := unsignedOperationTx - unsignedTx.Outs = []*avax.TransferableOutput{ - &output, - } - return &txs.Tx{ - Unsigned: &unsignedTx, - Creds: operationTx.Creds, - } - }, - expectedErr: secp256k1fx.ErrNoValueOutput, - }, - { - name: "unsorted outputs", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - return state - }, - txFunc: func(*require.Assertions) *txs.Tx { - output0 := opTxOut - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := opTxOut - output1.Out = &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] - - unsignedTx := unsignedOperationTx - unsignedTx.Outs = outputs - return &txs.Tx{ - Unsigned: &unsignedTx, - Creds: operationTx.Creds, - } - }, - expectedErr: avax.ErrOutputsNotSorted, - }, - { - name: "invalid input", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - return state - }, - txFunc: func(*require.Assertions) *txs.Tx { - input := opTxIn - input.In = &secp256k1fx.TransferInput{ - Amt: 0, - Input: opTxInSigner, - } - - tx := unsignedOperationTx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: operationTx.Creds, - } - }, - expectedErr: secp256k1fx.ErrNoValueInput, - }, - { - name: "duplicate inputs", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - return state - }, - txFunc: 
func(*require.Assertions) *txs.Tx { - unsignedTx := unsignedOperationTx - unsignedTx.Ins = []*avax.TransferableInput{ - &opTxIn, - &opTxIn, - } - - tx := &txs.Tx{Unsigned: &unsignedTx} - require.NoError(t, tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - })) - - return tx - }, - expectedErr: avax.ErrInputsNotSortedUnique, - }, - { - name: "input overflow", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - return state - }, - txFunc: func(*require.Assertions) *txs.Tx { - input0 := opTxIn - input0.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: opTxInSigner, - } - - input1 := opTxIn - input1.UTXOID.OutputIndex++ - input1.In = &secp256k1fx.TransferInput{ - Amt: math.MaxUint64, - Input: opTxInSigner, - } - - unsignedTx := unsignedOperationTx - unsignedTx.Ins = []*avax.TransferableInput{ - &input0, - &input1, - } - avax.SortTransferableInputsWithSigners(unsignedTx.Ins, make([][]*secp256k1.PrivateKey, 2)) - - tx := &txs.Tx{Unsigned: &unsignedTx} - require.NoError(t, tx.SignSECP256K1Fx( - codec, - [][]*secp256k1.PrivateKey{ - {keys[0]}, - {keys[0]}, - })) - return tx - }, - expectedErr: safemath.ErrOverflow, - }, - { - name: "output overflow", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - return state - }, - txFunc: func(*require.Assertions) *txs.Tx { - output := opTxOut - output.Out = &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output, - } - avax.SortTransferableOutputs(outputs, codec) - - unsignedTx := unsignedOperationTx - unsignedTx.Outs = outputs - return &txs.Tx{ - Unsigned: &unsignedTx, - Creds: operationTx.Creds, - } - }, - expectedErr: safemath.ErrOverflow, - }, - { - name: "insufficient funds", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - return state - }, - txFunc: func(*require.Assertions) 
*txs.Tx { - input := opTxIn - input.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: opTxInSigner, - } - - unsignedTx := unsignedOperationTx - unsignedTx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &unsignedTx, - Creds: operationTx.Creds, - } - }, - expectedErr: avax.ErrInsufficientFunds, - }, - { - name: "barely sufficient funds", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - barelySufficientUtxo := &avax.UTXO{ - UTXOID: utxoID, - Asset: feeAsset, - Out: &secp256k1fx.TransferOutput{ - Amt: units.NanoAvax + feeConfig.TxFee, - OutputOwners: outputOwners, - }, - } - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(barelySufficientUtxo, nil).AnyTimes() - state.EXPECT().GetUTXO(opUTXO.InputID()).Return(opUTXO, nil).AnyTimes() - state.EXPECT().GetTx(feeAssetID).Return(createAssetTx, nil).AnyTimes() - return state - }, - txFunc: func(*require.Assertions) *txs.Tx { - input := opTxIn - input.In = &secp256k1fx.TransferInput{ - Amt: units.NanoAvax + feeConfig.TxFee, - Input: opTxInSigner, - } - - unsignedTx := unsignedOperationTx - unsignedTx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &unsignedTx, - Creds: operationTx.Creds, - } - }, - expectedErr: nil, - }, - { - name: "barely insufficient funds", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - return state - }, - txFunc: func(*require.Assertions) *txs.Tx { - input := opTxIn - input.In = &secp256k1fx.TransferInput{ - Amt: feeConfig.TxFee, - Input: opTxInSigner, - } - - unsignedTx := unsignedOperationTx - unsignedTx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &unsignedTx, - Creds: operationTx.Creds, - } - }, - expectedErr: avax.ErrInsufficientFunds, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - state := test.stateFunc(ctrl) - tx 
:= test.txFunc(require) - err := tx.Unsigned.Visit(&SemanticVerifier{ - Backend: backend, - State: state, - Tx: tx, - }) - require.ErrorIs(err, test.expectedErr) - }) - } -} diff --git a/vms/avm/txs/executor/syntactic_verifier.go b/vms/avm/txs/executor/syntactic_verifier.go index 726f69644eef..b964ca1df02e 100644 --- a/vms/avm/txs/executor/syntactic_verifier.go +++ b/vms/avm/txs/executor/syntactic_verifier.go @@ -13,6 +13,8 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/fee" + "github.com/ava-labs/avalanchego/vms/components/avax" ) const ( @@ -50,7 +52,7 @@ type SyntacticVerifier struct { } func (v *SyntacticVerifier) BaseTx(tx *txs.BaseTx) error { - if err := tx.BaseTx.Verify(v.Ctx); err != nil { + if err := v.verifyBaseTx(tx, nil, nil); err != nil { return err } @@ -102,7 +104,7 @@ func (v *SyntacticVerifier) CreateAssetTx(tx *txs.CreateAssetTx) error { } } - if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { + if err := v.verifyBaseTx(&tx.BaseTx, nil, nil); err != nil { return err } @@ -139,7 +141,7 @@ func (v *SyntacticVerifier) OperationTx(tx *txs.OperationTx) error { return errNoOperations } - if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { + if err := v.verifyBaseTx(&tx.BaseTx, nil, nil); err != nil { return err } @@ -188,7 +190,7 @@ func (v *SyntacticVerifier) ImportTx(tx *txs.ImportTx) error { return errNoImportInputs } - if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { + if err := v.verifyBaseTx(&tx.BaseTx, tx.ImportedIns, nil); err != nil { return err } @@ -216,7 +218,7 @@ func (v *SyntacticVerifier) ExportTx(tx *txs.ExportTx) error { return errNoExportOutputs } - if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { + if err := v.verifyBaseTx(&tx.BaseTx, nil, tx.ExportedOuts); err != nil { return err } @@ -238,3 +240,27 @@ func (v *SyntacticVerifier) 
ExportTx(tx *txs.ExportTx) error { return nil } + +func (v *SyntacticVerifier) verifyBaseTx( + bTx *txs.BaseTx, + importedIns []*avax.TransferableInput, + exportedOuts []*avax.TransferableOutput, +) error { + if err := bTx.BaseTx.Verify(v.Ctx); err != nil { + return err + } + + feeCalculator := fee.NewStaticCalculator(v.Backend.Config.StaticConfig) + fee, err := feeCalculator.CalculateFee(v.Tx) + if err != nil { + return err + } + + return avax.VerifyTx( + fee, + v.FeeAssetID, + [][]*avax.TransferableInput{bTx.Ins, importedIns}, + [][]*avax.TransferableOutput{bTx.Outs, exportedOuts}, + v.Codec, + ) +} diff --git a/vms/avm/txs/executor/syntactic_verifier_test.go b/vms/avm/txs/executor/syntactic_verifier_test.go index b473b4515e97..4fc6b10968e4 100644 --- a/vms/avm/txs/executor/syntactic_verifier_test.go +++ b/vms/avm/txs/executor/syntactic_verifier_test.go @@ -4,6 +4,7 @@ package executor import ( + "math" "strings" "testing" @@ -21,6 +22,8 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) var ( @@ -160,6 +163,177 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { }, err: avax.ErrMemoTooLarge, }, + { + name: "invalid output", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + baseTx := baseTx + baseTx.Outs = []*avax.TransferableOutput{ + &output, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := 
[]*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + baseTx := baseTx + baseTx.Outs = outputs + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "output overflow", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + 
avax.SortTransferableOutputs(outputs, codec) + + baseTx := baseTx + baseTx.Outs = outputs + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, { name: "invalid credential", txFunc: func() *txs.Tx { @@ -181,6 +355,46 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, + { + name: "barely sufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -444,6 +658,177 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { }, err: avax.ErrMemoTooLarge, }, + { + name: "invalid output", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + tx := tx + tx.Outs = []*avax.TransferableOutput{ + &output, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: 
secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "output overflow", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = 
&secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, { name: "invalid nil state", txFunc: func() *txs.Tx { @@ -579,6 +964,46 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, + { + name: "barely sufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.CreateAssetTxFee, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.CreateAssetTxFee - 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -755,55 +1180,219 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { err: avax.ErrMemoTooLarge, }, { - name: "invalid nil op", + name: "invalid output", txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + tx := tx - tx.Ops = []*txs.Operation{ - nil, + tx.Outs = []*avax.TransferableOutput{ + &output, } 
return &txs.Tx{ Unsigned: &tx, Creds: creds, } }, - err: txs.ErrNilOperation, + err: secp256k1fx.ErrNoValueOutput, }, { - name: "invalid nil fx op", + name: "unsorted outputs", txFunc: func() *txs.Tx { - op := op - op.Op = nil + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } - tx := tx - tx.Ops = []*txs.Operation{ - &op, + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.Outs = outputs return &txs.Tx{ Unsigned: &tx, Creds: creds, } }, - err: txs.ErrNilFxOperation, + err: avax.ErrOutputsNotSorted, }, { - name: "invalid duplicated op UTXOs", + name: "invalid input", txFunc: func() *txs.Tx { - op := op - op.UTXOIDs = []*avax.UTXOID{ - &opUTXOID, - &opUTXOID, + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, } tx := tx - tx.Ops = []*txs.Operation{ - &op, + tx.Ins = []*avax.TransferableInput{ + &input, } return &txs.Tx{ Unsigned: &tx, Creds: creds, } }, - err: txs.ErrNotSortedAndUniqueUTXOIDs, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(tx.Ins, 
make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "output overflow", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output, + } + avax.SortTransferableOutputs(outputs, codec) + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + { + name: "invalid nil op", + txFunc: func() *txs.Tx { + tx := tx + tx.Ops = []*txs.Operation{ + nil, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrNilOperation, + }, + { + name: "invalid nil fx op", + txFunc: func() *txs.Tx { + op := op + op.Op = nil + + tx := tx + tx.Ops = []*txs.Operation{ + &op, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrNilFxOperation, + }, + { + name: "invalid duplicated op UTXOs", + txFunc: func() *txs.Tx { + op := op + op.UTXOIDs = []*avax.UTXOID{ + &opUTXOID, + &opUTXOID, + } + + tx := tx + tx.Ops = []*txs.Operation{ + &op, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrNotSortedAndUniqueUTXOIDs, }, { name: "invalid duplicated UTXOs across ops", @@ -864,6 +1453,46 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, + { + name: "barely sufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + tx := tx + 
tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -1020,6 +1649,189 @@ func TestSyntacticVerifierImportTx(t *testing.T) { }, err: avax.ErrMemoTooLarge, }, + { + name: "invalid output", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + tx := tx + tx.Outs = []*avax.TransferableOutput{ + &output, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.Ins = 
[]*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "duplicate imported inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.ImportedIns = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(tx.Ins, make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "output overflow", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output, + } + avax.SortTransferableOutputs(outputs, codec) + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + tx := tx + tx.ImportedIns = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, { name: "invalid credential", txFunc: func() *txs.Tx { @@ -1041,6 +1853,46 @@ func TestSyntacticVerifierImportTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, + { + name: 
"barely sufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + tx := tx + tx.ImportedIns = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + tx := tx + tx.ImportedIns = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -1197,6 +2049,201 @@ func TestSyntacticVerifierExportTx(t *testing.T) { }, err: avax.ErrMemoTooLarge, }, + { + name: "invalid output", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + tx := tx + tx.Outs = []*avax.TransferableOutput{ + &output, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "unsorted exported outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = 
&secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.ExportedOuts = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(tx.Ins, make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "output overflow", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output, + } + avax.SortTransferableOutputs(outputs, codec) + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: safemath.ErrOverflow, + }, + { + name: "insufficient 
funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, { name: "invalid credential", txFunc: func() *txs.Tx { @@ -1218,6 +2265,46 @@ func TestSyntacticVerifierExportTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, + { + name: "barely sufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { From 3f493ae1297e02ca7aab42bc703168211f197f7e Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 12 Jul 2024 17:43:54 +0200 Subject: [PATCH 102/102] fixed merge --- vms/avm/txs/executor/semantic_verifier.go | 78 +- .../txs/executor/semantic_verifier_test.go | 2034 ++++++++++++++--- vms/avm/txs/executor/syntactic_verifier.go | 36 +- .../txs/executor/syntactic_verifier_test.go | 1125 +-------- 4 files changed, 1801 insertions(+), 1472 deletions(-) diff --git a/vms/avm/txs/executor/semantic_verifier.go b/vms/avm/txs/executor/semantic_verifier.go index 946346bc0646..68a1b9ab4ab5 100644 --- a/vms/avm/txs/executor/semantic_verifier.go +++ b/vms/avm/txs/executor/semantic_verifier.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" 
"github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/fee" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" ) @@ -31,36 +32,15 @@ type SemanticVerifier struct { } func (v *SemanticVerifier) BaseTx(tx *txs.BaseTx) error { - for i, in := range tx.Ins { - // Note: Verification of the length of [t.tx.Creds] happens during - // syntactic verification, which happens before semantic verification. - cred := v.Tx.Creds[i].Credential - if err := v.verifyTransfer(tx, in, cred); err != nil { - return err - } - } - - for _, out := range tx.Outs { - fxIndex, err := v.getFx(out.Out) - if err != nil { - return err - } - - assetID := out.AssetID() - if err := v.verifyFxUsage(fxIndex, assetID); err != nil { - return err - } - } - - return nil + return v.verifyBaseTx(tx, nil, nil) } func (v *SemanticVerifier) CreateAssetTx(tx *txs.CreateAssetTx) error { - return v.BaseTx(&tx.BaseTx) + return v.verifyBaseTx(&tx.BaseTx, nil, nil) } func (v *SemanticVerifier) OperationTx(tx *txs.OperationTx) error { - if err := v.BaseTx(&tx.BaseTx); err != nil { + if err := v.verifyBaseTx(&tx.BaseTx, nil, nil); err != nil { return err } @@ -81,7 +61,7 @@ func (v *SemanticVerifier) OperationTx(tx *txs.OperationTx) error { } func (v *SemanticVerifier) ImportTx(tx *txs.ImportTx) error { - if err := v.BaseTx(&tx.BaseTx); err != nil { + if err := v.verifyBaseTx(&tx.BaseTx, tx.ImportedIns, nil); err != nil { return err } @@ -122,7 +102,7 @@ func (v *SemanticVerifier) ImportTx(tx *txs.ImportTx) error { } func (v *SemanticVerifier) ExportTx(tx *txs.ExportTx) error { - if err := v.BaseTx(&tx.BaseTx); err != nil { + if err := v.verifyBaseTx(&tx.BaseTx, nil, tx.ExportedOuts); err != nil { return err } @@ -146,6 +126,52 @@ func (v *SemanticVerifier) ExportTx(tx *txs.ExportTx) error { return nil } +func (v *SemanticVerifier) 
verifyBaseTx( + tx *txs.BaseTx, + importedIns []*avax.TransferableInput, + exportedOuts []*avax.TransferableOutput, +) error { + feeCalculator := fee.NewStaticCalculator(v.Backend.Config.StaticConfig) + fee, err := feeCalculator.CalculateFee(&txs.Tx{Unsigned: tx}) + if err != nil { + return err + } + + err = avax.VerifyTx( + fee, + v.FeeAssetID, + [][]*avax.TransferableInput{tx.Ins, importedIns}, + [][]*avax.TransferableOutput{tx.Outs, exportedOuts}, + v.Codec, + ) + if err != nil { + return err + } + + for i, in := range tx.Ins { + // Note: Verification of the length of [t.tx.Creds] happens during + // syntactic verification, which happens before semantic verification. + cred := v.Tx.Creds[i].Credential + if err := v.verifyTransfer(tx, in, cred); err != nil { + return err + } + } + + for _, out := range tx.Outs { + fxIndex, err := v.getFx(out.Out) + if err != nil { + return err + } + + assetID := out.AssetID() + if err := v.verifyFxUsage(fxIndex, assetID); err != nil { + return err + } + } + + return nil +} + func (v *SemanticVerifier) verifyTransfer( tx txs.UnsignedTx, in *avax.TransferableInput, diff --git a/vms/avm/txs/executor/semantic_verifier_test.go b/vms/avm/txs/executor/semantic_verifier_test.go index db89e1e5a5e9..7f67c7710725 100644 --- a/vms/avm/txs/executor/semantic_verifier_test.go +++ b/vms/avm/txs/executor/semantic_verifier_test.go @@ -4,6 +4,7 @@ package executor import ( + "math" "reflect" "testing" @@ -21,60 +22,95 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" 
"github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) func TestSemanticVerifierBaseTx(t *testing.T) { ctx := snowtest.Context(t, snowtest.XChainID) - typeToFxIndex := make(map[reflect.Type]int) - secpFx := &secp256k1fx.Fx{} - parser, err := txs.NewCustomParser( - typeToFxIndex, - new(mockable.Clock), - logging.NoWarn{}, - []fxs.Fx{ - secpFx, - }, - ) - require.NoError(t, err) - - codec := parser.Codec() - txID := ids.GenerateTestID() + // UTXO to be spent + inputTxID := ids.GenerateTestID() utxoID := avax.UTXOID{ - TxID: txID, - OutputIndex: 2, + TxID: inputTxID, + OutputIndex: 0, } + + feeAssetID := ids.GenerateTestID() asset := avax.Asset{ - ID: ids.GenerateTestID(), + ID: feeAssetID, } - inputSigner := secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + } + utxoAmount := 100 + feeConfig.TxFee + 50 + utxoOut := secp256k1fx.TransferOutput{ + Amt: utxoAmount, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &utxoOut, + } + + // Input spending the UTXO + inputSigners := secp256k1fx.Input{ + SigIndices: []uint32{0}, } fxInput := secp256k1fx.TransferInput{ - Amt: 12345, - Input: inputSigner, + Amt: utxoAmount, + Input: inputSigners, } input := avax.TransferableInput{ UTXOID: utxoID, Asset: asset, In: &fxInput, } - baseTx := txs.BaseTx{ - BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{ - &input, - }, + + // Output produced by BaseTx + fxOutput := secp256k1fx.TransferOutput{ + Amt: 100, + OutputOwners: outputOwners, + } + output := avax.TransferableOutput{ + Asset: asset, + Out: &fxOutput, + } + + // BaseTx + baseTx := avax.BaseTx{ + Outs: 
[]*avax.TransferableOutput{ + &output, + }, + Ins: []*avax.TransferableInput{ + &input, }, } + // Backend + typeToFxIndex := make(map[reflect.Type]int) + secpFx := &secp256k1fx.Fx{} + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + secpFx, + }, + ) + require.NoError(t, err) + codec := parser.Codec() backend := &Backend{ Ctx: ctx, Config: &feeConfig, @@ -86,26 +122,11 @@ func TestSemanticVerifierBaseTx(t *testing.T) { }, TypeToFxIndex: typeToFxIndex, Codec: codec, - FeeAssetID: ids.GenerateTestID(), + FeeAssetID: feeAssetID, Bootstrapped: true, } require.NoError(t, secpFx.Bootstrapped()) - outputOwners := secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - keys[0].Address(), - }, - } - output := secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - UTXOID: utxoID, - Asset: asset, - Out: &output, - } unsignedCreateAssetTx := txs.CreateAssetTx{ States: []*txs.InitialState{{ FxIndex: 0, @@ -127,13 +148,15 @@ func TestSemanticVerifierBaseTx(t *testing.T) { state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) return state }, txFunc: func(require *require.Assertions) *txs.Tx { tx := &txs.Tx{ - Unsigned: &baseTx, + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, } require.NoError(tx.SignSECP256K1Fx( codec, @@ -146,21 +169,90 @@ func TestSemanticVerifierBaseTx(t *testing.T) { err: nil, }, { - name: "assetID mismatch", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) + name: "invalid output", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } - utxo := utxo - 
utxo.Asset.ID = ids.GenerateTestID() + baseTx := baseTx + baseTx.Outs = []*avax.TransferableOutput{ + &output, + } - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } - return state + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + baseTx := baseTx + baseTx.Outs = outputs + + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + stateFunc: func(*gomock.Controller) state.Chain { + return nil }, txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &baseTx, + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, } + + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -169,29 +261,97 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: errAssetIDMismatch, + err: secp256k1fx.ErrNoValueInput, }, { - name: "not allowed input feature extension", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) + name: "duplicate 
inputs", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + &input, + } - unsignedCreateAssetTx := unsignedCreateAssetTx - unsignedCreateAssetTx.States = nil + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + }, + )) + return tx + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } - createAssetTx := txs.Tx{ - Unsigned: &unsignedCreateAssetTx, + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: inputSigners, } - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) - return state + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: safemath.ErrOverflow, + }, + { + name: "output overflow", + stateFunc: func(*gomock.Controller) state.Chain { + return nil }, txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &baseTx, + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, } + 
avax.SortTransferableOutputs(outputs, codec) + + baseTx := baseTx + baseTx.Outs = outputs + + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -200,45 +360,98 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: errIncompatibleFx, + err: safemath.ErrOverflow, }, { - name: "invalid signature", + name: "barely sufficient funds", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) + utxoAmount := 100 + feeConfig.TxFee + utxoOut := secp256k1fx.TransferOutput{ + Amt: utxoAmount, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &utxoOut, + } + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) return state }, txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &baseTx, + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, } + + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ - {keys[1]}, + {keys[0]}, }, )) return tx }, - err: secp256k1fx.ErrWrongSig, + err: nil, }, { - name: "missing UTXO", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) + name: "insufficient funds", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } - state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } - return state + tx := 
&txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: avax.ErrInsufficientFunds, + }, + { + name: "barely insufficient funds", + stateFunc: func(*gomock.Controller) state.Chain { + return nil }, txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &baseTx, + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, } + + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: baseTx}} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -247,27 +460,25 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: database.ErrNotFound, + err: avax.ErrInsufficientFunds, }, { - name: "invalid UTXO amount", + name: "assetID mismatch", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - output := output - output.Amt-- - utxo := utxo - utxo.Out = &output + utxo.Asset.ID = ids.GenerateTestID() state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { tx := &txs.Tx{ - Unsigned: &baseTx, + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, } require.NoError(tx.SignSECP256K1Fx( codec, @@ -277,10 +488,10 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: secp256k1fx.ErrMismatchedAmounts, + err: errAssetIDMismatch, }, { - name: "not allowed output feature extension", + name: "not allowed input feature extension", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) @@ -291,71 +502,67 @@ func TestSemanticVerifierBaseTx(t *testing.T) { Unsigned: &unsignedCreateAssetTx, } + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) 
state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { - baseTx := baseTx - baseTx.Ins = nil - baseTx.Outs = []*avax.TransferableOutput{ - { - Asset: asset, - Out: &output, - }, - } tx := &txs.Tx{ - Unsigned: &baseTx, + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, } require.NoError(tx.SignSECP256K1Fx( codec, - [][]*secp256k1.PrivateKey{}, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, )) return tx }, err: errIncompatibleFx, }, { - name: "unknown asset", + name: "invalid signature", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { tx := &txs.Tx{ - Unsigned: &baseTx, + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, } require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ - {keys[0]}, + {keys[1]}, }, )) return tx }, - err: database.ErrNotFound, + err: secp256k1fx.ErrWrongSig, }, { - name: "not an asset", + name: "missing UTXO", stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - tx := txs.Tx{ - Unsigned: &baseTx, - } - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&tx, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) return state }, txFunc: func(require *require.Assertions) *txs.Tx { tx := &txs.Tx{ - Unsigned: &baseTx, + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, } require.NoError(tx.SignSECP256K1Fx( codec, @@ -365,10 +572,133 @@ func TestSemanticVerifierBaseTx(t *testing.T) { )) return tx }, - err: errNotAnAsset, + err: database.ErrNotFound, }, - } - for _, test := range tests { + { + name: "invalid UTXO amount", + stateFunc: func(ctrl *gomock.Controller) 
state.Chain { + state := state.NewMockChain(ctrl) + + utxoOut := utxoOut + utxoOut.Amt-- + + utxo := utxo + utxo.Out = &utxoOut + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: secp256k1fx.ErrMismatchedAmounts, + }, + { + name: "not allowed output feature extension", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil + + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: errIncompatibleFx, + }, + { + name: "unknown asset", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: database.ErrNotFound, + }, + { + name: "not an asset", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + tx := txs.Tx{ + Unsigned: &txs.BaseTx{ + 
BaseTx: baseTx, + }, + } + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&tx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: errNotAnAsset, + }, + } + for _, test := range tests { t.Run(test.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) @@ -389,53 +719,84 @@ func TestSemanticVerifierBaseTx(t *testing.T) { func TestSemanticVerifierExportTx(t *testing.T) { ctx := snowtest.Context(t, snowtest.XChainID) - typeToFxIndex := make(map[reflect.Type]int) - secpFx := &secp256k1fx.Fx{} - parser, err := txs.NewCustomParser( - typeToFxIndex, - new(mockable.Clock), - logging.NoWarn{}, - []fxs.Fx{ - secpFx, - }, - ) - require.NoError(t, err) - - codec := parser.Codec() - txID := ids.GenerateTestID() + // UTXO to be spent + inputTxID := ids.GenerateTestID() utxoID := avax.UTXOID{ - TxID: txID, - OutputIndex: 2, + TxID: inputTxID, + OutputIndex: 0, } + + feeAssetID := ids.GenerateTestID() asset := avax.Asset{ - ID: ids.GenerateTestID(), + ID: feeAssetID, } - inputSigner := secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + } + utxoAmount := 100 + feeConfig.TxFee + 50 + utxoOut := secp256k1fx.TransferOutput{ + Amt: utxoAmount, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &utxoOut, + } + + // Input spending the UTXO + inputSigners := secp256k1fx.Input{ + SigIndices: []uint32{0}, } fxInput := secp256k1fx.TransferInput{ - Amt: 12345, - Input: inputSigner, + Amt: utxoAmount, + Input: inputSigners, } input := avax.TransferableInput{ UTXOID: utxoID, Asset: asset, In: &fxInput, } - baseTx := txs.BaseTx{ - 
BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{ - &input, - }, + + // Output produced by BaseTx + fxOutput := secp256k1fx.TransferOutput{ + Amt: 100, + OutputOwners: outputOwners, + } + output := avax.TransferableOutput{ + Asset: asset, + Out: &fxOutput, + } + + baseTx := avax.BaseTx{ + Outs: []*avax.TransferableOutput{ + &output, + }, + Ins: []*avax.TransferableInput{ + &input, }, } + exportTx := txs.ExportTx{ - BaseTx: baseTx, + BaseTx: txs.BaseTx{ + BaseTx: baseTx, + }, DestinationChain: ctx.CChainID, } + typeToFxIndex := make(map[reflect.Type]int) + secpFx := &secp256k1fx.Fx{} + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + secpFx, + }, + ) + require.NoError(t, err) + codec := parser.Codec() backend := &Backend{ Ctx: ctx, Config: &feeConfig, @@ -447,26 +808,11 @@ func TestSemanticVerifierExportTx(t *testing.T) { }, TypeToFxIndex: typeToFxIndex, Codec: codec, - FeeAssetID: ids.GenerateTestID(), + FeeAssetID: feeAssetID, Bootstrapped: true, } require.NoError(t, secpFx.Bootstrapped()) - outputOwners := secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - keys[0].Address(), - }, - } - output := secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - UTXOID: utxoID, - Asset: asset, - Out: &output, - } unsignedCreateAssetTx := txs.CreateAssetTx{ States: []*txs.InitialState{{ FxIndex: 0, @@ -488,7 +834,7 @@ func TestSemanticVerifierExportTx(t *testing.T) { state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) return state }, @@ -507,21 +853,25 @@ func TestSemanticVerifierExportTx(t *testing.T) { err: nil, }, { - name: "assetID mismatch", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - utxo := utxo - utxo.Asset.ID = 
ids.GenerateTestID() - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - - return state + name: "invalid output", + stateFunc: func(*gomock.Controller) state.Chain { + return nil }, txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &exportTx, + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + baseTx := baseTx + baseTx.Outs = []*avax.TransferableOutput{ + &output, } + + eTx := exportTx + eTx.BaseTx.BaseTx = baseTx + tx := &txs.Tx{Unsigned: &eTx} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -530,29 +880,39 @@ func TestSemanticVerifierExportTx(t *testing.T) { )) return tx }, - err: errAssetIDMismatch, + err: secp256k1fx.ErrNoValueOutput, }, { - name: "not allowed input feature extension", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) + name: "unsorted outputs", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } - unsignedCreateAssetTx := unsignedCreateAssetTx - unsignedCreateAssetTx.States = nil + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } - createAssetTx := txs.Tx{ - Unsigned: &unsignedCreateAssetTx, + outputs := []*avax.TransferableOutput{ + &output0, + &output1, } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + baseTx := baseTx + baseTx.Outs = outputs - return state - }, - txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &exportTx, - } + eTx := exportTx + eTx.BaseTx.BaseTx = baseTx + tx := &txs.Tx{Unsigned: &eTx} 
require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -561,45 +921,66 @@ func TestSemanticVerifierExportTx(t *testing.T) { )) return tx }, - err: errIncompatibleFx, + err: avax.ErrOutputsNotSorted, }, { - name: "invalid signature", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) - - return state + name: "unsorted exported outputs", + stateFunc: func(*gomock.Controller) state.Chain { + return nil }, txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &exportTx, + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + utx := exportTx + utx.ExportedOuts = outputs + tx := &txs.Tx{Unsigned: &utx} require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ - {keys[1]}, + {keys[0]}, }, )) return tx }, - err: secp256k1fx.ErrWrongSig, + err: avax.ErrOutputsNotSorted, }, { - name: "missing UTXO", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) - - return state + name: "invalid input", + stateFunc: func(*gomock.Controller) state.Chain { + return nil }, txFunc: func(require *require.Assertions) *txs.Tx { - tx := &txs.Tx{ - Unsigned: &exportTx, + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, } + + eTx := exportTx + eTx.BaseTx.BaseTx = baseTx + tx := &txs.Tx{Unsigned: &eTx} 
require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ @@ -608,14 +989,329 @@ func TestSemanticVerifierExportTx(t *testing.T) { )) return tx }, - err: database.ErrNotFound, + err: secp256k1fx.ErrNoValueInput, }, { - name: "invalid UTXO amount", - stateFunc: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - - output := output + name: "duplicate inputs", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + + eTx := exportTx + eTx.BaseTx.BaseTx = baseTx + tx := &txs.Tx{Unsigned: &eTx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + }, + )) + return tx + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) + + eTx := exportTx + eTx.BaseTx.BaseTx = baseTx + tx := &txs.Tx{Unsigned: &eTx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: safemath.ErrOverflow, + }, + { + name: "output overflow", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + 
Amt: math.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + + baseTx := baseTx + baseTx.Outs = outputs + + eTx := exportTx + eTx.BaseTx.BaseTx = baseTx + tx := &txs.Tx{Unsigned: &eTx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: safemath.ErrOverflow, + }, + { + name: "barely sufficient funds", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + utxoAmount := 100 + feeConfig.TxFee + utxoOut := secp256k1fx.TransferOutput{ + Amt: utxoAmount, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &utxoOut, + } + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + + eTx := exportTx + eTx.BaseTx.BaseTx = baseTx + tx := &txs.Tx{Unsigned: &eTx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: nil, + }, + { + name: "insufficient funds", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + + eTx := exportTx + eTx.BaseTx.BaseTx = baseTx + tx := &txs.Tx{Unsigned: &eTx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: avax.ErrInsufficientFunds, + }, + { + name: 
"barely insufficient funds", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + + eTx := exportTx + eTx.BaseTx.BaseTx = baseTx + tx := &txs.Tx{Unsigned: &eTx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: avax.ErrInsufficientFunds, + }, + { + name: "assetID mismatch", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + utxo := utxo + utxo.Asset.ID = ids.GenerateTestID() + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: errAssetIDMismatch, + }, + { + name: "not allowed input feature extension", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil + + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: errIncompatibleFx, + }, + { + name: "invalid signature", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) + 
state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[1]}, + }, + )) + return tx + }, + err: secp256k1fx.ErrWrongSig, + }, + { + name: "missing UTXO", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + err: database.ErrNotFound, + }, + { + name: "invalid UTXO amount", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + output := utxoOut output.Amt-- utxo := utxo @@ -653,24 +1349,19 @@ func TestSemanticVerifierExportTx(t *testing.T) { } state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) return state }, txFunc: func(require *require.Assertions) *txs.Tx { - exportTx := exportTx - exportTx.Ins = nil - exportTx.ExportedOuts = []*avax.TransferableOutput{ - { - Asset: asset, - Out: &output, - }, - } tx := &txs.Tx{ Unsigned: &exportTx, } require.NoError(tx.SignSECP256K1Fx( codec, - [][]*secp256k1.PrivateKey{}, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, )) return tx }, @@ -706,7 +1397,7 @@ func TestSemanticVerifierExportTx(t *testing.T) { state := state.NewMockChain(ctrl) tx := txs.Tx{ - Unsigned: &baseTx, + Unsigned: &exportTx, } state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) @@ -757,53 +1448,86 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ids.GenerateTestID(), nil) 
ctx.ValidatorState = validatorState - typeToFxIndex := make(map[reflect.Type]int) - secpFx := &secp256k1fx.Fx{} - parser, err := txs.NewCustomParser( - typeToFxIndex, - new(mockable.Clock), - logging.NoWarn{}, - []fxs.Fx{ - secpFx, - }, - ) - require.NoError(err) - - codec := parser.Codec() - txID := ids.GenerateTestID() + // UTXO to be spent + inputTxID := ids.GenerateTestID() utxoID := avax.UTXOID{ - TxID: txID, - OutputIndex: 2, + TxID: inputTxID, + OutputIndex: 0, } + + feeAssetID := ids.GenerateTestID() asset := avax.Asset{ - ID: ids.GenerateTestID(), + ID: feeAssetID, } - inputSigner := secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + } + utxoAmount := 100 + feeConfig.TxFee + utxoOut := secp256k1fx.TransferOutput{ + Amt: utxoAmount, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &utxoOut, + } + + // Input spending the UTXO + inputSigners := secp256k1fx.Input{ + SigIndices: []uint32{0}, } fxInput := secp256k1fx.TransferInput{ - Amt: 12345, - Input: inputSigner, + Amt: utxoAmount, + Input: inputSigners, } input := avax.TransferableInput{ UTXOID: utxoID, Asset: asset, In: &fxInput, } - baseTx := txs.BaseTx{ - BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{ - &input, - }, + + // Output produced by BaseTx + fxOutput := secp256k1fx.TransferOutput{ + Amt: 100, + OutputOwners: outputOwners, + } + output := avax.TransferableOutput{ + Asset: asset, + Out: &fxOutput, + } + + // BaseTx + baseTx := avax.BaseTx{ + Outs: []*avax.TransferableOutput{ + &output, + }, + Ins: []*avax.TransferableInput{ + &input, }, } + exportTx := txs.ExportTx{ - BaseTx: baseTx, + BaseTx: txs.BaseTx{ + BaseTx: baseTx, + }, DestinationChain: ctx.CChainID, } + typeToFxIndex := make(map[reflect.Type]int) + secpFx := &secp256k1fx.Fx{} + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + 
logging.NoWarn{}, + []fxs.Fx{ + secpFx, + }, + ) + require.NoError(err) + + codec := parser.Codec() backend := &Backend{ Ctx: ctx, Config: &feeConfig, @@ -815,26 +1539,11 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { }, TypeToFxIndex: typeToFxIndex, Codec: codec, - FeeAssetID: ids.GenerateTestID(), + FeeAssetID: feeAssetID, Bootstrapped: true, } require.NoError(secpFx.Bootstrapped()) - outputOwners := secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - keys[0].Address(), - }, - } - output := secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - UTXOID: utxoID, - Asset: asset, - Out: &output, - } unsignedCreateAssetTx := txs.CreateAssetTx{ States: []*txs.InitialState{{ FxIndex: 0, @@ -847,7 +1556,7 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { state := state.NewMockChain(ctrl) state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) - state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).Times(2) tx := &txs.Tx{ Unsigned: &exportTx, @@ -884,50 +1593,84 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, ) require.NoError(t, err) - codec := parser.Codec() + + // UTXOs to be spent + utxoAmount := 100 + feeConfig.TxFee + inputTxID := ids.GenerateTestID() utxoID := avax.UTXOID{ - TxID: ids.GenerateTestID(), - OutputIndex: 2, + TxID: inputTxID, + OutputIndex: 0, } + feeAssetID := ids.GenerateTestID() asset := avax.Asset{ - ID: ids.GenerateTestID(), + ID: feeAssetID, } outputOwners := secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{ - keys[0].Address(), - }, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, } - baseTx := txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: ctx.ChainID, - Outs: []*avax.TransferableOutput{{ - Asset: asset, - Out: &secp256k1fx.TransferOutput{ - Amt: 1000, - OutputOwners: outputOwners, - }, - }}, - }, + utxoOut := 
secp256k1fx.TransferOutput{ + Amt: utxoAmount, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &utxoOut, + } + + // Input spending the UTXO + inputSigners := secp256k1fx.Input{ + SigIndices: []uint32{0}, + } + fxInput := secp256k1fx.TransferInput{ + Amt: utxoAmount, + Input: inputSigners, } input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &fxInput, + } + + // Output produced by BaseTx + fxOutput := secp256k1fx.TransferOutput{ + Amt: 100, + OutputOwners: outputOwners, + } + output := avax.TransferableOutput{ + Asset: asset, + Out: &fxOutput, + } + + baseTx := avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: ctx.ChainID, + Outs: []*avax.TransferableOutput{ + &output, + }, + // no inputs here, only imported ones + } + + importedInput := avax.TransferableInput{ UTXOID: utxoID, Asset: asset, In: &secp256k1fx.TransferInput{ - Amt: 12345, + Amt: utxoAmount, Input: secp256k1fx.Input{ SigIndices: []uint32{0}, }, }, } unsignedImportTx := txs.ImportTx{ - BaseTx: baseTx, + BaseTx: txs.BaseTx{ + BaseTx: baseTx, + }, SourceChain: ctx.CChainID, ImportedIns: []*avax.TransferableInput{ - &input, + &importedInput, }, } importTx := &txs.Tx{ @@ -937,6 +1680,7 @@ func TestSemanticVerifierImportTx(t *testing.T) { codec, [][]*secp256k1.PrivateKey{ {keys[0]}, + {keys[0]}, }, )) @@ -951,20 +1695,11 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, TypeToFxIndex: typeToFxIndex, Codec: codec, - FeeAssetID: ids.GenerateTestID(), + FeeAssetID: feeAssetID, Bootstrapped: true, } require.NoError(t, fx.Bootstrapped()) - output := secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: outputOwners, - } - utxo := avax.UTXO{ - UTXOID: utxoID, - Asset: asset, - Out: &output, - } utxoBytes, err := codec.Marshal(txs.CodecVersion, utxo) require.NoError(t, err) @@ -1005,6 +1740,237 @@ func TestSemanticVerifierImportTx(t *testing.T) { }, expectedErr: nil, }, + { + name: "invalid output", + stateFunc: 
func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + utx := unsignedImportTx + utx.Outs = []*avax.TransferableOutput{ + &output, + } + + tx := &txs.Tx{Unsigned: &utx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + expectedErr: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + utx := unsignedImportTx + utx.Outs = outputs + tx := &txs.Tx{Unsigned: &utx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + expectedErr: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + utx := unsignedImportTx + utx.Ins = []*avax.TransferableInput{ + &input, + } + tx := &txs.Tx{Unsigned: &utx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + return tx + }, + expectedErr: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + utx := unsignedImportTx + 
utx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + tx := &txs.Tx{Unsigned: &utx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + {keys[0]}, + })) + return tx + }, + expectedErr: avax.ErrInputsNotSortedUnique, + }, + { + name: "duplicate imported inputs", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + utx := unsignedImportTx + utx.ImportedIns = []*avax.TransferableInput{ + &input, + &input, + } + tx := &txs.Tx{Unsigned: &utx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + })) + return tx + }, + expectedErr: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: inputSigners, + } + + utx := unsignedImportTx + utx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(utx.Ins, make([][]*secp256k1.PrivateKey, 2)) + tx := &txs.Tx{Unsigned: &utx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + })) + return tx + }, + expectedErr: safemath.ErrOverflow, + }, + { + name: "output overflow", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output, + } + avax.SortTransferableOutputs(outputs, codec) + + utx := unsignedImportTx + utx.Outs = outputs + tx := &txs.Tx{Unsigned: &utx} + 
require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + })) + return tx + }, + expectedErr: safemath.ErrOverflow, + }, + { + name: "insufficient funds", + stateFunc: func(*gomock.Controller) state.Chain { + return nil + }, + txFunc: func(require *require.Assertions) *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + utx := unsignedImportTx + utx.ImportedIns = []*avax.TransferableInput{ + &input, + } + tx := &txs.Tx{Unsigned: &utx} + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + })) + return tx + }, + expectedErr: avax.ErrInsufficientFunds, + }, { name: "not allowed input feature extension", stateFunc: func(ctrl *gomock.Controller) state.Chain { @@ -1092,7 +2058,9 @@ func TestSemanticVerifierImportTx(t *testing.T) { stateFunc: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) tx := txs.Tx{ - Unsigned: &baseTx, + Unsigned: &txs.BaseTx{ + BaseTx: baseTx, + }, } state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() state.EXPECT().GetTx(asset.ID).Return(&tx, nil) @@ -1120,3 +2088,451 @@ func TestSemanticVerifierImportTx(t *testing.T) { }) } } + +func TestSemanticVerifierOperationTx(t *testing.T) { + ctx := snowtest.Context(t, snowtest.XChainID) + + var ( + secpFx = &secp256k1fx.Fx{} + nftFx = &nftfx.Fx{} + propertyFx = &propertyfx.Fx{} + ) + + typeToFxIndex := make(map[reflect.Type]int) + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + secpFx, + nftFx, + propertyFx, + }, + ) + + require.NoError(t, err) + codec := parser.Codec() + + feeAssetID := ids.GenerateTestID() + feeAsset := avax.Asset{ + ID: feeAssetID, + } + + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, + } + + utxoAmount := 20 * units.KiloAvax + utxoID := avax.UTXOID{ + TxID: 
ids.GenerateTestID(), + OutputIndex: 1, + } + utxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: feeAsset, + Out: &secp256k1fx.TransferOutput{ + Amt: 20 * units.KiloAvax, + OutputOwners: outputOwners, + }, + } + + opUTXOID := avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: 1, + } + opUTXO := &avax.UTXO{ + UTXOID: opUTXOID, + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.MintOutput{ + OutputOwners: outputOwners, + }, + } + + unsignedCreateAssetTx := txs.CreateAssetTx{ + States: []*txs.InitialState{{ + FxIndex: 0, + }}, + } + createAssetTx := &txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + + opTxInSigner := secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + } + opTxIn := avax.TransferableInput{ + UTXOID: utxoID, + Asset: feeAsset, + In: &secp256k1fx.TransferInput{ + Amt: utxoAmount, + Input: opTxInSigner, + }, + } + opTxOut := avax.TransferableOutput{ + Asset: avax.Asset{ID: feeAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: units.NanoAvax, + OutputOwners: outputOwners, + }, + } + unsignedOperationTx := txs.OperationTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{&opTxIn}, + Outs: []*avax.TransferableOutput{&opTxOut}, + }}, + Ops: []*txs.Operation{{ + Asset: avax.Asset{ID: assetID}, + UTXOIDs: []*avax.UTXOID{ + &opUTXOID, + }, + Op: &secp256k1fx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + MintOutput: secp256k1fx.MintOutput{ + OutputOwners: outputOwners, + }, + TransferOutput: secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + }, + }, + }}, + } + + operationTx := txs.Tx{Unsigned: &unsignedOperationTx} + require.NoError(t, operationTx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + }, + )) + + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: []*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: secpFx, + }, + { + ID: nftfx.ID, + Fx: nftFx, + }, + { + ID: 
propertyfx.ID, + Fx: propertyFx, + }, + }, + Codec: codec, + TypeToFxIndex: typeToFxIndex, + FeeAssetID: feeAssetID, + } + require.NoError(t, secpFx.Bootstrapped()) + require.NoError(t, nftFx.Bootstrapped()) + require.NoError(t, propertyFx.Bootstrapped()) + + tests := []struct { + name string + stateFunc func(*gomock.Controller) state.Chain + txFunc func(*require.Assertions) *txs.Tx + expectedErr error + }{ + { + name: "valid", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(utxo, nil).AnyTimes() + state.EXPECT().GetUTXO(opUTXO.InputID()).Return(opUTXO, nil).AnyTimes() + state.EXPECT().GetTx(feeAssetID).Return(createAssetTx, nil).AnyTimes() + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + return &operationTx + }, + expectedErr: nil, + }, + { + name: "invalid output", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + output := opTxOut + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + unsignedTx := unsignedOperationTx + unsignedTx.Outs = []*avax.TransferableOutput{ + &output, + } + return &txs.Tx{ + Unsigned: &unsignedTx, + Creds: operationTx.Creds, + } + }, + expectedErr: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + output0 := opTxOut + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := opTxOut + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + unsignedTx := 
unsignedOperationTx + unsignedTx.Outs = outputs + return &txs.Tx{ + Unsigned: &unsignedTx, + Creds: operationTx.Creds, + } + }, + expectedErr: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + input := opTxIn + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: opTxInSigner, + } + + tx := unsignedOperationTx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: operationTx.Creds, + } + }, + expectedErr: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + unsignedTx := unsignedOperationTx + unsignedTx.Ins = []*avax.TransferableInput{ + &opTxIn, + &opTxIn, + } + + tx := &txs.Tx{Unsigned: &unsignedTx} + require.NoError(t, tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + })) + + return tx + }, + expectedErr: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + input0 := opTxIn + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: opTxInSigner, + } + + input1 := opTxIn + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: opTxInSigner, + } + + unsignedTx := unsignedOperationTx + unsignedTx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(unsignedTx.Ins, make([][]*secp256k1.PrivateKey, 2)) + + tx := &txs.Tx{Unsigned: &unsignedTx} + require.NoError(t, tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + })) + return tx + }, + expectedErr: 
safemath.ErrOverflow, + }, + { + name: "output overflow", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + output := opTxOut + output.Out = &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output, + } + avax.SortTransferableOutputs(outputs, codec) + + unsignedTx := unsignedOperationTx + unsignedTx.Outs = outputs + return &txs.Tx{ + Unsigned: &unsignedTx, + Creds: operationTx.Creds, + } + }, + expectedErr: safemath.ErrOverflow, + }, + { + name: "insufficient funds", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + input := opTxIn + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: opTxInSigner, + } + + unsignedTx := unsignedOperationTx + unsignedTx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &unsignedTx, + Creds: operationTx.Creds, + } + }, + expectedErr: avax.ErrInsufficientFunds, + }, + { + name: "barely sufficient funds", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + + barelySufficientUtxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: feeAsset, + Out: &secp256k1fx.TransferOutput{ + Amt: units.NanoAvax + feeConfig.TxFee, + OutputOwners: outputOwners, + }, + } + + state.EXPECT().GetUTXO(utxoID.InputID()).Return(barelySufficientUtxo, nil).AnyTimes() + state.EXPECT().GetUTXO(opUTXO.InputID()).Return(opUTXO, nil).AnyTimes() + state.EXPECT().GetTx(feeAssetID).Return(createAssetTx, nil).AnyTimes() + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + input := opTxIn + input.In = &secp256k1fx.TransferInput{ + Amt: units.NanoAvax + feeConfig.TxFee, + Input: opTxInSigner, + } + + unsignedTx := unsignedOperationTx + unsignedTx.Ins = []*avax.TransferableInput{ + &input, + } + 
return &txs.Tx{ + Unsigned: &unsignedTx, + Creds: operationTx.Creds, + } + }, + expectedErr: nil, + }, + { + name: "barely insufficient funds", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + input := opTxIn + input.In = &secp256k1fx.TransferInput{ + Amt: feeConfig.TxFee, + Input: opTxInSigner, + } + + unsignedTx := unsignedOperationTx + unsignedTx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &unsignedTx, + Creds: operationTx.Creds, + } + }, + expectedErr: avax.ErrInsufficientFunds, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + state := test.stateFunc(ctrl) + tx := test.txFunc(require) + err := tx.Unsigned.Visit(&SemanticVerifier{ + Backend: backend, + State: state, + Tx: tx, + }) + require.ErrorIs(err, test.expectedErr) + }) + } +} diff --git a/vms/avm/txs/executor/syntactic_verifier.go b/vms/avm/txs/executor/syntactic_verifier.go index b964ca1df02e..726f69644eef 100644 --- a/vms/avm/txs/executor/syntactic_verifier.go +++ b/vms/avm/txs/executor/syntactic_verifier.go @@ -13,8 +13,6 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/fee" - "github.com/ava-labs/avalanchego/vms/components/avax" ) const ( @@ -52,7 +50,7 @@ type SyntacticVerifier struct { } func (v *SyntacticVerifier) BaseTx(tx *txs.BaseTx) error { - if err := v.verifyBaseTx(tx, nil, nil); err != nil { + if err := tx.BaseTx.Verify(v.Ctx); err != nil { return err } @@ -104,7 +102,7 @@ func (v *SyntacticVerifier) CreateAssetTx(tx *txs.CreateAssetTx) error { } } - if err := v.verifyBaseTx(&tx.BaseTx, nil, nil); err != nil { + if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { return err 
} @@ -141,7 +139,7 @@ func (v *SyntacticVerifier) OperationTx(tx *txs.OperationTx) error { return errNoOperations } - if err := v.verifyBaseTx(&tx.BaseTx, nil, nil); err != nil { + if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { return err } @@ -190,7 +188,7 @@ func (v *SyntacticVerifier) ImportTx(tx *txs.ImportTx) error { return errNoImportInputs } - if err := v.verifyBaseTx(&tx.BaseTx, tx.ImportedIns, nil); err != nil { + if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { return err } @@ -218,7 +216,7 @@ func (v *SyntacticVerifier) ExportTx(tx *txs.ExportTx) error { return errNoExportOutputs } - if err := v.verifyBaseTx(&tx.BaseTx, nil, tx.ExportedOuts); err != nil { + if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { return err } @@ -240,27 +238,3 @@ func (v *SyntacticVerifier) ExportTx(tx *txs.ExportTx) error { return nil } - -func (v *SyntacticVerifier) verifyBaseTx( - bTx *txs.BaseTx, - importedIns []*avax.TransferableInput, - exportedOuts []*avax.TransferableOutput, -) error { - if err := bTx.BaseTx.Verify(v.Ctx); err != nil { - return err - } - - feeCalculator := fee.NewStaticCalculator(v.Backend.Config.StaticConfig) - fee, err := feeCalculator.CalculateFee(v.Tx) - if err != nil { - return err - } - - return avax.VerifyTx( - fee, - v.FeeAssetID, - [][]*avax.TransferableInput{bTx.Ins, importedIns}, - [][]*avax.TransferableOutput{bTx.Outs, exportedOuts}, - v.Codec, - ) -} diff --git a/vms/avm/txs/executor/syntactic_verifier_test.go b/vms/avm/txs/executor/syntactic_verifier_test.go index 4fc6b10968e4..b473b4515e97 100644 --- a/vms/avm/txs/executor/syntactic_verifier_test.go +++ b/vms/avm/txs/executor/syntactic_verifier_test.go @@ -4,7 +4,6 @@ package executor import ( - "math" "strings" "testing" @@ -22,8 +21,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - safemath 
"github.com/ava-labs/avalanchego/utils/math" ) var ( @@ -163,177 +160,6 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { }, err: avax.ErrMemoTooLarge, }, - { - name: "invalid output", - txFunc: func() *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: 0, - OutputOwners: outputOwners, - } - - baseTx := baseTx - baseTx.Outs = []*avax.TransferableOutput{ - &output, - } - return &txs.Tx{ - Unsigned: &txs.BaseTx{BaseTx: baseTx}, - Creds: creds, - } - }, - err: secp256k1fx.ErrNoValueOutput, - }, - { - name: "unsorted outputs", - txFunc: func() *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] - - baseTx := baseTx - baseTx.Outs = outputs - return &txs.Tx{ - Unsigned: &txs.BaseTx{BaseTx: baseTx}, - Creds: creds, - } - }, - err: avax.ErrOutputsNotSorted, - }, - { - name: "invalid input", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 0, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &txs.BaseTx{BaseTx: baseTx}, - Creds: creds, - } - }, - err: secp256k1fx.ErrNoValueInput, - }, - { - name: "duplicate inputs", - txFunc: func() *txs.Tx { - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - &input, - } - return &txs.Tx{ - Unsigned: &txs.BaseTx{BaseTx: baseTx}, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: avax.ErrInputsNotSortedUnique, - }, - { - name: "input overflow", - txFunc: func() *txs.Tx { - input0 := input - input0.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - input1 := input - 
input1.UTXOID.OutputIndex++ - input1.In = &secp256k1fx.TransferInput{ - Amt: math.MaxUint64, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input0, - &input1, - } - avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) - return &txs.Tx{ - Unsigned: &txs.BaseTx{BaseTx: baseTx}, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "output overflow", - txFunc: func() *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - - baseTx := baseTx - baseTx.Outs = outputs - return &txs.Tx{ - Unsigned: &txs.BaseTx{BaseTx: baseTx}, - Creds: creds, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "insufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &txs.BaseTx{BaseTx: baseTx}, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, { name: "invalid credential", txFunc: func() *txs.Tx { @@ -355,46 +181,6 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, - { - name: "barely sufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &txs.BaseTx{BaseTx: baseTx}, - Creds: creds, - } - }, - err: nil, - }, - { - name: "barely insufficient funds", - txFunc: func() *txs.Tx { - input := input 
- input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee - 1, - Input: inputSigners, - } - - baseTx := baseTx - baseTx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &txs.BaseTx{BaseTx: baseTx}, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -658,177 +444,6 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { }, err: avax.ErrMemoTooLarge, }, - { - name: "invalid output", - txFunc: func() *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: 0, - OutputOwners: outputOwners, - } - - tx := tx - tx.Outs = []*avax.TransferableOutput{ - &output, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: secp256k1fx.ErrNoValueOutput, - }, - { - name: "unsorted outputs", - txFunc: func() *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] - - tx := tx - tx.Outs = outputs - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrOutputsNotSorted, - }, - { - name: "invalid input", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 0, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: secp256k1fx.ErrNoValueInput, - }, - { - name: "duplicate inputs", - txFunc: func() *txs.Tx { - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: avax.ErrInputsNotSortedUnique, - }, - { - 
name: "input overflow", - txFunc: func() *txs.Tx { - input0 := input - input0.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - input1 := input - input1.UTXOID.OutputIndex++ - input1.In = &secp256k1fx.TransferInput{ - Amt: math.MaxUint64, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input0, - &input1, - } - avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) - return &txs.Tx{ - Unsigned: &tx, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "output overflow", - txFunc: func() *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - - tx := tx - tx.Outs = outputs - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "insufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, { name: "invalid nil state", txFunc: func() *txs.Tx { @@ -964,46 +579,6 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, - { - name: "barely sufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.CreateAssetTxFee, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: nil, - }, - { - name: "barely insufficient funds", - 
txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.CreateAssetTxFee - 1, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -1180,219 +755,55 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { err: avax.ErrMemoTooLarge, }, { - name: "invalid output", + name: "invalid nil op", txFunc: func() *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: 0, - OutputOwners: outputOwners, - } - tx := tx - tx.Outs = []*avax.TransferableOutput{ - &output, + tx.Ops = []*txs.Operation{ + nil, } return &txs.Tx{ Unsigned: &tx, Creds: creds, } }, - err: secp256k1fx.ErrNoValueOutput, + err: txs.ErrNilOperation, }, { - name: "unsorted outputs", + name: "invalid nil fx op", txFunc: func() *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] + op := op + op.Op = nil tx := tx - tx.Outs = outputs + tx.Ops = []*txs.Operation{ + &op, + } return &txs.Tx{ Unsigned: &tx, Creds: creds, } }, - err: avax.ErrOutputsNotSorted, + err: txs.ErrNilFxOperation, }, { - name: "invalid input", + name: "invalid duplicated op UTXOs", txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 0, - Input: inputSigners, + op := op + op.UTXOIDs = []*avax.UTXOID{ + &opUTXOID, + &opUTXOID, } tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, + tx.Ops = []*txs.Operation{ + &op, } return &txs.Tx{ Unsigned: &tx, Creds: creds, } }, - err: 
secp256k1fx.ErrNoValueInput, - }, - { - name: "duplicate inputs", - txFunc: func() *txs.Tx { - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: avax.ErrInputsNotSortedUnique, - }, - { - name: "input overflow", - txFunc: func() *txs.Tx { - input0 := input - input0.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - input1 := input - input1.UTXOID.OutputIndex++ - input1.In = &secp256k1fx.TransferInput{ - Amt: math.MaxUint64, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input0, - &input1, - } - avax.SortTransferableInputsWithSigners(tx.Ins, make([][]*secp256k1.PrivateKey, 2)) - return &txs.Tx{ - Unsigned: &tx, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "output overflow", - txFunc: func() *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output, - } - avax.SortTransferableOutputs(outputs, codec) - - tx := tx - tx.Outs = outputs - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "insufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, - { - name: "invalid nil op", - txFunc: func() *txs.Tx { - tx := tx - tx.Ops = []*txs.Operation{ - nil, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: txs.ErrNilOperation, - }, - { - name: "invalid nil fx op", - txFunc: func() *txs.Tx { - op := op - op.Op = nil - - tx := tx - tx.Ops = []*txs.Operation{ - &op, - } - return &txs.Tx{ - Unsigned: &tx, 
- Creds: creds, - } - }, - err: txs.ErrNilFxOperation, - }, - { - name: "invalid duplicated op UTXOs", - txFunc: func() *txs.Tx { - op := op - op.UTXOIDs = []*avax.UTXOID{ - &opUTXOID, - &opUTXOID, - } - - tx := tx - tx.Ops = []*txs.Operation{ - &op, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: txs.ErrNotSortedAndUniqueUTXOIDs, + err: txs.ErrNotSortedAndUniqueUTXOIDs, }, { name: "invalid duplicated UTXOs across ops", @@ -1453,46 +864,6 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, - { - name: "barely sufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: nil, - }, - { - name: "barely insufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee - 1, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -1649,189 +1020,6 @@ func TestSyntacticVerifierImportTx(t *testing.T) { }, err: avax.ErrMemoTooLarge, }, - { - name: "invalid output", - txFunc: func() *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: 0, - OutputOwners: outputOwners, - } - - tx := tx - tx.Outs = []*avax.TransferableOutput{ - &output, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: secp256k1fx.ErrNoValueOutput, - }, - { - name: "unsorted outputs", - txFunc: func() *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = 
&secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] - - tx := tx - tx.Outs = outputs - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrOutputsNotSorted, - }, - { - name: "invalid input", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 0, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: secp256k1fx.ErrNoValueInput, - }, - { - name: "duplicate inputs", - txFunc: func() *txs.Tx { - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - &cred, - }, - } - }, - err: avax.ErrInputsNotSortedUnique, - }, - { - name: "duplicate imported inputs", - txFunc: func() *txs.Tx { - tx := tx - tx.ImportedIns = []*avax.TransferableInput{ - &input, - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: avax.ErrInputsNotSortedUnique, - }, - { - name: "input overflow", - txFunc: func() *txs.Tx { - input0 := input - input0.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - input1 := input - input1.UTXOID.OutputIndex++ - input1.In = &secp256k1fx.TransferInput{ - Amt: math.MaxUint64, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input0, - &input1, - } - avax.SortTransferableInputsWithSigners(tx.Ins, make([][]*secp256k1.PrivateKey, 2)) - return &txs.Tx{ - Unsigned: &tx, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "output overflow", - txFunc: func() *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - 
OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output, - } - avax.SortTransferableOutputs(outputs, codec) - - tx := tx - tx.Outs = outputs - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "insufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - tx := tx - tx.ImportedIns = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, { name: "invalid credential", txFunc: func() *txs.Tx { @@ -1853,46 +1041,6 @@ func TestSyntacticVerifierImportTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, - { - name: "barely sufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee, - Input: inputSigners, - } - - tx := tx - tx.ImportedIns = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: nil, - }, - { - name: "barely insufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee - 1, - Input: inputSigners, - } - - tx := tx - tx.ImportedIns = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -2049,201 +1197,6 @@ func TestSyntacticVerifierExportTx(t *testing.T) { }, err: avax.ErrMemoTooLarge, }, - { - name: "invalid output", - txFunc: func() *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: 0, - OutputOwners: outputOwners, - } - - tx := tx - tx.Outs = []*avax.TransferableOutput{ - &output, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: secp256k1fx.ErrNoValueOutput, - }, - { - name: 
"unsorted outputs", - txFunc: func() *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] - - tx := tx - tx.Outs = outputs - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrOutputsNotSorted, - }, - { - name: "unsorted exported outputs", - txFunc: func() *txs.Tx { - output0 := output - output0.Out = &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: outputOwners, - } - - output1 := output - output1.Out = &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output0, - &output1, - } - avax.SortTransferableOutputs(outputs, codec) - outputs[0], outputs[1] = outputs[1], outputs[0] - - tx := tx - tx.ExportedOuts = outputs - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrOutputsNotSorted, - }, - { - name: "invalid input", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 0, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: secp256k1fx.ErrNoValueInput, - }, - { - name: "duplicate inputs", - txFunc: func() *txs.Tx { - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: avax.ErrInputsNotSortedUnique, - }, - { - name: "input overflow", - txFunc: func() *txs.Tx { - input0 := input - input0.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - input1 := input - input1.UTXOID.OutputIndex++ - input1.In = &secp256k1fx.TransferInput{ - 
Amt: math.MaxUint64, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input0, - &input1, - } - avax.SortTransferableInputsWithSigners(tx.Ins, make([][]*secp256k1.PrivateKey, 2)) - return &txs.Tx{ - Unsigned: &tx, - Creds: []*fxs.FxCredential{ - &cred, - &cred, - }, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "output overflow", - txFunc: func() *txs.Tx { - output := output - output.Out = &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: outputOwners, - } - - outputs := []*avax.TransferableOutput{ - &output, - } - avax.SortTransferableOutputs(outputs, codec) - - tx := tx - tx.Outs = outputs - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: safemath.ErrOverflow, - }, - { - name: "insufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: 1, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, { name: "invalid credential", txFunc: func() *txs.Tx { @@ -2265,46 +1218,6 @@ func TestSyntacticVerifierExportTx(t *testing.T) { }, err: errWrongNumberOfCredentials, }, - { - name: "barely sufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: nil, - }, - { - name: "barely insufficient funds", - txFunc: func() *txs.Tx { - input := input - input.In = &secp256k1fx.TransferInput{ - Amt: fxOutput.Amt + feeConfig.TxFee - 1, - Input: inputSigners, - } - - tx := tx - tx.Ins = []*avax.TransferableInput{ - &input, - } - return &txs.Tx{ - Unsigned: &tx, - Creds: creds, - } - }, - err: avax.ErrInsufficientFunds, - }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) {