From 4535a50d83a78437d48b6e0e02f66c60f3acbe28 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Fri, 17 Nov 2023 13:40:56 -0500 Subject: [PATCH] Squashed commit of the following: commit fe72c5b91d09d8c73486e7ce71cfdd50a517449a Author: Stephen Buttolph Date: Fri Nov 17 01:23:21 2023 -0500 Remove `common.Config` functions (#2328) commit 585424e6a4e4dc7eec52017a90b3026704ebc01e Author: Stephen Buttolph Date: Fri Nov 17 00:39:46 2023 -0500 Unexport avalanche constant from common package (#2327) commit 85201128fbbb05e85e5164dda8fb3355390234a3 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu Nov 16 20:53:10 2023 -0800 Move `network` implementation to separate package (#2296) Signed-off-by: Stephen Buttolph Co-authored-by: Stephen Buttolph commit 5236d7280da25fe83f3e90594da17b316d1ca1d8 Author: Stephen Buttolph Date: Thu Nov 16 20:08:57 2023 -0500 Remove useless anon functions (#2326) commit e7ca38b5268d2762e30839ad039333ac6841724c Author: Dan Laine Date: Thu Nov 16 16:03:17 2023 -0500 Update zap dependency to v1.26.0 (#2325) commit 6900e72a28c38886ce615eb90148654c90194f7c Author: Dan Laine Date: Thu Nov 16 15:20:19 2023 -0500 nit: loop --> variadic (#2316) commit 35fbb3a6f05f387bbb3dd17416e552674a0b3b63 Author: Alberto Benegiamo Date: Thu Nov 16 10:33:18 2023 -0700 Pchain - Cleanup NodeID generation in UTs (#2291) Co-authored-by: Dan Laine Co-authored-by: Stephen Buttolph commit 043644f969ff1a4af0d302fd25cce84506aaa99e Author: Stephen Buttolph Date: Thu Nov 16 12:13:09 2023 -0500 Refactor bootstrapper implementation into consensus (#2300) Co-authored-by: Dan Laine commit f1ec30c3427ce506dd51457c9189cc8089f2e4e5 Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Thu Nov 16 12:03:00 2023 -0500 Update `error_code` to be int32 instead of uint32. 
(#2322) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit 348f842050fb8772a206ddde3fd9cc48565e3566 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu Nov 16 08:39:03 2023 -0800 Remove `Network` interface from `Builder` (#2312) commit 6484de4fa06323afa11e206a3b960916019250fd Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Thu Nov 16 04:31:34 2023 -0500 Rename AppRequestFailed to AppError (#2321) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit 3d0611ce0103ecfa763ebe1e292d78f98e99c70c Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Wed Nov 15 19:24:10 2023 -0500 Remove error from SDK AppGossip handler (#2252) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit 01a1bbe788905d822a1f8ccccf6a9c6834b75e9a Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Nov 15 16:01:29 2023 -0800 Remove `AddUnverifiedTx` from `Builder` (#2311) commit e8ef4ad2a20e1f5e817068bc2101090f49a2124d Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Nov 15 14:15:17 2023 -0800 Move `AddUnverifiedTx` logic to `network.IssueTx` (#2310) commit dcc6ea863146e119e5ce6e2695967e565df44c52 Author: Stephen Buttolph Date: Wed Nov 15 17:13:19 2023 -0500 Use zap.Stringer rather than zap.Any (#2320) commit 44f3aba31269e9eb2a66ae974115081024566ce6 Author: Stephen Buttolph Date: Wed Nov 15 17:12:57 2023 -0500 Replace unique slices with sets in the engine interface (#2317) commit d00b67fdb6e6ce92dc092b3b946c669c59d4ad6c Author: Stephen Buttolph Date: Wed Nov 15 11:43:17 2023 -0500 Simplify avalanche bootstrapping (#2286) commit 5dff15390e71c5ce73a48a27aeb904917b9bde62 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue Nov 14 12:47:06 2023 -0800 Add `VerifyTx` to `executor.Manager` (#2293) commit 
29f86e96beb400f08966a8ec4f998dced96979e6 Author: marun Date: Tue Nov 14 21:25:25 2023 +0100 e2e: More fixture refinement in support of coreth integration testing (#2275) commit 72d2fae887ba9205502a8061dfddcf9f076fa972 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue Nov 14 10:26:39 2023 -0800 Add `recentTxsLock` to platform `network` struct (#2294) commit eb21b422cc75bb99340a2294bab03f8c307685b2 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue Nov 14 10:24:01 2023 -0800 Move management of platformvm preferred block to `executor.Manager` (#2292) commit 7f70fcf9ae97520c42222b5a8387ebead2ba1b9b Author: Stephen Buttolph Date: Mon Nov 13 15:18:34 2023 -0500 Simplify get server creation (#2285) Co-authored-by: Dan Laine commit baf0ef7df304171f6e430a836e17cc2adf874292 Author: Dan Laine Date: Mon Nov 13 10:33:31 2023 -0500 `merkledb` -- Add `Clearer` interface (#2277) commit b8746dea22995ea30d7e9ae1c273160349afb910 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu Nov 9 16:21:04 2023 -0800 Embed `noop` handler for all unhandled messages (#2288) commit 86201ae6bf1d3ba7119d2464aff64c3a5325a44f Author: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> Date: Thu Nov 9 14:08:10 2023 -0500 Remove sentinel node from MerkleDB proofs (#2106) Signed-off-by: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> Co-authored-by: Stephen Buttolph Co-authored-by: Dan Laine commit 094ce50a65e7baaa5186b19fec997c07d1e4b559 Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Thu Nov 9 13:58:28 2023 -0500 Remove Lazy Initialize on Node (#1384) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Dan Laine commit e3f121299b8c2366f6085c52e4fe6bcdb0397303 Author: Alberto Benegiamo Date: Thu Nov 9 10:08:59 2023 -0700 Genesis validators cleanup (#2282) commit 151621ff66f28266d140eb28f423071578d87643 Author: Alberto Benegiamo Date: Thu Nov 9 
09:33:42 2023 -0700 Cleanup `ids.NodeID` usage (#2280) commit ebaf9d4bb1e6ba5a0e2f24e05b57945d9d68b1dd Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Nov 8 18:08:23 2023 -0500 Move `DropExpiredStakerTxs` to platformvm mempool (#2279) commit c94ff4e3a4d881ce6277d3422be8220bb7751746 Author: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> Date: Wed Nov 8 17:55:02 2023 -0500 MerkleDB:Naming and comments cleanup (#2274) Co-authored-by: Dan Laine commit bcd4a9482c7ce9909faec3fdb2aafc396e17ae37 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Nov 8 17:28:33 2023 -0500 Cleanup platformvm mempool errs (#2278) commit aba404e8e40a080391f5acef5192ea216269406e Author: marun Date: Wed Nov 8 22:54:42 2023 +0100 e2e: Refactor suite setup and helpers to tests/fixture/e2e for reuse by coreth (#2265) commit fc95834bb2ef89faffc4f8de43eaec71aa80cad3 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Nov 8 16:41:39 2023 -0500 `mempool.NewMempool` -> `mempool.New` (#2276) commit 93d88c04437a3053f1de8a761d2504a80b00a512 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Nov 8 15:02:10 2023 -0500 Return if element was deleted from `Hashmap` (#2271) Co-authored-by: Dan Laine commit 1329a591e9d8ca0511037caac9bf947c08ac464e Author: Dan Laine Date: Wed Nov 8 12:56:27 2023 -0500 Add fuzz test for `NewIteratorWithStartAndPrefix` (#1992) Co-authored-by: Alberto Benegiamo commit 22f3c894d712c55dbe84a23fe1cca0242dba77c7 Author: Dan Laine Date: Tue Nov 7 19:12:17 2023 -0500 `merkledb` -- remove unneeded var declarations (#2269) commit 683fcfaa84edb9203a5474caab6d191be4c93af2 Author: Dan Laine Date: Tue Nov 7 17:36:20 2023 -0500 Add read-only database flag (`--db-read-only`) (#2266) commit 52f93c8ef3d35aa83e64ed19ccf1c80137535572 Author: Dan Laine Date: Tue Nov 7 17:34:19 2023 -0500 `merkledb` -- fix nil check in test (#2268) commit 1faec387fe4c950beb770f47ae4725919dcd6c65 Author: 
Dan Laine Date: Tue Nov 7 17:34:09 2023 -0500 `merkledb` -- rename nit (#2267) commit cdcfb5be8668a889be185513628f2b75aa34f19a Author: felipemadero Date: Tue Nov 7 17:06:59 2023 -0300 Use extended public key to derive ledger addresses (#2246) Signed-off-by: felipemadero Co-authored-by: Dan Laine Co-authored-by: Stephen Buttolph commit 7490a925f72f0435878320373e79ca8217a60ceb Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Mon Nov 6 15:39:23 2023 -0500 Document p2p package (#2254) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit 10bd4286d2572044dda9193dc8ceabf7c60c4f42 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon Nov 6 14:49:45 2023 -0500 Remove unused `UnsortedEquals` function (#2264) commit e71089930ee5748ddf1640ad5df5aad8956431b6 Author: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> Date: Mon Nov 6 13:36:34 2023 -0500 Remove Token constants information from keys (#2197) Signed-off-by: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> Signed-off-by: Dan Laine Co-authored-by: Darioush Jalali Co-authored-by: Dan Laine commit 558d8fb602a08468c8bf4fa37bba46261054f43a Author: vuittont60 <81072379+vuittont60@users.noreply.github.com> Date: Mon Nov 6 22:26:43 2023 +0800 Fix typos in docs (#2261) Signed-off-by: vuittont60 <81072379+vuittont60@users.noreply.github.com> commit a8db08e9be5929b12d84a0e0ac06b028e1e012cd Author: marun Date: Mon Nov 6 15:26:27 2023 +0100 e2e: Make NewWallet and NewEthclient regular functions (#2262) commit aaed8f34c3b21430a896a2d6d893d215ed62e520 Author: Stephen Buttolph Date: Sat Nov 4 17:29:12 2023 -0400 Track all subnet validator sets in the validator manager (#2253) commit cec1cd1d4a3f459671e3fcacdda554e31f4af152 Author: Stephen Buttolph Date: Fri Nov 3 19:19:26 2023 -0400 Require poll metrics to be registered (#2260) commit e4cb2cd484896a6c84c1445da1651aa1e6edf8f0 Author: Dan Laine Date: Fri 
Nov 3 17:33:44 2023 -0400 Cleanup `ipcs` `Socket` test (#2257) commit 437ade80946f525e89697d06ffdd0d19c334bd7a Author: marun Date: Fri Nov 3 21:45:12 2023 +0100 Switch to using require.TestingT interface in SenderTest struct (#2258) commit 11f1b55baf037e6919033d767552d76b37b834b2 Author: DoTheBestToGetTheBest <146037313+DoTheBestToGetTheBest@users.noreply.github.com> Date: Thu Nov 2 15:41:47 2023 -0700 feat(api) : Peers function to return the PrimaryAlias of the chainID (#2251) Signed-off-by: DoTheBestToGetTheBest <146037313+DoTheBestToGetTheBest@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit c174c6230528c9d8a3ace05ee2cc47235f06532d Author: Stephen Buttolph Date: Thu Nov 2 16:20:03 2023 -0400 Return log levels from admin.SetLoggerLevel (#2250) commit 20f3580b4912f29adc602ad883e84897f0965cc1 Author: Stephen Buttolph Date: Wed Nov 1 22:11:47 2023 -0400 Update versions for v1.10.15 (#2245) commit 36d1630b7901b27bfe1642f873731292c385702e Author: Cesar <137245636+nytzuga@users.noreply.github.com> Date: Wed Nov 1 22:44:14 2023 -0300 Add nullable option to codec (#2171) Signed-off-by: Cesar <137245636+nytzuga@users.noreply.github.com> Co-authored-by: Stephen Buttolph Co-authored-by: Dan Laine commit 4957ccb4ee4fc4a8f8fe2a0ae02237d83b3574f9 Author: Dan Laine Date: Tue Oct 31 18:38:57 2023 -0400 Add `pebble` as valid value for `--db-type`. 
(#2244) Signed-off-by: Dan Laine Co-authored-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit 047d493084c9e5cad6ecf98ea9e4ee3e19b4c460 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue Oct 31 18:14:14 2023 -0400 Add `BaseTx` support to platformvm (#2232) Signed-off-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> commit 1f9df8f40543043a8024c09e196b3d92ec4df971 Author: Dan Laine Date: Tue Oct 31 17:10:05 2023 -0400 Remove `database.Manager` (#2239) Signed-off-by: Dan Laine commit 8d15c2294ea872569ab3669a7100ad7e81fea7b4 Author: Stephen Buttolph Date: Tue Oct 31 13:28:49 2023 -0400 Document host and port behavior in help text (#2236) commit 76d756ff6b6b919f90b80fe61d10643a56bf6a1b Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue Oct 31 11:43:13 2023 -0400 Remove error from Router AppGossip (#2238) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit 5b9678936fb015d9ca838d67358235cc6fdf480c Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Mon Oct 30 16:41:51 2023 -0400 P2P AppRequestFailed protobuf definition (#2111) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit 826f9415f93c6cba04adb52767ad2e98777afba4 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Oct 27 19:41:19 2023 -0400 Fix test typos (#2233) Signed-off-by: marun Co-authored-by: marun commit 66375f57eed975aebd30907394134017e374da8a Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Oct 27 18:44:34 2023 -0400 Trim down size of secp256k1 `Factory` struct (#2223) Signed-off-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> commit 42d4e3ed5dee4e567fd2c122e37a340c7934d43b Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Oct 27 17:44:38 2023 -0400 Enable `perfsprint` linter (#2229) commit 
b83af9bcaa98c79414db7ec040bcf5043511edd7 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Oct 27 17:42:03 2023 -0400 Add `utils.Err` helper (#2212) Signed-off-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- .golangci.yml | 1 + RELEASES.md | 38 +- api/admin/client.go | 16 +- api/admin/client_test.go | 74 +- api/admin/service.go | 91 +-- api/info/service.go | 13 +- api/keystore/keystore.go | 8 +- api/keystore/service.go | 17 - api/keystore/service_test.go | 40 +- api/server/metrics.go | 7 +- app/app.go | 114 +-- chains/linearizable_vm.go | 9 +- chains/manager.go | 176 ++-- codec/reflectcodec/struct_fielder.go | 19 +- codec/reflectcodec/type_codec.go | 217 +++-- codec/reflectcodec/type_codec_test.go | 30 + codec/test_codec.go | 94 ++- config/config.go | 3 +- config/flags.go | 12 +- config/keys.go | 1 + database/corruptabledb/db_test.go | 25 +- database/encdb/db_test.go | 24 +- database/leveldb/db_test.go | 27 +- database/leveldb/metrics.go | 7 +- database/manager/manager.go | 325 -------- database/manager/manager_test.go | 429 ---------- database/manager/versioned_database.go | 27 - database/memdb/db_test.go | 4 + database/meterdb/db_test.go | 24 +- database/pebble/db.go | 20 +- database/pebble/db_test.go | 8 +- database/prefixdb/db_test.go | 4 + database/rpcdb/db_test.go | 7 + database/test_database.go | 69 +- database/versiondb/db_test.go | 4 + genesis/config.go | 31 +- genesis/genesis.go | 4 +- genesis/genesis_local.go | 5 +- genesis/genesis_test.go | 5 +- go.mod | 19 +- go.sum | 46 +- ids/test_generator.go | 9 + indexer/index.go | 6 +- ipcs/eventsocket.go | 12 +- ipcs/socket/socket_test.go | 4 +- main/main.go | 7 +- network/metrics.go | 7 +- network/network.go | 2 +- network/p2p/gossip/gossip.go | 8 +- network/p2p/gossip/handler.go | 8 +- network/p2p/handler.go | 37 +- network/p2p/handler_test.go | 20 +- network/p2p/mocks/mock_handler.go | 6 +- 
network/p2p/router.go | 48 +- network/p2p/throttler_handler.go | 11 +- network/p2p/throttler_handler_test.go | 65 +- network/peer/gossip_tracker_metrics.go | 8 +- network/peer/set_test.go | 16 +- network/peer/upgrader.go | 8 +- .../throttling/inbound_resource_throttler.go | 7 +- network/throttling/outbound_msg_throttler.go | 6 +- node/config.go | 3 + node/insecure_validator_manager.go | 2 +- node/node.go | 384 ++++----- proto/README.md | 2 +- proto/p2p/p2p.proto | 16 +- proto/pb/p2p/p2p.pb.go | 603 ++++++++------ proto/pb/sync/sync.pb.go | 103 +-- proto/pb/sync/sync_grpc.pb.go | 37 + proto/pb/vm/vm.pb.go | 763 ++++++++---------- proto/sync/sync.proto | 2 + proto/vm/vm.proto | 9 +- snow/consensus/metrics/polls.go | 7 +- .../snowman/bootstrapper/majority.go | 110 +++ .../snowman/bootstrapper/majority_test.go | 396 +++++++++ .../snowman/bootstrapper/minority.go | 77 ++ .../snowman/bootstrapper/minority_test.go | 242 ++++++ snow/consensus/snowman/bootstrapper/noop.go | 27 + .../snowman/bootstrapper/noop_test.go | 23 + snow/consensus/snowman/bootstrapper/poll.go | 23 + .../snowman/bootstrapper/poll_test.go | 15 + .../snowman/bootstrapper/requests.go | 48 ++ .../consensus/snowman/bootstrapper/sampler.go | 49 ++ .../snowman/bootstrapper/sampler_test.go | 75 ++ snow/consensus/snowman/poll/set.go | 18 +- snow/consensus/snowman/poll/set_test.go | 52 +- .../avalanche/bootstrap/bootstrapper.go | 147 ++-- .../avalanche/bootstrap/bootstrapper_test.go | 458 +---------- snow/engine/avalanche/bootstrap/config.go | 24 +- snow/engine/avalanche/bootstrap/metrics.go | 6 +- snow/engine/avalanche/getter/getter.go | 52 +- snow/engine/avalanche/getter/getter_test.go | 76 +- snow/engine/avalanche/vertex/mock_vm.go | 4 +- .../common/appsender/appsender_client.go | 8 +- snow/engine/common/bootstrapper.go | 338 ++------ snow/engine/common/config.go | 18 - snow/engine/common/engine.go | 9 +- snow/engine/common/no_ops_handlers.go | 5 +- snow/engine/common/queue/jobs.go | 6 +- 
snow/engine/common/queue/state.go | 6 +- snow/engine/common/requests_test.go | 4 +- snow/engine/common/test_config.go | 1 - snow/engine/common/test_engine.go | 15 +- snow/engine/common/test_sender.go | 3 +- snow/engine/common/test_vm.go | 6 +- snow/engine/common/traced_engine.go | 17 +- snow/engine/common/tracker/peers.go | 7 +- snow/engine/common/vm.go | 4 +- snow/engine/snowman/block/mocks/chain_vm.go | 4 +- snow/engine/snowman/bootstrap/bootstrapper.go | 10 +- .../snowman/bootstrap/bootstrapper_test.go | 12 +- snow/engine/snowman/bootstrap/metrics.go | 7 +- snow/engine/snowman/getter/getter.go | 54 +- snow/engine/snowman/getter/getter_test.go | 101 +-- snow/engine/snowman/syncer/state_syncer.go | 14 +- .../snowman/syncer/state_syncer_test.go | 43 +- snow/engine/snowman/syncer/utils_test.go | 10 +- snow/engine/snowman/transitive.go | 17 +- snow/engine/snowman/transitive_test.go | 10 +- snow/networking/handler/handler.go | 25 +- snow/networking/handler/handler_test.go | 5 +- snow/networking/handler/parser.go | 17 +- .../networking/router/chain_router_metrics.go | 7 +- snow/networking/sender/sender_test.go | 4 +- snow/networking/timeout/manager.go | 4 +- snow/networking/timeout/manager_test.go | 2 +- snow/networking/tracker/resource_tracker.go | 7 +- .../tracker/resource_tracker_test.go | 4 +- snow/networking/tracker/targeter_test.go | 4 +- .../gvalidators/validator_state_server.go | 2 +- snow/validators/logger.go | 10 +- snow/validators/manager_test.go | 12 +- snow/validators/set_test.go | 16 +- tests/e2e/banff/suites.go | 4 +- tests/e2e/c/dynamic_fees.go | 7 +- tests/e2e/c/interchain_workflow.go | 9 +- tests/e2e/e2e_test.go | 76 +- tests/e2e/faultinjection/duplicate_node_id.go | 2 +- tests/e2e/ignore.go | 13 + tests/e2e/p/interchain_workflow.go | 13 +- tests/e2e/p/permissionless_subnets.go | 4 +- tests/e2e/p/staking_rewards.go | 19 +- tests/e2e/p/workflow.go | 4 +- tests/e2e/static-handlers/suites.go | 13 +- tests/e2e/x/interchain_workflow.go | 9 +- 
tests/e2e/x/transfer/virtuous.go | 4 +- tests/{ => fixture}/e2e/describe.go | 0 tests/fixture/e2e/env.go | 138 ++++ tests/fixture/e2e/flags.go | 57 ++ tests/{e2e/e2e.go => fixture/e2e/helpers.go} | 111 +-- tests/fixture/test_data_server_test.go | 3 +- tests/fixture/testnet/local/network.go | 22 +- tests/upgrade/upgrade_test.go | 2 +- utils/beacon/set_test.go | 6 +- utils/compare/compare.go | 27 - utils/compare/compare_test.go | 26 - utils/crypto/bls/bls_benchmark_test.go | 8 +- utils/crypto/ledger/ledger.go | 31 +- utils/crypto/ledger/ledger_test.go | 4 +- utils/crypto/secp256k1/rfc6979_test.go | 3 +- utils/crypto/secp256k1/secp256k1.go | 57 +- .../secp256k1/secp256k1_benchmark_test.go | 4 +- utils/crypto/secp256k1/secp256k1_test.go | 91 ++- utils/crypto/secp256k1/test_keys.go | 5 +- utils/error.go | 13 + utils/ips/ip_port.go | 2 +- utils/ips/ip_test.go | 3 +- utils/linkedhashmap/linkedhashmap.go | 12 +- utils/linkedhashmap/linkedhashmap_test.go | 8 +- utils/metric/api_interceptor.go | 7 +- utils/resource/metrics.go | 7 +- utils/sampler/rand_test.go | 4 +- utils/set/bits.go | 4 +- utils/sorting.go | 13 - utils/sorting_test.go | 25 - utils/timer/adaptive_timeout_manager.go | 7 +- version/compatibility.json | 3 + version/constants.go | 37 +- vms/avm/block/builder/builder_test.go | 6 +- vms/avm/block/executor/manager.go | 4 +- vms/avm/block/parser.go | 12 +- vms/avm/environment_test.go | 18 +- vms/avm/network/network.go | 2 - vms/avm/service_test.go | 6 +- vms/avm/states/state.go | 26 +- .../txs/executor/semantic_verifier_test.go | 6 +- vms/avm/txs/mempool/mempool.go | 14 +- vms/avm/txs/parser.go | 9 +- vms/avm/vm.go | 10 +- vms/avm/vm_test.go | 43 +- vms/components/keystore/codec.go | 9 +- vms/components/keystore/user.go | 9 +- vms/components/keystore/user_test.go | 6 +- vms/components/message/codec.go | 9 +- vms/example/xsvm/execute/tx.go | 5 +- vms/example/xsvm/tx/codec.go | 9 +- vms/example/xsvm/tx/tx.go | 6 +- vms/example/xsvm/vm.go | 6 +- vms/metervm/block_vm.go 
| 4 +- vms/metervm/vertex_vm.go | 4 +- vms/nftfx/fx.go | 6 +- vms/platformvm/api/static_service.go | 30 +- vms/platformvm/api/static_service_test.go | 40 +- vms/platformvm/block/builder/builder.go | 118 +-- vms/platformvm/block/builder/builder_test.go | 4 +- vms/platformvm/block/builder/helpers_test.go | 60 +- vms/platformvm/block/builder/network.go | 175 ---- vms/platformvm/block/builder/network_test.go | 147 ---- vms/platformvm/block/codec.go | 9 +- vms/platformvm/block/executor/helpers_test.go | 39 +- vms/platformvm/block/executor/manager.go | 48 +- vms/platformvm/block/executor/mock_manager.go | 43 + .../block/executor/proposal_block_test.go | 73 +- .../block/executor/standard_block_test.go | 48 +- vms/platformvm/metrics/metrics.go | 2 +- vms/platformvm/metrics/tx_metrics.go | 9 +- vms/platformvm/network/main_test.go | 14 + vms/platformvm/network/network.go | 208 +++++ vms/platformvm/network/network_test.go | 353 ++++++++ vms/platformvm/service.go | 75 +- vms/platformvm/service_test.go | 25 +- .../state/disk_staker_diff_iterator.go | 2 +- .../state/disk_staker_diff_iterator_test.go | 4 +- vms/platformvm/state/mock_state.go | 14 - vms/platformvm/state/state.go | 129 +-- vms/platformvm/state/state_test.go | 2 - .../add_permissionless_delegator_tx_test.go | 8 +- .../add_permissionless_validator_tx_test.go | 8 +- vms/platformvm/txs/base_tx.go | 6 + vms/platformvm/txs/base_tx_test.go | 450 ++++++++++- vms/platformvm/txs/builder/builder.go | 51 ++ vms/platformvm/txs/builder/mock_builder.go | 16 + vms/platformvm/txs/codec.go | 6 +- .../txs/executor/advance_time_test.go | 15 +- .../txs/executor/atomic_tx_executor.go | 4 + .../txs/executor/create_chain_test.go | 3 +- vms/platformvm/txs/executor/helpers_test.go | 32 +- vms/platformvm/txs/executor/import_test.go | 3 +- .../txs/executor/proposal_tx_executor.go | 4 + .../txs/executor/proposal_tx_executor_test.go | 42 +- .../txs/executor/standard_tx_executor.go | 31 + .../txs/executor/standard_tx_executor_test.go | 42 +- 
.../txs/executor/tx_mempool_verifier.go | 4 + vms/platformvm/txs/mempool/issuer.go | 5 + vms/platformvm/txs/mempool/mempool.go | 64 +- vms/platformvm/txs/mempool/mempool_test.go | 6 +- vms/platformvm/txs/mempool/remover.go | 5 + .../txs/remove_subnet_validator_tx_test.go | 6 +- .../txs/transfer_subnet_ownership_tx_test.go | 4 +- vms/platformvm/txs/txheap/by_end_time_test.go | 6 +- .../txs/txheap/by_start_time_test.go | 6 +- vms/platformvm/txs/validator_test.go | 10 +- vms/platformvm/txs/visitor.go | 1 + vms/platformvm/validator_set_property_test.go | 23 +- vms/platformvm/validators/manager.go | 21 +- .../validators/manager_benchmark_test.go | 6 +- vms/platformvm/vm.go | 46 +- vms/platformvm/vm_regression_test.go | 194 ++--- vms/platformvm/vm_test.go | 283 ++++--- vms/platformvm/warp/codec.go | 9 +- vms/platformvm/warp/payload/codec.go | 9 +- vms/platformvm/warp/validator_test.go | 4 +- vms/propertyfx/fx.go | 6 +- vms/proposervm/batched_vm_test.go | 34 +- vms/proposervm/block/codec.go | 9 +- vms/proposervm/post_fork_option_test.go | 3 +- vms/proposervm/proposer/validators_test.go | 2 +- vms/proposervm/proposer/windower_test.go | 4 +- vms/proposervm/state_syncable_vm_test.go | 13 +- vms/proposervm/vm.go | 9 +- vms/proposervm/vm_regression_test.go | 11 +- vms/proposervm/vm_test.go | 140 ++-- vms/rpcchainvm/batched_vm_test.go | 6 +- vms/rpcchainvm/state_syncable_vm_test.go | 8 +- vms/rpcchainvm/vm_client.go | 51 +- vms/rpcchainvm/vm_server.go | 65 +- vms/rpcchainvm/with_context_vm_test.go | 6 +- vms/secp256k1fx/fx.go | 15 +- vms/secp256k1fx/keychain.go | 4 +- vms/secp256k1fx/keychain_test.go | 12 +- vms/tracedvm/block_vm.go | 4 +- vms/tracedvm/vertex_vm.go | 4 +- wallet/chain/p/backend_visitor.go | 4 + wallet/chain/p/signer_visitor.go | 8 + x/merkledb/README.md | 8 +- x/merkledb/cache.go | 2 +- x/merkledb/codec.go | 59 +- x/merkledb/codec_test.go | 98 +-- x/merkledb/db.go | 153 ++-- x/merkledb/db_test.go | 83 +- x/merkledb/helpers_test.go | 4 +- x/merkledb/history.go | 
15 +- x/merkledb/history_test.go | 87 +- x/merkledb/intermediate_node_db.go | 24 +- x/merkledb/intermediate_node_db_test.go | 91 ++- x/merkledb/key.go | 364 ++++----- x/merkledb/key_test.go | 569 +++++++------ x/merkledb/metrics.go | 7 +- x/merkledb/mock_db.go | 14 + x/merkledb/node.go | 36 +- x/merkledb/node_test.go | 33 +- x/merkledb/proof.go | 178 ++-- x/merkledb/proof_test.go | 277 +++---- x/merkledb/trie_test.go | 82 +- x/merkledb/trieview.go | 203 +++-- x/merkledb/value_node_db.go | 20 +- x/merkledb/value_node_db_test.go | 41 +- x/merkledb/view_iterator.go | 4 +- x/sync/client.go | 21 +- x/sync/client_test.go | 8 +- x/sync/db.go | 1 + x/sync/g_db/db_client.go | 19 +- x/sync/g_db/db_server.go | 18 +- x/sync/manager.go | 70 +- x/sync/metrics.go | 7 +- x/sync/network_server.go | 1 - x/sync/network_server_test.go | 2 +- x/sync/peer_tracker.go | 7 +- x/sync/sync_test.go | 7 +- 319 files changed, 7770 insertions(+), 6601 deletions(-) create mode 100644 codec/reflectcodec/type_codec_test.go delete mode 100644 database/manager/manager.go delete mode 100644 database/manager/manager_test.go delete mode 100644 database/manager/versioned_database.go create mode 100644 snow/consensus/snowman/bootstrapper/majority.go create mode 100644 snow/consensus/snowman/bootstrapper/majority_test.go create mode 100644 snow/consensus/snowman/bootstrapper/minority.go create mode 100644 snow/consensus/snowman/bootstrapper/minority_test.go create mode 100644 snow/consensus/snowman/bootstrapper/noop.go create mode 100644 snow/consensus/snowman/bootstrapper/noop_test.go create mode 100644 snow/consensus/snowman/bootstrapper/poll.go create mode 100644 snow/consensus/snowman/bootstrapper/poll_test.go create mode 100644 snow/consensus/snowman/bootstrapper/requests.go create mode 100644 snow/consensus/snowman/bootstrapper/sampler.go create mode 100644 snow/consensus/snowman/bootstrapper/sampler_test.go create mode 100644 tests/e2e/ignore.go rename tests/{ => fixture}/e2e/describe.go (100%) 
create mode 100644 tests/fixture/e2e/env.go create mode 100644 tests/fixture/e2e/flags.go rename tests/{e2e/e2e.go => fixture/e2e/helpers.go} (69%) delete mode 100644 utils/compare/compare.go delete mode 100644 utils/compare/compare_test.go create mode 100644 utils/error.go delete mode 100644 vms/platformvm/block/builder/network.go delete mode 100644 vms/platformvm/block/builder/network_test.go create mode 100644 vms/platformvm/network/main_test.go create mode 100644 vms/platformvm/network/network.go create mode 100644 vms/platformvm/network/network_test.go diff --git a/.golangci.yml b/.golangci.yml index 160f617659ff..b5c99facb2b1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -65,6 +65,7 @@ linters: - nakedret - noctx - nolintlint + - perfsprint - prealloc - revive - staticcheck diff --git a/RELEASES.md b/RELEASES.md index 31d9e3605dce..0f8d57aea220 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,41 @@ # Release Notes +## [v1.10.15](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.15) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is updated to `30` all plugins must update to be compatible. 
+ +### Configs + +- Added `pebble` as an allowed option to `--db-type` + +### Fixes + +- Fixed C-chain tracer API panic + +### What's Changed + +- Reduce allocations on insert and remove by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2201 +- `merkledb` -- shift nit by @danlaine in https://github.com/ava-labs/avalanchego/pull/2218 +- Update `golangci-lint` to `v1.55.1` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2228 +- Add json marshal tests to existing serialization tests in `platformvm/txs` pkg by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2227 +- Move all blst function usage to `bls` pkg by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2222 +- `merkledb` -- don't pass `BranchFactor` to `encodeDBNode` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2217 +- Add `utils.Err` helper by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2212 +- Enable `perfsprint` linter by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2229 +- Trim down size of secp256k1 `Factory` struct by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2223 +- Fix test typos by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2233 +- P2P AppRequestFailed protobuf definition by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2111 +- Remove error from Router AppGossip by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2238 +- Document host and port behavior in help text by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2236 +- Remove `database.Manager` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2239 +- Add `BaseTx` support to platformvm by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2232 +- Add `pebble` as valid value for `--db-type`. 
by @danlaine in https://github.com/ava-labs/avalanchego/pull/2244 +- Add nullable option to codec by @nytzuga in https://github.com/ava-labs/avalanchego/pull/2171 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.14...v1.10.15 + ## [v1.10.14](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.14) This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. @@ -687,7 +723,7 @@ This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/av - Add workflow to mark stale issues and PRs by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1443 - Enforce inlining functions with a single error return in `require.NoError` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1500 - `x/sync` / `x/merkledb` -- add `SyncableDB` interface by @danlaine in https://github.com/ava-labs/avalanchego/pull/1555 -- Rename beacon to boostrapper, define bootstrappers in JSON file for cross-language compatiblity by @gyuho in https://github.com/ava-labs/avalanchego/pull/1439 +- Rename beacon to boostrapper, define bootstrappers in JSON file for cross-language compatibility by @gyuho in https://github.com/ava-labs/avalanchego/pull/1439 - add P-chain height indexing by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1447 - Add P-chain `GetBlockByHeight` API method by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1448 - `x/sync` -- use for sending Range Proofs by @danlaine in https://github.com/ava-labs/avalanchego/pull/1537 diff --git a/api/admin/client.go b/api/admin/client.go index 0dcb3e720546..77f3835f060c 100644 --- a/api/admin/client.go +++ b/api/admin/client.go @@ -25,7 +25,7 @@ type Client interface { GetChainAliases(ctx context.Context, chainID string, options ...rpc.Option) ([]string, error) 
Stacktrace(context.Context, ...rpc.Option) error LoadVMs(context.Context, ...rpc.Option) (map[ids.ID][]string, map[ids.ID]string, error) - SetLoggerLevel(ctx context.Context, loggerName, logLevel, displayLevel string, options ...rpc.Option) error + SetLoggerLevel(ctx context.Context, loggerName, logLevel, displayLevel string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error) GetLoggerLevel(ctx context.Context, loggerName string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error) GetConfig(ctx context.Context, options ...rpc.Option) (interface{}, error) } @@ -96,7 +96,7 @@ func (c *client) SetLoggerLevel( logLevel, displayLevel string, options ...rpc.Option, -) error { +) (map[string]LogAndDisplayLevels, error) { var ( logLevelArg logging.Level displayLevelArg logging.Level @@ -105,20 +105,22 @@ func (c *client) SetLoggerLevel( if len(logLevel) > 0 { logLevelArg, err = logging.ToLevel(logLevel) if err != nil { - return err + return nil, err } } if len(displayLevel) > 0 { displayLevelArg, err = logging.ToLevel(displayLevel) if err != nil { - return err + return nil, err } } - return c.requester.SendRequest(ctx, "admin.setLoggerLevel", &SetLoggerLevelArgs{ + res := &LoggerLevelReply{} + err = c.requester.SendRequest(ctx, "admin.setLoggerLevel", &SetLoggerLevelArgs{ LoggerName: loggerName, LogLevel: &logLevelArg, DisplayLevel: &displayLevelArg, - }, &api.EmptyReply{}, options...) + }, res, options...) + return res.LoggerLevels, err } func (c *client) GetLoggerLevel( @@ -126,7 +128,7 @@ func (c *client) GetLoggerLevel( loggerName string, options ...rpc.Option, ) (map[string]LogAndDisplayLevels, error) { - res := &GetLoggerLevelReply{} + res := &LoggerLevelReply{} err := c.requester.SendRequest(ctx, "admin.getLoggerLevel", &GetLoggerLevelArgs{ LoggerName: loggerName, }, res, options...) 
diff --git a/api/admin/client_test.go b/api/admin/client_test.go index 4302bd5350fa..d005c49b448a 100644 --- a/api/admin/client_test.go +++ b/api/admin/client_test.go @@ -63,8 +63,8 @@ func (mc *mockClient) SendRequest(_ context.Context, _ string, _ interface{}, re case *LoadVMsReply: response := mc.response.(*LoadVMsReply) *p = *response - case *GetLoggerLevelReply: - response := mc.response.(*GetLoggerLevelReply) + case *LoggerLevelReply: + response := mc.response.(*LoggerLevelReply) *p = *response case *interface{}: response := mc.response.(*interface{}) @@ -212,54 +212,72 @@ func TestReloadInstalledVMs(t *testing.T) { func TestSetLoggerLevel(t *testing.T) { type test struct { - name string - logLevel string - displayLevel string - serviceErr error - clientErr error + name string + logLevel string + displayLevel string + serviceResponse map[string]LogAndDisplayLevels + serviceErr error + clientErr error } tests := []test{ { name: "Happy path", logLevel: "INFO", displayLevel: "INFO", - serviceErr: nil, - clientErr: nil, + serviceResponse: map[string]LogAndDisplayLevels{ + "Happy path": {LogLevel: logging.Info, DisplayLevel: logging.Info}, + }, + serviceErr: nil, + clientErr: nil, }, { - name: "Service errors", - logLevel: "INFO", - displayLevel: "INFO", - serviceErr: errTest, - clientErr: errTest, + name: "Service errors", + logLevel: "INFO", + displayLevel: "INFO", + serviceResponse: nil, + serviceErr: errTest, + clientErr: errTest, }, { - name: "Invalid log level", - logLevel: "invalid", - displayLevel: "INFO", - serviceErr: nil, - clientErr: logging.ErrUnknownLevel, + name: "Invalid log level", + logLevel: "invalid", + displayLevel: "INFO", + serviceResponse: nil, + serviceErr: nil, + clientErr: logging.ErrUnknownLevel, }, { - name: "Invalid display level", - logLevel: "INFO", - displayLevel: "invalid", - serviceErr: nil, - clientErr: logging.ErrUnknownLevel, + name: "Invalid display level", + logLevel: "INFO", + displayLevel: "invalid", + serviceResponse: 
nil, + serviceErr: nil, + clientErr: logging.ErrUnknownLevel, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + c := client{ - requester: NewMockClient(&api.EmptyReply{}, tt.serviceErr), + requester: NewMockClient( + &LoggerLevelReply{ + LoggerLevels: tt.serviceResponse, + }, + tt.serviceErr, + ), } - err := c.SetLoggerLevel( + res, err := c.SetLoggerLevel( context.Background(), "", tt.logLevel, tt.displayLevel, ) - require.ErrorIs(t, err, tt.clientErr) + require.ErrorIs(err, tt.clientErr) + if tt.clientErr != nil { + return + } + require.Equal(tt.serviceResponse, res) }) } } @@ -296,7 +314,7 @@ func TestGetLoggerLevel(t *testing.T) { c := client{ requester: NewMockClient( - &GetLoggerLevelReply{ + &LoggerLevelReply{ LoggerLevels: tt.serviceResponse, }, tt.serviceErr, diff --git a/api/admin/service.go b/api/admin/service.go index 43e70c37c72c..f94a426b366c 100644 --- a/api/admin/service.go +++ b/api/admin/service.go @@ -224,13 +224,21 @@ func (a *Admin) Stacktrace(_ *http.Request, _ *struct{}, _ *api.EmptyReply) erro return perms.WriteFile(stacktraceFile, stacktrace, perms.ReadWrite) } -// See SetLoggerLevel type SetLoggerLevelArgs struct { LoggerName string `json:"loggerName"` LogLevel *logging.Level `json:"logLevel"` DisplayLevel *logging.Level `json:"displayLevel"` } +type LogAndDisplayLevels struct { + LogLevel logging.Level `json:"logLevel"` + DisplayLevel logging.Level `json:"displayLevel"` +} + +type LoggerLevelReply struct { + LoggerLevels map[string]LogAndDisplayLevels `json:"loggerLevels"` +} + // SetLoggerLevel sets the log level and/or display level for loggers. // If len([args.LoggerName]) == 0, sets the log/display level of all loggers. // Otherwise, sets the log/display level of the loggers named in that argument. @@ -240,7 +248,7 @@ type SetLoggerLevelArgs struct { // Sets the display level of these loggers to args.LogLevel. 
// If args.DisplayLevel == nil, doesn't set the display level of these loggers. // If args.DisplayLevel != nil, must be a valid string representation of a log level. -func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api.EmptyReply) error { +func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, reply *LoggerLevelReply) error { a.Log.Debug("API called", zap.String("service", "admin"), zap.String("method", "setLoggerLevel"), @@ -256,14 +264,7 @@ func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api a.lock.Lock() defer a.lock.Unlock() - var loggerNames []string - if len(args.LoggerName) > 0 { - loggerNames = []string{args.LoggerName} - } else { - // Empty name means all loggers - loggerNames = a.LogFactory.GetLoggerNames() - } - + loggerNames := a.getLoggerNames(args.LoggerName) for _, name := range loggerNames { if args.LogLevel != nil { if err := a.LogFactory.SetLogLevel(name, *args.LogLevel); err != nil { @@ -276,26 +277,18 @@ func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api } } } - return nil -} -type LogAndDisplayLevels struct { - LogLevel logging.Level `json:"logLevel"` - DisplayLevel logging.Level `json:"displayLevel"` + var err error + reply.LoggerLevels, err = a.getLogLevels(loggerNames) + return err } -// See GetLoggerLevel type GetLoggerLevelArgs struct { LoggerName string `json:"loggerName"` } -// See GetLoggerLevel -type GetLoggerLevelReply struct { - LoggerLevels map[string]LogAndDisplayLevels `json:"loggerLevels"` -} - // GetLogLevel returns the log level and display level of all loggers. 
-func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply *GetLoggerLevelReply) error { +func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply *LoggerLevelReply) error { a.Log.Debug("API called", zap.String("service", "admin"), zap.String("method", "getLoggerLevels"), @@ -305,30 +298,11 @@ func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply a.lock.RLock() defer a.lock.RUnlock() - reply.LoggerLevels = make(map[string]LogAndDisplayLevels) - var loggerNames []string - // Empty name means all loggers - if len(args.LoggerName) > 0 { - loggerNames = []string{args.LoggerName} - } else { - loggerNames = a.LogFactory.GetLoggerNames() - } + loggerNames := a.getLoggerNames(args.LoggerName) - for _, name := range loggerNames { - logLevel, err := a.LogFactory.GetLogLevel(name) - if err != nil { - return err - } - displayLevel, err := a.LogFactory.GetDisplayLevel(name) - if err != nil { - return err - } - reply.LoggerLevels[name] = LogAndDisplayLevels{ - LogLevel: logLevel, - DisplayLevel: displayLevel, - } - } - return nil + var err error + reply.LoggerLevels, err = a.getLogLevels(loggerNames) + return err } // GetConfig returns the config that the node was started with. 
@@ -375,3 +349,30 @@ func (a *Admin) LoadVMs(r *http.Request, _ *struct{}, reply *LoadVMsReply) error reply.NewVMs, err = ids.GetRelevantAliases(a.VMManager, loadedVMs) return err } + +func (a *Admin) getLoggerNames(loggerName string) []string { + if len(loggerName) == 0 { + // Empty name means all loggers + return a.LogFactory.GetLoggerNames() + } + return []string{loggerName} +} + +func (a *Admin) getLogLevels(loggerNames []string) (map[string]LogAndDisplayLevels, error) { + loggerLevels := make(map[string]LogAndDisplayLevels) + for _, name := range loggerNames { + logLevel, err := a.LogFactory.GetLogLevel(name) + if err != nil { + return nil, err + } + displayLevel, err := a.LogFactory.GetDisplayLevel(name) + if err != nil { + return nil, err + } + loggerLevels[name] = LogAndDisplayLevels{ + LogLevel: logLevel, + DisplayLevel: displayLevel, + } + } + return loggerLevels, nil +} diff --git a/api/info/service.go b/api/info/service.go index 65b5fb2fdf7f..47112e55b630 100644 --- a/api/info/service.go +++ b/api/info/service.go @@ -208,7 +208,7 @@ type PeersArgs struct { type Peer struct { peer.Info - Benched []ids.ID `json:"benched"` + Benched []string `json:"benched"` } // PeersReply are the results from calling Peers @@ -229,9 +229,18 @@ func (i *Info) Peers(_ *http.Request, args *PeersArgs, reply *PeersReply) error peers := i.networking.PeerInfo(args.NodeIDs) peerInfo := make([]Peer, len(peers)) for index, peer := range peers { + benchedIDs := i.benchlist.GetBenched(peer.ID) + benchedAliases := make([]string, len(benchedIDs)) + for idx, id := range benchedIDs { + alias, err := i.chainManager.PrimaryAlias(id) + if err != nil { + return fmt.Errorf("failed to get primary alias for chain ID %s: %w", id, err) + } + benchedAliases[idx] = alias + } peerInfo[index] = Peer{ Info: peer, - Benched: i.benchlist.GetBenched(peer.ID), + Benched: benchedAliases, } } diff --git a/api/keystore/keystore.go b/api/keystore/keystore.go index fe3bee0e9dc0..cd7f0b8a8f21 100644 --- 
a/api/keystore/keystore.go +++ b/api/keystore/keystore.go @@ -14,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/encdb" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/json" @@ -105,13 +104,12 @@ type keystore struct { bcDB database.Database } -func New(log logging.Logger, dbManager manager.Manager) Keystore { - currentDB := dbManager.Current() +func New(log logging.Logger, db database.Database) Keystore { return &keystore{ log: log, usernameToPassword: make(map[string]*password.Hash), - userDB: prefixdb.New(usersPrefix, currentDB.Database), - bcDB: prefixdb.New(bcsPrefix, currentDB.Database), + userDB: prefixdb.New(usersPrefix, db), + bcDB: prefixdb.New(bcsPrefix, db), } } diff --git a/api/keystore/service.go b/api/keystore/service.go index c0e823c22e7f..d4c845743bbb 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -10,11 +10,8 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/api" - "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/version" ) type service struct { @@ -115,17 +112,3 @@ func (s *service) ExportUser(_ *http.Request, args *ExportUserArgs, reply *Expor reply.Encoding = args.Encoding return nil } - -// CreateTestKeystore returns a new keystore that can be utilized for testing -func CreateTestKeystore() (Keystore, error) { - dbManager, err := manager.NewManagerFromDBs([]*manager.VersionedDatabase{ - { - Database: memdb.New(), - Version: version.Semantic1_0_0, - }, - }) - if err != nil { - return nil, 
err - } - return New(logging.NoLog{}, dbManager), nil -} diff --git a/api/keystore/service_test.go b/api/keystore/service_test.go index a32fd6a716ad..842ab7d76cc7 100644 --- a/api/keystore/service_test.go +++ b/api/keystore/service_test.go @@ -4,15 +4,17 @@ package keystore import ( - "fmt" + "encoding/hex" "math/rand" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/password" ) @@ -23,8 +25,7 @@ var strongPassword = "N_+=_jJ;^(<;{4,:*m6CET}'&N;83FYK.wtNpwp-Jt" // #nosec G101 func TestServiceListNoUsers(t *testing.T) { require := require.New(t) - ks, err := CreateTestKeystore() - require.NoError(err) + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} reply := ListUsersReply{} @@ -35,8 +36,7 @@ func TestServiceListNoUsers(t *testing.T) { func TestServiceCreateUser(t *testing.T) { require := require.New(t) - ks, err := CreateTestKeystore() - require.NoError(err) + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { @@ -58,7 +58,7 @@ func TestServiceCreateUser(t *testing.T) { func genStr(n int) string { b := make([]byte, n) rand.Read(b) // #nosec G404 - return fmt.Sprintf("%x", b)[:n] + return hex.EncodeToString(b)[:n] } // TestServiceCreateUserArgsCheck generates excessively long usernames or @@ -66,8 +66,7 @@ func genStr(n int) string { func TestServiceCreateUserArgsCheck(t *testing.T) { require := require.New(t) - ks, err := CreateTestKeystore() - require.NoError(err) + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { @@ -100,8 +99,7 @@ func TestServiceCreateUserArgsCheck(t *testing.T) { func TestServiceCreateUserWeakPassword(t *testing.T) { require := require.New(t) - ks, err 
:= CreateTestKeystore() - require.NoError(err) + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { @@ -117,8 +115,7 @@ func TestServiceCreateUserWeakPassword(t *testing.T) { func TestServiceCreateDuplicate(t *testing.T) { require := require.New(t) - ks, err := CreateTestKeystore() - require.NoError(err) + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { @@ -140,12 +137,11 @@ func TestServiceCreateDuplicate(t *testing.T) { func TestServiceCreateUserNoName(t *testing.T) { require := require.New(t) - ks, err := CreateTestKeystore() - require.NoError(err) + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} reply := api.EmptyReply{} - err = s.CreateUser(nil, &api.UserPass{ + err := s.CreateUser(nil, &api.UserPass{ Password: strongPassword, }, &reply) require.ErrorIs(err, errEmptyUsername) @@ -154,8 +150,7 @@ func TestServiceCreateUserNoName(t *testing.T) { func TestServiceUseBlockchainDB(t *testing.T) { require := require.New(t) - ks, err := CreateTestKeystore() - require.NoError(err) + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { @@ -185,8 +180,7 @@ func TestServiceExportImport(t *testing.T) { encodings := []formatting.Encoding{formatting.Hex} for _, encoding := range encodings { - ks, err := CreateTestKeystore() - require.NoError(err) + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { @@ -212,8 +206,7 @@ func TestServiceExportImport(t *testing.T) { exportReply := ExportUserReply{} require.NoError(s.ExportUser(nil, &exportArgs, &exportReply)) - newKS, err := CreateTestKeystore() - require.NoError(err) + newKS := New(logging.NoLog{}, memdb.New()) newS := service{ks: newKS.(*keystore)} { @@ -324,8 +317,7 @@ func TestServiceDeleteUser(t *testing.T) { t.Run(tt.desc, func(t *testing.T) { require := require.New(t) - ksIntf, err := CreateTestKeystore() - require.NoError(err) + ksIntf := New(logging.NoLog{}, memdb.New()) ks := ksIntf.(*keystore) s 
:= service{ks: ks} @@ -333,7 +325,7 @@ func TestServiceDeleteUser(t *testing.T) { require.NoError(tt.setup(ks)) } got := &api.EmptyReply{} - err = s.DeleteUser(nil, tt.request, got) + err := s.DeleteUser(nil, tt.request, got) require.ErrorIs(err, tt.expectedErr) if tt.expectedErr != nil { return diff --git a/api/server/metrics.go b/api/server/metrics.go index 6556c3a00763..9859494f3ae4 100644 --- a/api/server/metrics.go +++ b/api/server/metrics.go @@ -9,7 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type metrics struct { @@ -46,13 +46,12 @@ func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, e ), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(m.numProcessing), registerer.Register(m.numCalls), registerer.Register(m.totalDuration), ) - return m, errs.Err + return m, err } func (m *metrics) wrapHandler(chainName string, handler http.Handler) http.Handler { diff --git a/app/app.go b/app/app.go index 76cf137034b3..af651235ba77 100644 --- a/app/app.go +++ b/app/app.go @@ -54,11 +54,45 @@ type App interface { ExitCode() (int, error) } -func New(config node.Config) App { - return &app{ - config: config, - node: &node.Node{}, +func New(config node.Config) (App, error) { + // Set the data directory permissions to be read write. 
+ if err := perms.ChmodR(config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { + return nil, fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) + } + if err := perms.ChmodR(config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { + return nil, fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) + } + + logFactory := logging.NewFactory(config.LoggingConfig) + log, err := logFactory.Make("main") + if err != nil { + logFactory.Close() + return nil, fmt.Errorf("failed to initialize log: %w", err) } + + // update fd limit + fdLimit := config.FdLimit + if err := ulimit.Set(fdLimit, log); err != nil { + log.Fatal("failed to set fd-limit", + zap.Error(err), + ) + logFactory.Close() + return nil, err + } + + n, err := node.New(&config, logFactory, log) + if err != nil { + log.Stop() + logFactory.Close() + return nil, fmt.Errorf("failed to initialize node: %w", err) + } + + return &app{ + config: config, + node: n, + log: log, + logFactory: logFactory, + }, nil } func Run(app App) int { @@ -99,56 +133,33 @@ func Run(app App) int { // app is a wrapper around a node that runs in this process type app struct { - config node.Config - node *node.Node - exitWG sync.WaitGroup + config node.Config + node *node.Node + log logging.Logger + logFactory logging.Factory + exitWG sync.WaitGroup } // Start the business logic of the node (as opposed to config reading, etc). // Does not block until the node is done. Errors returned from this method // are not logged. func (a *app) Start() error { - // Set the data directory permissions to be read write. 
- if err := perms.ChmodR(a.config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) - } - if err := perms.ChmodR(a.config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) - } - - // we want to create the logger after the plugin has started the app - logFactory := logging.NewFactory(a.config.LoggingConfig) - log, err := logFactory.Make("main") - if err != nil { - logFactory.Close() - return err - } - - // update fd limit - fdLimit := a.config.FdLimit - if err := ulimit.Set(fdLimit, log); err != nil { - log.Fatal("failed to set fd-limit", - zap.Error(err), - ) - logFactory.Close() - return err - } - // Track if sybil control is enforced if !a.config.SybilProtectionEnabled { - log.Warn("sybil control is not enforced") + a.log.Warn("sybil control is not enforced") } // TODO move this to config // SupportsNAT() for NoRouter is false. // Which means we tried to perform a NAT activity but we were not successful. if a.config.AttemptedNATTraversal && !a.config.Nat.SupportsNAT() { - log.Warn("UPnP and NAT-PMP router attach failed, you may not be listening publicly. " + + a.log.Warn("UPnP and NAT-PMP router attach failed, " + + "you may not be listening publicly. 
" + "Please confirm the settings in your router") } if ip := a.config.IPPort.IPPort().IP; ip.IsLoopback() || ip.IsPrivate() { - log.Warn("P2P IP is private, you will not be publicly discoverable", + a.log.Warn("P2P IP is private, you will not be publicly discoverable", zap.Stringer("ip", ip), ) } @@ -159,23 +170,23 @@ func (a *app) Start() error { if !hostIsPublic { ip, err := ips.Lookup(a.config.HTTPHost) if err != nil { - log.Fatal("failed to lookup HTTP host", + a.log.Fatal("failed to lookup HTTP host", zap.String("host", a.config.HTTPHost), zap.Error(err), ) - logFactory.Close() + a.logFactory.Close() return err } hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() - log.Debug("finished HTTP host lookup", + a.log.Debug("finished HTTP host lookup", zap.String("host", a.config.HTTPHost), zap.Stringer("ip", ip), zap.Bool("isPublic", hostIsPublic), ) } - mapper := nat.NewPortMapper(log, a.config.Nat) + mapper := nat.NewPortMapper(a.log, a.config.Nat) // Open staking port we want for NAT traversal to have the external port // (config.IP.Port) to connect to our internal listening port @@ -192,7 +203,7 @@ func (a *app) Start() error { // Don't open the HTTP port if the HTTP server is private if hostIsPublic { - log.Warn("HTTP server is binding to a potentially public host. "+ + a.log.Warn("HTTP server is binding to a potentially public host. "+ "You may be vulnerable to a DoS attack if your HTTP port is publicly accessible", zap.String("host", a.config.HTTPHost), ) @@ -213,18 +224,7 @@ func (a *app) Start() error { // Regularly update our public IP. // Note that if the node config said to not dynamically resolve and // update our public IP, [p.config.IPUdater] is a no-op implementation. 
- go a.config.IPUpdater.Dispatch(log) - - if err := a.node.Initialize(&a.config, log, logFactory); err != nil { - log.Fatal("error initializing node", - zap.Error(err), - ) - mapper.UnmapAllPorts() - a.config.IPUpdater.Stop() - log.Stop() - logFactory.Close() - return err - } + go a.config.IPUpdater.Dispatch(a.log) // [p.ExitCode] will block until [p.exitWG.Done] is called a.exitWG.Add(1) @@ -233,8 +233,8 @@ func (a *app) Start() error { if r := recover(); r != nil { fmt.Println("caught panic", r) } - log.Stop() - logFactory.Close() + a.log.Stop() + a.logFactory.Close() a.exitWG.Done() }() defer func() { @@ -244,11 +244,11 @@ func (a *app) Start() error { // If [p.node.Dispatch()] panics, then we should log the panic and // then re-raise the panic. This is why the above defer is broken // into two parts. - log.StopOnPanic() + a.log.StopOnPanic() }() err := a.node.Dispatch() - log.Debug("dispatch returned", + a.log.Debug("dispatch returned", zap.Error(err), ) }() diff --git a/chains/linearizable_vm.go b/chains/linearizable_vm.go index ebe2652cfb1d..f4fc93f7a696 100644 --- a/chains/linearizable_vm.go +++ b/chains/linearizable_vm.go @@ -7,13 +7,12 @@ import ( "context" "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - - dbManager "github.com/ava-labs/avalanchego/database/manager" ) var ( @@ -32,7 +31,7 @@ type initializeOnLinearizeVM struct { registerer metrics.OptionalGatherer ctx *snow.Context - dbManager dbManager.Manager + db database.Database genesisBytes []byte upgradeBytes []byte configBytes []byte @@ -47,7 +46,7 @@ func (vm *initializeOnLinearizeVM) Linearize(ctx context.Context, stopVertexID i return 
vm.vmToInitialize.Initialize( ctx, vm.ctx, - vm.dbManager, + vm.db, vm.genesisBytes, vm.upgradeBytes, vm.configBytes, @@ -74,7 +73,7 @@ func NewLinearizeOnInitializeVM(vm vertex.LinearizableVMWithEngine) *linearizeOn func (vm *linearizeOnInitializeVM) Initialize( ctx context.Context, _ *snow.Context, - _ dbManager.Manager, + _ database.Database, _ []byte, _ []byte, _ []byte, diff --git a/chains/manager.go b/chains/manager.go index 2234e82aadcc..3d5792fad843 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -23,6 +23,8 @@ import ( "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/meterdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" @@ -57,7 +59,6 @@ import ( "github.com/ava-labs/avalanchego/vms/proposervm" "github.com/ava-labs/avalanchego/vms/tracedvm" - dbManager "github.com/ava-labs/avalanchego/database/manager" timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker" aveng "github.com/ava-labs/avalanchego/snow/engine/avalanche" @@ -180,7 +181,7 @@ type ManagerConfig struct { BlockAcceptorGroup snow.AcceptorGroup TxAcceptorGroup snow.AcceptorGroup VertexAcceptorGroup snow.AcceptorGroup - DBManager dbManager.Manager + DB database.Database MsgCreator message.OutboundMsgBuilder // message creator, shared with network Router router.Router // Routes incoming messages to the appropriate chain Net network.Network // Sends consensus messages to other validators @@ -596,18 +597,16 @@ func (m *manager) createAvalancheChain( State: snow.Initializing, }) - meterDBManager, err := m.DBManager.NewMeterDBManager("db", ctx.Registerer) + meterDB, err := meterdb.New("db", ctx.Registerer, m.DB) if 
err != nil { return nil, err } - prefixDBManager := meterDBManager.NewPrefixDBManager(ctx.ChainID[:]) - vmDBManager := prefixDBManager.NewPrefixDBManager(vmDBPrefix) - - db := prefixDBManager.Current() - vertexDB := prefixdb.New(vertexDBPrefix, db.Database) - vertexBootstrappingDB := prefixdb.New(vertexBootstrappingDBPrefix, db.Database) - txBootstrappingDB := prefixdb.New(txBootstrappingDBPrefix, db.Database) - blockBootstrappingDB := prefixdb.New(blockBootstrappingDBPrefix, db.Database) + prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) + vmDB := prefixdb.New(vmDBPrefix, prefixDB) + vertexDB := prefixdb.New(vertexDBPrefix, prefixDB) + vertexBootstrappingDB := prefixdb.New(vertexBootstrappingDBPrefix, prefixDB) + txBootstrappingDB := prefixdb.New(txBootstrappingDBPrefix, prefixDB) + blockBootstrappingDB := prefixdb.New(blockBootstrappingDBPrefix, prefixDB) vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer) if err != nil { @@ -730,7 +729,7 @@ func (m *manager) createAvalancheChain( err = dagVM.Initialize( context.TODO(), ctx.Context, - vmDBManager, + vmDB, genesisData, chainConfig.Upgrade, chainConfig.Config, @@ -796,7 +795,7 @@ func (m *manager) createAvalancheChain( registerer: snowmanRegisterer, ctx: ctx.Context, - dbManager: vmDBManager, + db: vmDB, genesisBytes: genesisData, upgradeBytes: chainConfig.Upgrade, configBytes: chainConfig.Config, @@ -842,23 +841,14 @@ func (m *manager) createAvalancheChain( startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) vdrs.RegisterCallbackListener(ctx.SubnetID, startupTracker) - snowmanCommonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: sampleK, - Alpha: bootstrapWeight/2 + 1, // must be > 50% - StartupTracker: startupTracker, - Sender: snowmanMessageSender, - BootstrapTracker: sb, - Timer: h, - RetryBootstrap: m.RetryBootstrap, - RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, - MaxTimeGetAncestors: 
m.BootstrapMaxTimeGetAncestors, - AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - SharedCfg: &common.SharedConfig{}, - } - snowGetHandler, err := snowgetter.New(vmWrappingProposerVM, snowmanCommonCfg) + snowGetHandler, err := snowgetter.New( + vmWrappingProposerVM, + snowmanMessageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.Registerer, + ) if err != nil { return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) } @@ -871,10 +861,10 @@ func (m *manager) createAvalancheChain( // Create engine, bootstrapper and state-syncer in this order, // to make sure start callbacks are duly initialized snowmanEngineConfig := smeng.Config{ - Ctx: snowmanCommonCfg.Ctx, + Ctx: ctx, AllGetsServer: snowGetHandler, VM: vmWrappingProposerVM, - Sender: snowmanCommonCfg.Sender, + Sender: snowmanMessageSender, Validators: vdrs, Params: consensusParams, Consensus: snowmanConsensus, @@ -890,7 +880,20 @@ func (m *manager) createAvalancheChain( // create bootstrap gear bootstrapCfg := smbootstrap.Config{ - Config: snowmanCommonCfg, + Config: common.Config{ + Ctx: ctx, + Beacons: vdrs, + SampleK: sampleK, + Alpha: bootstrapWeight/2 + 1, // must be > 50% + StartupTracker: startupTracker, + Sender: snowmanMessageSender, + BootstrapTracker: sb, + Timer: h, + RetryBootstrap: m.RetryBootstrap, + RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + SharedCfg: &common.SharedConfig{}, + }, AllGetsServer: snowGetHandler, Blocked: blockBlocker, VM: vmWrappingProposerVM, @@ -907,24 +910,14 @@ func (m *manager) createAvalancheChain( snowmanBootstrapper = common.TraceBootstrapableEngine(snowmanBootstrapper, m.Tracer) } - avalancheCommonCfg := common.Config{ - Ctx: ctx, - Beacons: vdrs, - SampleK: sampleK, - StartupTracker: startupTracker, - Alpha: 
bootstrapWeight/2 + 1, // must be > 50% - Sender: avalancheMessageSender, - BootstrapTracker: sb, - Timer: h, - RetryBootstrap: m.RetryBootstrap, - RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, - MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, - AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - SharedCfg: &common.SharedConfig{}, - } - - avaGetHandler, err := avagetter.New(vtxManager, avalancheCommonCfg) + avaGetHandler, err := avagetter.New( + vtxManager, + avalancheMessageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.AvalancheRegisterer, + ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche base message handler: %w", err) } @@ -936,16 +929,20 @@ func (m *manager) createAvalancheChain( } // create bootstrap gear - _, specifiedLinearizationTime := version.CortinaTimes[ctx.NetworkID] - specifiedLinearizationTime = specifiedLinearizationTime && ctx.ChainID == m.XChainID avalancheBootstrapperConfig := avbootstrap.Config{ - Config: avalancheCommonCfg, - AllGetsServer: avaGetHandler, - VtxBlocked: vtxBlocker, - TxBlocked: txBlocker, - Manager: vtxManager, - VM: linearizableVM, - LinearizeOnStartup: !specifiedLinearizationTime, + AllGetsServer: avaGetHandler, + Ctx: ctx, + Beacons: vdrs, + StartupTracker: startupTracker, + Sender: avalancheMessageSender, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + VtxBlocked: vtxBlocker, + TxBlocked: txBlocker, + Manager: vtxManager, + VM: linearizableVM, + } + if ctx.ChainID == m.XChainID { + avalancheBootstrapperConfig.StopVertexID = version.CortinaXChainStopVertexID[ctx.NetworkID] } avalancheBootstrapper, err := avbootstrap.New( @@ -1004,15 +1001,13 @@ func (m *manager) createSnowmanChain( State: snow.Initializing, }) - meterDBManager, err := m.DBManager.NewMeterDBManager("db", ctx.Registerer) + meterDB, err := 
meterdb.New("db", ctx.Registerer, m.DB) if err != nil { return nil, err } - prefixDBManager := meterDBManager.NewPrefixDBManager(ctx.ChainID[:]) - vmDBManager := prefixDBManager.NewPrefixDBManager(vmDBPrefix) - - db := prefixDBManager.Current() - bootstrappingDB := prefixdb.New(bootstrappingDB, db.Database) + prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) + vmDB := prefixdb.New(vmDBPrefix, prefixDB) + bootstrappingDB := prefixdb.New(bootstrappingDB, prefixDB) blocked, err := queue.NewWithMissing(bootstrappingDB, "block", ctx.Registerer) if err != nil { @@ -1145,7 +1140,7 @@ func (m *manager) createSnowmanChain( if err := vm.Initialize( context.TODO(), ctx.Context, - vmDBManager, + vmDB, genesisData, chainConfig.Upgrade, chainConfig.Config, @@ -1193,24 +1188,14 @@ func (m *manager) createSnowmanChain( startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) beacons.RegisterCallbackListener(ctx.SubnetID, startupTracker) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: beacons, - SampleK: sampleK, - StartupTracker: startupTracker, - Alpha: bootstrapWeight/2 + 1, // must be > 50% - Sender: messageSender, - BootstrapTracker: sb, - Timer: h, - RetryBootstrap: m.RetryBootstrap, - RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, - MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, - AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - SharedCfg: &common.SharedConfig{}, - } - - snowGetHandler, err := snowgetter.New(vm, commonCfg) + snowGetHandler, err := snowgetter.New( + vm, + messageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.Registerer, + ) if err != nil { return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) } @@ -1223,14 +1208,14 @@ func (m *manager) createSnowmanChain( // Create engine, bootstrapper and state-syncer in this order, // to make sure start 
callbacks are duly initialized engineConfig := smeng.Config{ - Ctx: commonCfg.Ctx, + Ctx: ctx, AllGetsServer: snowGetHandler, VM: vm, - Sender: commonCfg.Sender, + Sender: messageSender, Validators: vdrs, Params: consensusParams, Consensus: consensus, - PartialSync: m.PartialSyncPrimaryNetwork && commonCfg.Ctx.ChainID == constants.PlatformChainID, + PartialSync: m.PartialSyncPrimaryNetwork && ctx.ChainID == constants.PlatformChainID, } engine, err := smeng.New(engineConfig) if err != nil { @@ -1241,6 +1226,21 @@ func (m *manager) createSnowmanChain( engine = smeng.TraceEngine(engine, m.Tracer) } + commonCfg := common.Config{ + Ctx: ctx, + Beacons: beacons, + SampleK: sampleK, + StartupTracker: startupTracker, + Alpha: bootstrapWeight/2 + 1, // must be > 50% + Sender: messageSender, + BootstrapTracker: sb, + Timer: h, + RetryBootstrap: m.RetryBootstrap, + RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + SharedCfg: &common.SharedConfig{}, + } + // create bootstrap gear bootstrapCfg := smbootstrap.Config{ Config: commonCfg, diff --git a/codec/reflectcodec/struct_fielder.go b/codec/reflectcodec/struct_fielder.go index 27edb58a1793..d266b60a3ebf 100644 --- a/codec/reflectcodec/struct_fielder.go +++ b/codec/reflectcodec/struct_fielder.go @@ -18,6 +18,10 @@ const ( // TagValue is the value the tag must have to be serialized. TagValue = "true" + + // TagValue is the value the tag must have to be serialized, this variant + // includes the nullable option + TagWithNullableValue = "true,nullable" ) var _ StructFielder = (*structFielder)(nil) @@ -25,6 +29,7 @@ var _ StructFielder = (*structFielder)(nil) type FieldDesc struct { Index int MaxSliceLen uint32 + Nullable bool } // StructFielder handles discovery of serializable fields in a struct. @@ -82,10 +87,19 @@ func (s *structFielder) GetSerializedFields(t reflect.Type) ([]FieldDesc, error) // Multiple tags per fields can be specified. 
// Serialize/Deserialize field if it has // any tag with the right value - captureField := false + var ( + captureField bool + nullable bool + ) for _, tag := range s.tags { - if field.Tag.Get(tag) == TagValue { + switch field.Tag.Get(tag) { + case TagValue: + captureField = true + case TagWithNullableValue: captureField = true + nullable = true + } + if captureField { break } } @@ -107,6 +121,7 @@ func (s *structFielder) GetSerializedFields(t reflect.Type) ([]FieldDesc, error) serializedFields = append(serializedFields, FieldDesc{ Index: i, MaxSliceLen: maxSliceLen, + Nullable: nullable, }) } s.serializedFieldIndices[t] = serializedFields // cache result diff --git a/codec/reflectcodec/type_codec.go b/codec/reflectcodec/type_codec.go index ac9ca25c16e7..9f9037f43d4e 100644 --- a/codec/reflectcodec/type_codec.go +++ b/codec/reflectcodec/type_codec.go @@ -13,18 +13,23 @@ import ( "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" ) -// DefaultTagName that enables serialization. -const DefaultTagName = "serialize" +const ( + // DefaultTagName that enables serialization. 
+ DefaultTagName = "serialize" + initialSliceLen = 16 +) var ( _ codec.Codec = (*genericCodec)(nil) - errMarshalNil = errors.New("can't marshal nil pointer or interface") - errUnmarshalNil = errors.New("can't unmarshal nil") - errNeedPointer = errors.New("argument to unmarshal must be a pointer") + errMarshalNil = errors.New("can't marshal nil pointer or interface") + errUnmarshalNil = errors.New("can't unmarshal nil") + errNeedPointer = errors.New("argument to unmarshal must be a pointer") + errRecursiveInterfaceTypes = errors.New("recursive interface types") ) type TypeCodec interface { @@ -85,12 +90,18 @@ func (c *genericCodec) Size(value interface{}) (int, error) { return 0, errMarshalNil // can't marshal nil } - size, _, err := c.size(reflect.ValueOf(value)) + size, _, err := c.size(reflect.ValueOf(value), false /*=nullable*/, nil /*=typeStack*/) return size, err } -// size returns the size of the value along with whether the value is constant sized. -func (c *genericCodec) size(value reflect.Value) (int, bool, error) { +// size returns the size of the value along with whether the value is constant +// sized. 
This function takes into account a `nullable` property which allows +// pointers and interfaces to serialize nil values +func (c *genericCodec) size( + value reflect.Value, + nullable bool, + typeStack set.Set[reflect.Type], +) (int, bool, error) { switch valueKind := value.Kind(); valueKind { case reflect.Uint8: return wrappers.ByteLen, true, nil @@ -114,24 +125,41 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { return wrappers.StringLen(value.String()), false, nil case reflect.Ptr: if value.IsNil() { - // Can't marshal nil pointers (but nil slices are fine) - return 0, false, errMarshalNil + if !nullable { + return 0, false, errMarshalNil + } + return wrappers.BoolLen, false, nil + } + + size, constSize, err := c.size(value.Elem(), false /*=nullable*/, typeStack) + if nullable { + return wrappers.BoolLen + size, false, err } - return c.size(value.Elem()) + return size, constSize, err case reflect.Interface: if value.IsNil() { - // Can't marshal nil interfaces (but nil slices are fine) - return 0, false, errMarshalNil + if !nullable { + return 0, false, errMarshalNil + } + return wrappers.BoolLen, false, nil } + underlyingValue := value.Interface() underlyingType := reflect.TypeOf(underlyingValue) + if typeStack.Contains(underlyingType) { + return 0, false, fmt.Errorf("%w: %s", errRecursiveInterfaceTypes, underlyingType) + } + typeStack.Add(underlyingType) + prefixSize := c.typer.PrefixSize(underlyingType) - valueSize, _, err := c.size(value.Elem()) - if err != nil { - return 0, false, err + valueSize, _, err := c.size(value.Elem(), false /*=nullable*/, typeStack) + + typeStack.Remove(underlyingType) + if nullable { + return wrappers.BoolLen + prefixSize + valueSize, false, err } - return prefixSize + valueSize, false, nil + return prefixSize + valueSize, false, err case reflect.Slice: numElts := value.Len() @@ -139,7 +167,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { return wrappers.IntLen, false, nil } - 
size, constSize, err := c.size(value.Index(0)) + size, constSize, err := c.size(value.Index(0), nullable, typeStack) if err != nil { return 0, false, err } @@ -151,7 +179,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { } for i := 1; i < numElts; i++ { - innerSize, _, err := c.size(value.Index(i)) + innerSize, _, err := c.size(value.Index(i), nullable, typeStack) if err != nil { return 0, false, err } @@ -165,7 +193,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { return 0, true, nil } - size, constSize, err := c.size(value.Index(0)) + size, constSize, err := c.size(value.Index(0), nullable, typeStack) if err != nil { return 0, false, err } @@ -177,7 +205,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { } for i := 1; i < numElts; i++ { - innerSize, _, err := c.size(value.Index(i)) + innerSize, _, err := c.size(value.Index(i), nullable, typeStack) if err != nil { return 0, false, err } @@ -196,7 +224,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { constSize = true ) for _, fieldDesc := range serializedFields { - innerSize, innerConstSize, err := c.size(value.Field(fieldDesc.Index)) + innerSize, innerConstSize, err := c.size(value.Field(fieldDesc.Index), fieldDesc.Nullable, typeStack) if err != nil { return 0, false, err } @@ -211,11 +239,11 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { return wrappers.IntLen, false, nil } - keySize, keyConstSize, err := c.size(iter.Key()) + keySize, keyConstSize, err := c.size(iter.Key(), false /*=nullable*/, typeStack) if err != nil { return 0, false, err } - valueSize, valueConstSize, err := c.size(iter.Value()) + valueSize, valueConstSize, err := c.size(iter.Value(), nullable, typeStack) if err != nil { return 0, false, err } @@ -230,7 +258,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { totalValueSize = valueSize ) for iter.Next() { - valueSize, _, err := 
c.size(iter.Value()) + valueSize, _, err := c.size(iter.Value(), nullable, typeStack) if err != nil { return 0, false, err } @@ -244,7 +272,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { totalKeySize = keySize ) for iter.Next() { - keySize, _, err := c.size(iter.Key()) + keySize, _, err := c.size(iter.Key(), false /*=nullable*/, typeStack) if err != nil { return 0, false, err } @@ -255,11 +283,11 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { default: totalSize := wrappers.IntLen + keySize + valueSize for iter.Next() { - keySize, _, err := c.size(iter.Key()) + keySize, _, err := c.size(iter.Key(), false /*=nullable*/, typeStack) if err != nil { return 0, false, err } - valueSize, _, err := c.size(iter.Value()) + valueSize, _, err := c.size(iter.Value(), nullable, typeStack) if err != nil { return 0, false, err } @@ -279,13 +307,19 @@ func (c *genericCodec) MarshalInto(value interface{}, p *wrappers.Packer) error return errMarshalNil // can't marshal nil } - return c.marshal(reflect.ValueOf(value), p, c.maxSliceLen) + return c.marshal(reflect.ValueOf(value), p, c.maxSliceLen, false /*=nullable*/, nil /*=typeStack*/) } // marshal writes the byte representation of [value] to [p] -// [value]'s underlying value must not be a nil pointer or interface +// // c.lock should be held for the duration of this function -func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSliceLen uint32) error { +func (c *genericCodec) marshal( + value reflect.Value, + p *wrappers.Packer, + maxSliceLen uint32, + nullable bool, + typeStack set.Set[reflect.Type], +) error { switch valueKind := value.Kind(); valueKind { case reflect.Uint8: p.PackByte(uint8(value.Uint())) @@ -318,22 +352,41 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice p.PackBool(value.Bool()) return p.Err case reflect.Ptr: - if value.IsNil() { // Can't marshal nil (except nil slices) + isNil := value.IsNil() + if 
nullable { + p.PackBool(isNil) + if isNil || p.Err != nil { + return p.Err + } + } else if isNil { return errMarshalNil } - return c.marshal(value.Elem(), p, c.maxSliceLen) + + return c.marshal(value.Elem(), p, c.maxSliceLen, false /*=nullable*/, typeStack) case reflect.Interface: - if value.IsNil() { // Can't marshal nil (except nil slices) + isNil := value.IsNil() + if nullable { + p.PackBool(isNil) + if isNil || p.Err != nil { + return p.Err + } + } else if isNil { return errMarshalNil } + underlyingValue := value.Interface() underlyingType := reflect.TypeOf(underlyingValue) + if typeStack.Contains(underlyingType) { + return fmt.Errorf("%w: %s", errRecursiveInterfaceTypes, underlyingType) + } + typeStack.Add(underlyingType) if err := c.typer.PackPrefix(p, underlyingType); err != nil { return err } - if err := c.marshal(value.Elem(), p, c.maxSliceLen); err != nil { + if err := c.marshal(value.Elem(), p, c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { return err } + typeStack.Remove(underlyingType) return p.Err case reflect.Slice: numElts := value.Len() // # elements in the slice/array. 0 if this slice is nil. 
@@ -361,7 +414,7 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice return p.Err } for i := 0; i < numElts; i++ { // Process each element in the slice - if err := c.marshal(value.Index(i), p, c.maxSliceLen); err != nil { + if err := c.marshal(value.Index(i), p, c.maxSliceLen, nullable, typeStack); err != nil { return err } } @@ -381,7 +434,7 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice ) } for i := 0; i < numElts; i++ { // Process each element in the array - if err := c.marshal(value.Index(i), p, c.maxSliceLen); err != nil { + if err := c.marshal(value.Index(i), p, c.maxSliceLen, nullable, typeStack); err != nil { return err } } @@ -392,7 +445,7 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice return err } for _, fieldDesc := range serializedFields { // Go through all fields of this struct that are serialized - if err := c.marshal(value.Field(fieldDesc.Index), p, fieldDesc.MaxSliceLen); err != nil { // Serialize the field and write to byte array + if err := c.marshal(value.Field(fieldDesc.Index), p, fieldDesc.MaxSliceLen, fieldDesc.Nullable, typeStack); err != nil { // Serialize the field and write to byte array return err } } @@ -423,7 +476,7 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice startOffset := p.Offset endOffset := p.Offset for i, key := range keys { - if err := c.marshal(key, p, c.maxSliceLen); err != nil { + if err := c.marshal(key, p, c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { return err } if p.Err != nil { @@ -456,7 +509,7 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice } // serialize and pack value - if err := c.marshal(value.MapIndex(key.key), p, c.maxSliceLen); err != nil { + if err := c.marshal(value.MapIndex(key.key), p, c.maxSliceLen, nullable, typeStack); err != nil { return err } } @@ -467,8 +520,8 @@ func (c *genericCodec) marshal(value 
reflect.Value, p *wrappers.Packer, maxSlice } } -// Unmarshal unmarshals [bytes] into [dest], where -// [dest] must be a pointer or interface +// Unmarshal unmarshals [bytes] into [dest], where [dest] must be a pointer or +// interface func (c *genericCodec) Unmarshal(bytes []byte, dest interface{}) error { if dest == nil { return errUnmarshalNil @@ -481,7 +534,7 @@ func (c *genericCodec) Unmarshal(bytes []byte, dest interface{}) error { if destPtr.Kind() != reflect.Ptr { return errNeedPointer } - if err := c.unmarshal(&p, destPtr.Elem(), c.maxSliceLen); err != nil { + if err := c.unmarshal(&p, destPtr.Elem(), c.maxSliceLen, false /*=nullable*/, nil /*=typeStack*/); err != nil { return err } if p.Offset != len(bytes) { @@ -495,8 +548,19 @@ func (c *genericCodec) Unmarshal(bytes []byte, dest interface{}) error { } // Unmarshal from p.Bytes into [value]. [value] must be addressable. +// +// The [nullable] property affects how pointers and interfaces are unmarshalled, +// as an extra byte would be used to unmarshal nil values for pointers and +// interaces +// // c.lock should be held for the duration of this function -func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSliceLen uint32) error { +func (c *genericCodec) unmarshal( + p *wrappers.Packer, + value reflect.Value, + maxSliceLen uint32, + nullable bool, + typeStack set.Set[reflect.Type], +) error { switch value.Kind() { case reflect.Uint8: value.SetUint(uint64(p.UnpackByte())) @@ -573,18 +637,22 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli } numElts := int(numElts32) + sliceType := value.Type() + innerType := sliceType.Elem() + // If this is a slice of bytes, manually unpack the bytes rather // than calling unmarshal on each byte. This improves performance. 
- if elemKind := value.Type().Elem().Kind(); elemKind == reflect.Uint8 { + if elemKind := innerType.Kind(); elemKind == reflect.Uint8 { value.SetBytes(p.UnpackFixedBytes(numElts)) return p.Err } - // set [value] to be a slice of the appropriate type/capacity (right now it is nil) - value.Set(reflect.MakeSlice(value.Type(), numElts, numElts)) - // Unmarshal each element into the appropriate index of the slice + // Unmarshal each element and append it into the slice. + value.Set(reflect.MakeSlice(sliceType, 0, initialSliceLen)) + zeroValue := reflect.Zero(innerType) for i := 0; i < numElts; i++ { - if err := c.unmarshal(p, value.Index(i), c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal slice element: %w", err) + value.Set(reflect.Append(value, zeroValue)) + if err := c.unmarshal(p, value.Index(i), c.maxSliceLen, nullable, typeStack); err != nil { + return err } } return nil @@ -601,8 +669,8 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli return nil } for i := 0; i < numElts; i++ { - if err := c.unmarshal(p, value.Index(i), c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal array element: %w", err) + if err := c.unmarshal(p, value.Index(i), c.maxSliceLen, nullable, typeStack); err != nil { + return err } } return nil @@ -613,15 +681,29 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli } return nil case reflect.Interface: + if nullable { + isNil := p.UnpackBool() + if isNil || p.Err != nil { + return p.Err + } + } + intfImplementor, err := c.typer.UnpackPrefix(p, value.Type()) if err != nil { return err } + intfImplementorType := intfImplementor.Type() + if typeStack.Contains(intfImplementorType) { + return fmt.Errorf("%w: %s", errRecursiveInterfaceTypes, intfImplementorType) + } + typeStack.Add(intfImplementorType) + // Unmarshal into the struct - if err := c.unmarshal(p, intfImplementor, c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal 
interface: %w", err) + if err := c.unmarshal(p, intfImplementor, c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { + return err } - // And assign the filled struct to the value + + typeStack.Remove(intfImplementorType) value.Set(intfImplementor) return nil case reflect.Struct: @@ -632,19 +714,26 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli } // Go through the fields and umarshal into them for _, fieldDesc := range serializedFieldIndices { - if err := c.unmarshal(p, value.Field(fieldDesc.Index), fieldDesc.MaxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal struct: %w", err) + if err := c.unmarshal(p, value.Field(fieldDesc.Index), fieldDesc.MaxSliceLen, fieldDesc.Nullable, typeStack); err != nil { + return err } } return nil case reflect.Ptr: + if nullable { + isNil := p.UnpackBool() + if isNil || p.Err != nil { + return p.Err + } + } + // Get the type this pointer points to t := value.Type().Elem() // Create a new pointer to a new value of the underlying type v := reflect.New(t) // Fill the value - if err := c.unmarshal(p, v.Elem(), c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal pointer: %w", err) + if err := c.unmarshal(p, v.Elem(), c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { + return err } // Assign to the top-level struct's member value.Set(v) @@ -671,15 +760,15 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli ) // Set [value] to be a new map of the appropriate type. 
- value.Set(reflect.MakeMapWithSize(mapType, numElts)) + value.Set(reflect.MakeMap(mapType)) for i := 0; i < numElts; i++ { mapKey := reflect.New(mapKeyType).Elem() keyStartOffset := p.Offset - if err := c.unmarshal(p, mapKey, c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal map key (%s): %w", mapKeyType, err) + if err := c.unmarshal(p, mapKey, c.maxSliceLen, false /*=nullable*/, typeStack); err != nil { + return err } // Get the key's byte representation and check that the new key is @@ -696,8 +785,8 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli // Get the value mapValue := reflect.New(mapValueType).Elem() - if err := c.unmarshal(p, mapValue, c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal map value for key %s: %w", mapKey, err) + if err := c.unmarshal(p, mapValue, c.maxSliceLen, nullable, typeStack); err != nil { + return err } // Assign the key-value pair in the map diff --git a/codec/reflectcodec/type_codec_test.go b/codec/reflectcodec/type_codec_test.go new file mode 100644 index 000000000000..42b256c4a6c9 --- /dev/null +++ b/codec/reflectcodec/type_codec_test.go @@ -0,0 +1,30 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package reflectcodec + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSizeWithNil(t *testing.T) { + require := require.New(t) + var x *int32 + y := int32(1) + c := genericCodec{} + _, _, err := c.size(reflect.ValueOf(x), false /*=nullable*/, nil /*=typeStack*/) + require.ErrorIs(err, errMarshalNil) + len, _, err := c.size(reflect.ValueOf(x), true /*=nullable*/, nil /*=typeStack*/) + require.Empty(err) + require.Equal(1, len) + x = &y + len, _, err = c.size(reflect.ValueOf(y), true /*=nullable*/, nil /*=typeStack*/) + require.Empty(err) + require.Equal(4, len) + len, _, err = c.size(reflect.ValueOf(x), true /*=nullable*/, nil /*=typeStack*/) + require.Empty(err) + require.Equal(5, len) +} diff --git a/codec/test_codec.go b/codec/test_codec.go index 7177e08e81b1..341912a823af 100644 --- a/codec/test_codec.go +++ b/codec/test_codec.go @@ -23,6 +23,7 @@ var ( TestBigArray, TestPointerToStruct, TestSliceOfStruct, + TestStructWithNullable, TestInterface, TestSliceOfInterface, TestArrayOfInterface, @@ -63,7 +64,8 @@ type Foo interface { } type MyInnerStruct struct { - Str string `serialize:"true"` + Str string `serialize:"true"` + NumberNotProvided *int32 `serialize:"true,nullable"` } func (*MyInnerStruct) Foo() int { @@ -86,6 +88,15 @@ type MyInnerStruct3 struct { F Foo `serialize:"true"` } +type MyStructWithNullable struct { + Interface any `serialize:"true,nullable"` + Int32 *int32 `serialize:"true,nullable"` + Int64 *int64 `serialize:"true,nullable"` + Int32Slice []*int32 `serialize:"true,nullable"` + Int32Array [2]*int32 `serialize:"true,nullable"` + Int32Map map[int32]*int32 `serialize:"true,nullable"` +} + type myStruct struct { InnerStruct MyInnerStruct `serialize:"true"` InnerStruct2 *MyInnerStruct `serialize:"true"` @@ -145,21 +156,23 @@ func TestStruct(codec GeneralCodec, t testing.TB) { myMap7["key"] = "value" myMap7[int32(1)] = int32(2) + number := int32(8) + myStructInstance := myStruct{ - InnerStruct: 
MyInnerStruct{"hello"}, - InnerStruct2: &MyInnerStruct{"yello"}, + InnerStruct: MyInnerStruct{"hello", nil}, + InnerStruct2: &MyInnerStruct{"yello", nil}, Member1: 1, Member2: 2, MySlice: []byte{1, 2, 3, 4}, MySlice2: []string{"one", "two", "three"}, - MySlice3: []MyInnerStruct{{"abc"}, {"ab"}, {"c"}}, + MySlice3: []MyInnerStruct{{"abc", nil}, {"ab", &number}, {"c", nil}}, MySlice4: []*MyInnerStruct2{{true}, {}}, MySlice5: []Foo{&MyInnerStruct2{true}, &MyInnerStruct2{}}, MyArray: [4]byte{5, 6, 7, 8}, MyArray2: [5]string{"four", "five", "six", "seven"}, - MyArray3: [3]MyInnerStruct{{"d"}, {"e"}, {"f"}}, + MyArray3: [3]MyInnerStruct{{"d", nil}, {"e", nil}, {"f", nil}}, MyArray4: [2]*MyInnerStruct2{{}, {true}}, - MyInterface: &MyInnerStruct{"yeet"}, + MyInterface: &MyInnerStruct{"yeet", &number}, InnerStruct3: MyInnerStruct3{ Str: "str", M1: MyInnerStruct{ @@ -414,20 +427,79 @@ func TestPointerToStruct(codec GeneralCodec, t testing.TB) { require.Equal(myPtr, myPtrUnmarshaled) } +func TestStructWithNullable(codec GeneralCodec, t testing.TB) { + require := require.New(t) + n1 := int32(5) + n2 := int64(10) + struct1 := MyStructWithNullable{ + Interface: nil, + Int32: &n1, + Int64: &n2, + Int32Slice: []*int32{ + nil, + nil, + &n1, + }, + Int32Array: [2]*int32{ + nil, + &n1, + }, + Int32Map: map[int32]*int32{ + 1: nil, + 2: &n1, + }, + } + + require.NoError(codec.RegisterType(&MyStructWithNullable{})) + manager := NewDefaultManager() + require.NoError(manager.RegisterCodec(0, codec)) + + bytes, err := manager.Marshal(0, struct1) + require.NoError(err) + + bytesLen, err := manager.Size(0, struct1) + require.NoError(err) + require.Len(bytes, bytesLen) + + var struct1Unmarshaled MyStructWithNullable + version, err := manager.Unmarshal(bytes, &struct1Unmarshaled) + require.NoError(err) + require.Zero(version) + require.Equal(struct1, struct1Unmarshaled) + + struct1 = MyStructWithNullable{ + Int32Slice: []*int32{}, + Int32Map: map[int32]*int32{}, + } + bytes, err = 
manager.Marshal(0, struct1) + require.NoError(err) + + bytesLen, err = manager.Size(0, struct1) + require.NoError(err) + require.Len(bytes, bytesLen) + + var struct1Unmarshaled2 MyStructWithNullable + version, err = manager.Unmarshal(bytes, &struct1Unmarshaled2) + require.NoError(err) + require.Zero(version) + require.Equal(struct1, struct1Unmarshaled2) +} + // Test marshalling a slice of structs func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { require := require.New(t) - + n1 := int32(-1) + n2 := int32(0xff) mySlice := []MyInnerStruct3{ { Str: "One", - M1: MyInnerStruct{"Two"}, - F: &MyInnerStruct{"Three"}, + M1: MyInnerStruct{"Two", &n1}, + F: &MyInnerStruct{"Three", &n2}, }, { Str: "Four", - M1: MyInnerStruct{"Five"}, - F: &MyInnerStruct{"Six"}, + M1: MyInnerStruct{"Five", nil}, + F: &MyInnerStruct{"Six", nil}, }, } require.NoError(codec.RegisterType(&MyInnerStruct{})) diff --git a/config/config.go b/config/config.go index 1ecbe33f33f8..1b0eff0f96a4 100644 --- a/config/config.go +++ b/config/config.go @@ -928,7 +928,8 @@ func getDatabaseConfig(v *viper.Viper, networkID uint32) (node.DatabaseConfig, e } return node.DatabaseConfig{ - Name: v.GetString(DBTypeKey), + Name: v.GetString(DBTypeKey), + ReadOnly: v.GetBool(DBReadOnlyKey), Path: filepath.Join( GetExpandedArg(v, DBPathKey), constants.NetworkName(networkID), diff --git a/config/flags.go b/config/flags.go index 254d70fea2dd..edece3ada4a3 100644 --- a/config/flags.go +++ b/config/flags.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/pebble" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/trace" @@ -103,7 +104,8 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Uint64(AddSubnetDelegatorFeeKey, genesis.LocalParams.AddSubnetDelegatorFee, "Transaction 
fee, in nAVAX, for transactions that add new subnet delegators") // Database - fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. Should be one of {%s, %s}", leveldb.Name, memdb.Name)) + fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. Must be one of {%s, %s, %s}", leveldb.Name, memdb.Name, pebble.Name)) + fs.Bool(DBReadOnlyKey, false, "If true, database writes are to memory and never persisted. May still initialize database directory/files on disk if they don't exist") fs.String(DBPathKey, defaultDBDir, "Path to database directory") fs.String(DBConfigFileKey, "", fmt.Sprintf("Path to database config file. Ignored if %s is specified", DBConfigContentKey)) fs.String(DBConfigContentKey, "", "Specifies base64 encoded database config content") @@ -204,8 +206,8 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Uint64(OutboundThrottlerNodeMaxAtLargeBytesKey, constants.DefaultOutboundThrottlerNodeMaxAtLargeBytes, "Max number of bytes a node can take from the outbound message throttler's at-large allocation. Must be at least the max message size") // HTTP APIs - fs.String(HTTPHostKey, "127.0.0.1", "Address of the HTTP server") - fs.Uint(HTTPPortKey, DefaultHTTPPort, "Port of the HTTP server") + fs.String(HTTPHostKey, "127.0.0.1", "Address of the HTTP server. If the address is empty or a literal unspecified IP address, the server will bind on all available unicast and anycast IP addresses of the local system") + fs.Uint(HTTPPortKey, DefaultHTTPPort, "Port of the HTTP server. If the port is 0 a port number is automatically chosen") fs.Bool(HTTPSEnabledKey, false, "Upgrade the HTTP server to HTTPs") fs.String(HTTPSKeyFileKey, "", fmt.Sprintf("TLS private key file for the HTTPs server. 
Ignored if %s is specified", HTTPSKeyContentKey)) fs.String(HTTPSKeyContentKey, "", "Specifies base64 encoded TLS private key for the HTTPs server") @@ -248,8 +250,8 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Duration(NetworkHealthMaxOutstandingDurationKey, 5*time.Minute, "Node reports unhealthy if there has been a request outstanding for this duration") // Staking - fs.String(StakingHostKey, "", "Address of the consensus server") // Bind to all interfaces by default. - fs.Uint(StakingPortKey, DefaultStakingPort, "Port of the consensus server") + fs.String(StakingHostKey, "", "Address of the consensus server. If the address is empty or a literal unspecified IP address, the server will bind on all available unicast and anycast IP addresses of the local system") // Bind to all interfaces by default. + fs.Uint(StakingPortKey, DefaultStakingPort, "Port of the consensus server. If the port is 0 a port number is automatically chosen") fs.Bool(StakingEphemeralCertEnabledKey, false, "If true, the node uses an ephemeral staking TLS key and certificate, and has an ephemeral node ID") fs.String(StakingTLSKeyPathKey, defaultStakingTLSKeyPath, fmt.Sprintf("Path to the TLS private key for staking. 
Ignored if %s is specified", StakingTLSKeyContentKey)) fs.String(StakingTLSKeyContentKey, "", "Specifies base64 encoded TLS private key for staking") diff --git a/config/keys.go b/config/keys.go index a62e960f16bb..7263cf0a6bdd 100644 --- a/config/keys.go +++ b/config/keys.go @@ -34,6 +34,7 @@ const ( StakeMintingPeriodKey = "stake-minting-period" StakeSupplyCapKey = "stake-supply-cap" DBTypeKey = "db-type" + DBReadOnlyKey = "db-read-only" DBPathKey = "db-dir" DBConfigFileKey = "db-config-file" DBConfigContentKey = "db-config-file-content" diff --git a/database/corruptabledb/db_test.go b/database/corruptabledb/db_test.go index d4c14f782986..b58c65b4b162 100644 --- a/database/corruptabledb/db_test.go +++ b/database/corruptabledb/db_test.go @@ -26,16 +26,21 @@ func TestInterface(t *testing.T) { } } -func FuzzKeyValue(f *testing.F) { +func newDB() *Database { baseDB := memdb.New() - db := New(baseDB) - database.FuzzKeyValue(f, db) + return New(baseDB) +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB()) } func FuzzNewIteratorWithPrefix(f *testing.F) { - baseDB := memdb.New() - db := New(baseDB) - database.FuzzNewIteratorWithPrefix(f, db) + database.FuzzNewIteratorWithPrefix(f, newDB()) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB()) } // TestCorruption tests to make sure corruptabledb wrapper works as expected. @@ -70,9 +75,7 @@ func TestCorruption(t *testing.T) { return err }, } - baseDB := memdb.New() - // wrap this db - corruptableDB := New(baseDB) + corruptableDB := newDB() _ = corruptableDB.handleError(errTest) for name, testFn := range tests { t.Run(name, func(tt *testing.T) { @@ -176,9 +179,7 @@ func TestIterator(t *testing.T) { ctrl := gomock.NewController(t) // Make a database - baseDB := memdb.New() - corruptableDB := New(baseDB) - + corruptableDB := newDB() // Put a key-value pair in the database. 
require.NoError(corruptableDB.Put([]byte{0}, []byte{1})) diff --git a/database/encdb/db_test.go b/database/encdb/db_test.go index 177259f5c7f2..f49dd7ebb28d 100644 --- a/database/encdb/db_test.go +++ b/database/encdb/db_test.go @@ -24,28 +24,30 @@ func TestInterface(t *testing.T) { } } -func FuzzKeyValue(f *testing.F) { +func newDB(t testing.TB) database.Database { unencryptedDB := memdb.New() db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(f, err) - database.FuzzKeyValue(f, db) + require.NoError(t, err) + return db +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB(f)) } func FuzzNewIteratorWithPrefix(f *testing.F) { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(f, err) - database.FuzzNewIteratorWithPrefix(f, db) + database.FuzzNewIteratorWithPrefix(f, newDB(f)) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB(f)) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) for _, bench := range database.Benchmarks { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - require.NoError(b, err) - bench(b, db, "encdb", keys, values) + bench(b, newDB(b), "encdb", keys, values) } } } diff --git a/database/leveldb/db_test.go b/database/leveldb/db_test.go index bf6bdeac7f27..23733dacaff4 100644 --- a/database/leveldb/db_test.go +++ b/database/leveldb/db_test.go @@ -26,34 +26,39 @@ func TestInterface(t *testing.T) { } } -func FuzzKeyValue(f *testing.F) { - folder := f.TempDir() +func newDB(t testing.TB) database.Database { + folder := t.TempDir() db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(f, err) + require.NoError(t, err) + return db +} +func FuzzKeyValue(f *testing.F) { + db := newDB(f) defer db.Close() database.FuzzKeyValue(f, 
db) } func FuzzNewIteratorWithPrefix(f *testing.F) { - folder := f.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(f, err) - + db := newDB(f) defer db.Close() database.FuzzNewIteratorWithPrefix(f, db) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := newDB(f) + defer db.Close() + + database.FuzzNewIteratorWithStartAndPrefix(f, db) +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) for _, bench := range database.Benchmarks { - folder := b.TempDir() - - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(b, err) + db := newDB(b) bench(b, db, "leveldb", keys, values) diff --git a/database/leveldb/metrics.go b/database/leveldb/metrics.go index 8b2971a374c9..11bca8ddb07e 100644 --- a/database/leveldb/metrics.go +++ b/database/leveldb/metrics.go @@ -10,7 +10,7 @@ import ( "github.com/syndtr/goleveldb/leveldb" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) var levelLabels = []string{"level"} @@ -180,8 +180,7 @@ func newMetrics(namespace string, reg prometheus.Registerer) (metrics, error) { currentStats: &leveldb.DBStats{}, } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.writesDelayedCount), reg.Register(m.writesDelayedDuration), reg.Register(m.writeIsDelayed), @@ -206,7 +205,7 @@ func newMetrics(namespace string, reg prometheus.Registerer) (metrics, error) { reg.Register(m.nonLevel0Compactions), reg.Register(m.seekCompactions), ) - return m, errs.Err + return m, err } func (db *Database) updateMetrics() error { diff --git a/database/manager/manager.go b/database/manager/manager.go deleted file mode 100644 index fd9c36969b79..000000000000 --- a/database/manager/manager.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - -package manager - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/corruptabledb" - "github.com/ava-labs/avalanchego/database/leveldb" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/meterdb" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" -) - -var ( - errNonSortedAndUniqueDBs = errors.New("managed databases were not sorted and unique") - errNoDBs = errors.New("no dbs given") -) - -var _ Manager = (*manager)(nil) - -type Manager interface { - // Current returns the database with the current database version. - Current() *VersionedDatabase - - // Previous returns the database prior to the current database and true if a - // previous database exists. - Previous() (*VersionedDatabase, bool) - - // GetDatabases returns all the managed databases in order from current to - // the oldest version. - GetDatabases() []*VersionedDatabase - - // Close all of the databases controlled by the manager. - Close() error - - // NewPrefixDBManager returns a new database manager with each of its - // databases prefixed with [prefix]. - NewPrefixDBManager(prefix []byte) Manager - - // NewNestedPrefixDBManager returns a new database manager where each of its - // databases has the nested prefix [prefix] applied to it. - NewNestedPrefixDBManager(prefix []byte) Manager - - // NewMeterDBManager returns a new database manager with each of its - // databases wrapped with a meterdb instance to support metrics on database - // performance. 
- NewMeterDBManager(namespace string, registerer prometheus.Registerer) (Manager, error) - - // NewCompleteMeterDBManager wraps each database instance with a meterdb - // instance. The namespace is concatenated with the version of the database. - // Note: calling this more than once with the same [namespace] will cause a - // conflict error for the [registerer]. - NewCompleteMeterDBManager(namespace string, registerer prometheus.Registerer) (Manager, error) -} - -type manager struct { - // databases with the current version at index 0 and prior versions in - // descending order - // invariant: len(databases) > 0 - databases []*VersionedDatabase -} - -// NewLevelDB creates a database manager of levelDBs at [filePath] by creating a -// database instance from each directory with a version <= [currentVersion]. If -// [includePreviousVersions], opens previous database versions and includes them -// in the returned Manager. -func NewLevelDB( - dbDirPath string, - dbConfig []byte, - log logging.Logger, - currentVersion *version.Semantic, - namespace string, - reg prometheus.Registerer, -) (Manager, error) { - return new( - leveldb.New, - dbDirPath, - dbConfig, - log, - currentVersion, - namespace, - reg, - ) -} - -// new creates a database manager at [filePath] by creating a database instance -// from each directory with a version <= [currentVersion]. If -// [includePreviousVersions], opens previous database versions and includes them -// in the returned Manager. 
-func new( - newDB func(string, []byte, logging.Logger, string, prometheus.Registerer) (database.Database, error), - dbDirPath string, - dbConfig []byte, - log logging.Logger, - currentVersion *version.Semantic, - namespace string, - reg prometheus.Registerer, -) (Manager, error) { - currentDBPath := filepath.Join(dbDirPath, currentVersion.String()) - - currentDB, err := newDB(currentDBPath, dbConfig, log, namespace, reg) - if err != nil { - return nil, fmt.Errorf("couldn't create db at %s: %w", currentDBPath, err) - } - - wrappedDB := corruptabledb.New(currentDB) - - manager := &manager{ - databases: []*VersionedDatabase{ - { - Database: wrappedDB, - Version: currentVersion, - }, - }, - } - - // Open old database versions and add them to [manager] - err = filepath.Walk(dbDirPath, func(path string, info os.FileInfo, err error) error { - // the walkFn is called with a non-nil error argument if an os.Lstat - // or Readdirnames call returns an error. Both cases are considered - // fatal in the traversal. - // Reference: https://golang.org/pkg/path/filepath/#WalkFunc - if err != nil { - return err - } - // Skip the root directory - if path == dbDirPath { - return nil - } - - // If the database directory contains any files, ignore them. - if !info.IsDir() { - return nil - } - _, dbName := filepath.Split(path) - dbVersion, err := version.Parse(dbName) - if err != nil { - // If the database directory contains any directories that don't - // match the expected version format, ignore them. - return filepath.SkipDir - } - - // If [dbVersion] is greater than or equal to the specified version - // skip over creating the new database to avoid creating the same db - // twice or creating a database with a version ahead of the desired one. 
- if cmp := dbVersion.Compare(currentVersion); cmp >= 0 { - return filepath.SkipDir - } - - versionStr := strings.ReplaceAll(dbName, ".", "_") - var dbNamespace string - if len(namespace) > 0 { - dbNamespace = fmt.Sprintf("%s_%s", namespace, versionStr) - } else { - dbNamespace = versionStr - } - - db, err := newDB(path, dbConfig, log, dbNamespace, reg) - if err != nil { - return fmt.Errorf("couldn't create db at %s: %w", path, err) - } - - manager.databases = append(manager.databases, &VersionedDatabase{ - Database: corruptabledb.New(db), - Version: dbVersion, - }) - - return filepath.SkipDir - }) - utils.Sort(manager.databases) - - // If an error occurred walking [dbDirPath] close the - // database manager and return the original error here. - if err != nil { - _ = manager.Close() - return nil, err - } - - return manager, nil -} - -// NewMemDB returns a database manager with a single memdb instance with -// [currentVersion]. -func NewMemDB(currentVersion *version.Semantic) Manager { - return &manager{ - databases: []*VersionedDatabase{ - { - Database: memdb.New(), - Version: currentVersion, - }, - }, - } -} - -// NewManagerFromDBs -func NewManagerFromDBs(dbs []*VersionedDatabase) (Manager, error) { - if len(dbs) == 0 { - return nil, errNoDBs - } - utils.Sort(dbs) - sortedAndUnique := utils.IsSortedAndUnique(dbs) - if !sortedAndUnique { - return nil, errNonSortedAndUniqueDBs - } - return &manager{ - databases: dbs, - }, nil -} - -func (m *manager) Current() *VersionedDatabase { - return m.databases[0] -} - -func (m *manager) Previous() (*VersionedDatabase, bool) { - if len(m.databases) < 2 { - return nil, false - } - return m.databases[1], true -} - -func (m *manager) GetDatabases() []*VersionedDatabase { - return m.databases -} - -func (m *manager) Close() error { - errs := wrappers.Errs{} - for _, db := range m.databases { - errs.Add(db.Close()) - } - return errs.Err -} - -// NewPrefixDBManager creates a new manager with each database instance prefixed -// by 
[prefix] -func (m *manager) NewPrefixDBManager(prefix []byte) Manager { - m, _ = m.wrapManager(func(vdb *VersionedDatabase) (*VersionedDatabase, error) { - return &VersionedDatabase{ - Database: prefixdb.New(prefix, vdb.Database), - Version: vdb.Version, - }, nil - }) - return m -} - -// NewNestedPrefixDBManager creates a new manager with each database instance -// wrapped with a nested prfix of [prefix] -func (m *manager) NewNestedPrefixDBManager(prefix []byte) Manager { - m, _ = m.wrapManager(func(vdb *VersionedDatabase) (*VersionedDatabase, error) { - return &VersionedDatabase{ - Database: prefixdb.NewNested(prefix, vdb.Database), - Version: vdb.Version, - }, nil - }) - return m -} - -// NewMeterDBManager wraps the current database instance with a meterdb instance. -// Note: calling this more than once with the same [namespace] will cause a conflict error for the [registerer] -func (m *manager) NewMeterDBManager(namespace string, registerer prometheus.Registerer) (Manager, error) { - currentDB := m.Current() - currentMeterDB, err := meterdb.New(namespace, registerer, currentDB.Database) - if err != nil { - return nil, err - } - newManager := &manager{ - databases: make([]*VersionedDatabase, len(m.databases)), - } - copy(newManager.databases[1:], m.databases[1:]) - // Overwrite the current database with the meter DB - newManager.databases[0] = &VersionedDatabase{ - Database: currentMeterDB, - Version: currentDB.Version, - } - return newManager, nil -} - -// NewCompleteMeterDBManager wraps each database instance with a meterdb instance. The namespace -// is concatenated with the version of the database. 
Note: calling this more than once -// with the same [namespace] will cause a conflict error for the [registerer] -func (m *manager) NewCompleteMeterDBManager(namespace string, registerer prometheus.Registerer) (Manager, error) { - return m.wrapManager(func(vdb *VersionedDatabase) (*VersionedDatabase, error) { - mdb, err := meterdb.New(fmt.Sprintf("%s_%s", namespace, strings.ReplaceAll(vdb.Version.String(), ".", "_")), registerer, vdb.Database) - if err != nil { - return nil, err - } - return &VersionedDatabase{ - Database: mdb, - Version: vdb.Version, - }, nil - }) -} - -// wrapManager returns a new database manager with each managed database wrapped -// by the [wrap] function. If an error is returned by wrap, the error is -// returned immediately. If [wrap] never returns an error, then wrapManager is -// guaranteed to never return an error. The function wrap must return a database -// that can be closed without closing the underlying database. -func (m *manager) wrapManager(wrap func(db *VersionedDatabase) (*VersionedDatabase, error)) (*manager, error) { - newManager := &manager{ - databases: make([]*VersionedDatabase, 0, len(m.databases)), - } - for _, db := range m.databases { - wrappedDB, err := wrap(db) - if err != nil { - // ignore additional errors in favor of returning the original error - _ = newManager.Close() - return nil, err - } - newManager.databases = append(newManager.databases, wrappedDB) - } - return newManager, nil -} diff --git a/database/manager/manager_test.go b/database/manager/manager_test.go deleted file mode 100644 index 5e7986b27416..000000000000 --- a/database/manager/manager_test.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package manager - -import ( - "os" - "path/filepath" - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database/leveldb" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/meterdb" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/version" -) - -func TestNewSingleLevelDB(t *testing.T) { - require := require.New(t) - dir := t.TempDir() - - v1 := version.Semantic1_0_0 - - dbPath := filepath.Join(dir, v1.String()) - db, err := leveldb.New(dbPath, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(err) - - require.NoError(db.Close()) - - manager, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) - require.NoError(err) - - semDB := manager.Current() - require.Zero(semDB.Version.Compare(v1)) - - _, exists := manager.Previous() - require.False(exists) - require.Len(manager.GetDatabases(), 1) - - require.NoError(manager.Close()) -} - -func TestNewCreatesSingleDB(t *testing.T) { - require := require.New(t) - - dir := t.TempDir() - - v1 := version.Semantic1_0_0 - - manager, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) - require.NoError(err) - - semDB := manager.Current() - require.Zero(semDB.Version.Compare(v1)) - - _, exists := manager.Previous() - require.False(exists) - - require.Len(manager.GetDatabases(), 1) - - require.NoError(manager.Close()) -} - -func TestNewInvalidMemberPresent(t *testing.T) { - require := require.New(t) - - dir := t.TempDir() - - v1 := &version.Semantic{ - Major: 1, - Minor: 1, - Patch: 0, - } - v2 := &version.Semantic{ - Major: 1, - Minor: 2, - Patch: 0, - } - - dbPath1 := filepath.Join(dir, v1.String()) - db1, err := 
leveldb.New(dbPath1, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(err) - - dbPath2 := filepath.Join(dir, v2.String()) - db2, err := leveldb.New(dbPath2, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(err) - - require.NoError(db2.Close()) - - _, err = NewLevelDB(dir, nil, logging.NoLog{}, v2, "", prometheus.NewRegistry()) - require.ErrorIs(err, leveldb.ErrCouldNotOpen) - - require.NoError(db1.Close()) - - f, err := os.Create(filepath.Join(dir, "dummy")) - require.NoError(err) - - require.NoError(f.Close()) - - db, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) - require.NoError(err, "expected not to error with a non-directory file being present") - - require.NoError(db.Close()) -} - -func TestNewSortsDatabases(t *testing.T) { - require := require.New(t) - - dir := t.TempDir() - - vers := []*version.Semantic{ - { - Major: 2, - Minor: 1, - Patch: 2, - }, - { - Major: 2, - Minor: 0, - Patch: 2, - }, - { - Major: 1, - Minor: 3, - Patch: 2, - }, - { - Major: 1, - Minor: 0, - Patch: 2, - }, - { - Major: 1, - Minor: 0, - Patch: 1, - }, - } - - for _, version := range vers { - dbPath := filepath.Join(dir, version.String()) - db, err := leveldb.New(dbPath, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(err) - - require.NoError(db.Close()) - } - - manager, err := NewLevelDB(dir, nil, logging.NoLog{}, vers[0], "", prometheus.NewRegistry()) - require.NoError(err) - - defer func() { - require.NoError(manager.Close()) - }() - - semDB := manager.Current() - require.Zero(semDB.Version.Compare(vers[0])) - - prev, exists := manager.Previous() - require.True(exists) - require.Zero(prev.Version.Compare(vers[1])) - - dbs := manager.GetDatabases() - require.Len(dbs, len(vers)) - - for i, db := range dbs { - require.Zero(db.Version.Compare(vers[i])) - } -} - -func TestPrefixDBManager(t *testing.T) { - require := require.New(t) - - db := memdb.New() - - prefix0 := []byte{0} - db0 := 
prefixdb.New(prefix0, db) - - prefix1 := []byte{1} - db1 := prefixdb.New(prefix1, db0) - - k0 := []byte{'s', 'c', 'h', 'n', 'i'} - v0 := []byte{'t', 'z', 'e', 'l'} - k1 := []byte{'c', 'u', 'r', 'r', 'y'} - v1 := []byte{'w', 'u', 'r', 's', 't'} - - require.NoError(db0.Put(k0, v0)) - require.NoError(db1.Put(k1, v1)) - require.NoError(db0.Close()) - require.NoError(db1.Close()) - - m := &manager{databases: []*VersionedDatabase{ - { - Database: db, - Version: version.Semantic1_0_0, - }, - }} - - m0 := m.NewPrefixDBManager(prefix0) - m1 := m0.NewPrefixDBManager(prefix1) - - val, err := m0.Current().Database.Get(k0) - require.NoError(err) - require.Equal(v0, val) - - val, err = m1.Current().Database.Get(k1) - require.NoError(err) - require.Equal(v1, val) -} - -func TestNestedPrefixDBManager(t *testing.T) { - require := require.New(t) - - db := memdb.New() - - prefix0 := []byte{0} - db0 := prefixdb.NewNested(prefix0, db) - - prefix1 := []byte{1} - db1 := prefixdb.NewNested(prefix1, db0) - - k0 := []byte{'s', 'c', 'h', 'n', 'i'} - v0 := []byte{'t', 'z', 'e', 'l'} - k1 := []byte{'c', 'u', 'r', 'r', 'y'} - v1 := []byte{'w', 'u', 'r', 's', 't'} - - require.NoError(db0.Put(k0, v0)) - require.NoError(db1.Put(k1, v1)) - require.NoError(db0.Close()) - require.NoError(db1.Close()) - - m := &manager{databases: []*VersionedDatabase{ - { - Database: db, - Version: version.Semantic1_0_0, - }, - }} - - m0 := m.NewNestedPrefixDBManager(prefix0) - m1 := m0.NewNestedPrefixDBManager(prefix1) - - val, err := m0.Current().Database.Get(k0) - require.NoError(err) - require.Equal(v0, val) - - val, err = m1.Current().Database.Get(k1) - require.NoError(err) - require.Equal(v1, val) -} - -func TestMeterDBManager(t *testing.T) { - require := require.New(t) - - registry := prometheus.NewRegistry() - - m := &manager{databases: []*VersionedDatabase{ - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 2, - Minor: 0, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: 
&version.Semantic{ - Major: 1, - Minor: 5, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: version.Semantic1_0_0, - }, - }} - - // Create meterdb manager with fresh registry and confirm - // that there are no errors registering metrics for multiple - // versioned databases. - manager, err := m.NewMeterDBManager("", registry) - require.NoError(err) - - dbs := manager.GetDatabases() - require.Len(dbs, 3) - - require.IsType(&meterdb.Database{}, dbs[0].Database) - require.IsType(&memdb.Database{}, dbs[1].Database) - require.IsType(&memdb.Database{}, dbs[2].Database) - - // Confirm that the error from a name conflict is handled correctly - _, err = m.NewMeterDBManager("", registry) - require.ErrorIs(err, metric.ErrFailedRegistering) -} - -func TestCompleteMeterDBManager(t *testing.T) { - require := require.New(t) - - registry := prometheus.NewRegistry() - - m := &manager{databases: []*VersionedDatabase{ - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 2, - Minor: 0, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 1, - Minor: 5, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: version.Semantic1_0_0, - }, - }} - - // Create complete meterdb manager with fresh registry and confirm - // that there are no errors registering metrics for multiple - // versioned databases. 
- manager, err := m.NewCompleteMeterDBManager("", registry) - require.NoError(err) - - dbs := manager.GetDatabases() - require.Len(dbs, 3) - - require.IsType(&meterdb.Database{}, dbs[0].Database) - require.IsType(&meterdb.Database{}, dbs[1].Database) - require.IsType(&meterdb.Database{}, dbs[2].Database) - - // Confirm that the error from a name conflict is handled correctly - _, err = m.NewCompleteMeterDBManager("", registry) - require.ErrorIs(err, metric.ErrFailedRegistering) -} - -func TestNewManagerFromDBs(t *testing.T) { - require := require.New(t) - - versions := []*version.Semantic{ - { - Major: 3, - Minor: 2, - Patch: 0, - }, - { - Major: 1, - Minor: 2, - Patch: 0, - }, - { - Major: 1, - Minor: 1, - Patch: 1, - }, - } - m, err := NewManagerFromDBs( - []*VersionedDatabase{ - { - Database: memdb.New(), - Version: versions[2], - }, - { - Database: memdb.New(), - Version: versions[1], - }, - { - Database: memdb.New(), - Version: versions[0], - }, - }) - require.NoError(err) - - dbs := m.GetDatabases() - require.Len(dbs, len(versions)) - for i, db := range dbs { - require.Zero(db.Version.Compare(versions[i])) - } -} - -func TestNewManagerFromNoDBs(t *testing.T) { - require := require.New(t) - // Should error if no dbs are given - _, err := NewManagerFromDBs(nil) - require.ErrorIs(err, errNoDBs) -} - -func TestNewManagerFromNonUniqueDBs(t *testing.T) { - require := require.New(t) - - _, err := NewManagerFromDBs( - []*VersionedDatabase{ - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 1, - Minor: 1, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 1, - Minor: 1, - Patch: 0, - }, // Duplicate - }, - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 1, - Minor: 2, - Patch: 0, - }, - }, - }) - require.ErrorIs(err, errNonSortedAndUniqueDBs) -} diff --git a/database/manager/versioned_database.go b/database/manager/versioned_database.go deleted file mode 100644 index 6ff983a95a92..000000000000 
--- a/database/manager/versioned_database.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package manager - -import ( - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/version" -) - -var _ utils.Sortable[*VersionedDatabase] = (*VersionedDatabase)(nil) - -type VersionedDatabase struct { - Database database.Database - Version *version.Semantic -} - -// Close the underlying database -func (db *VersionedDatabase) Close() error { - return db.Database.Close() -} - -// Note this sorts in descending order (newest version --> oldest version) -func (db *VersionedDatabase) Less(other *VersionedDatabase) bool { - return db.Version.Compare(other.Version) > 0 -} diff --git a/database/memdb/db_test.go b/database/memdb/db_test.go index b0497758f5c2..10d8ebe2be25 100644 --- a/database/memdb/db_test.go +++ b/database/memdb/db_test.go @@ -23,6 +23,10 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { database.FuzzNewIteratorWithPrefix(f, New()) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New()) +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/database/meterdb/db_test.go b/database/meterdb/db_test.go index ddd613353946..a54d25c21542 100644 --- a/database/meterdb/db_test.go +++ b/database/meterdb/db_test.go @@ -24,28 +24,30 @@ func TestInterface(t *testing.T) { } } -func FuzzKeyValue(f *testing.F) { +func newDB(t testing.TB) database.Database { baseDB := memdb.New() db, err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(f, err) - database.FuzzKeyValue(f, db) + require.NoError(t, err) + return db +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB(f)) } func 
FuzzNewIteratorWithPrefix(f *testing.F) { - baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(f, err) - database.FuzzNewIteratorWithPrefix(f, db) + database.FuzzNewIteratorWithPrefix(f, newDB(f)) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB(f)) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) for _, bench := range database.Benchmarks { - baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) - require.NoError(b, err) - bench(b, db, "meterdb", keys, values) + bench(b, newDB(b), "meterdb", keys, values) } } } diff --git a/database/pebble/db.go b/database/pebble/db.go index 13ab1db04977..7aa718082a35 100644 --- a/database/pebble/db.go +++ b/database/pebble/db.go @@ -24,9 +24,13 @@ import ( "github.com/ava-labs/avalanchego/utils/units" ) -// pebbleByteOverHead is the number of bytes of constant overhead that -// should be added to a batch size per operation. -const pebbleByteOverHead = 8 +const ( + Name = "pebble" + + // pebbleByteOverHead is the number of bytes of constant overhead that + // should be added to a batch size per operation. 
+ pebbleByteOverHead = 8 +) var ( _ database.Database = (*Database)(nil) @@ -73,10 +77,12 @@ type Config struct { } // TODO: Add metrics -func New(file string, configBytes []byte, log logging.Logger, _ string, _ prometheus.Registerer) (*Database, error) { - var cfg Config - if err := json.Unmarshal(configBytes, &cfg); err != nil { - return nil, err +func New(file string, configBytes []byte, log logging.Logger, _ string, _ prometheus.Registerer) (database.Database, error) { + cfg := DefaultConfig + if len(configBytes) > 0 { + if err := json.Unmarshal(configBytes, &cfg); err != nil { + return nil, err + } } opts := &pebble.Options{ diff --git a/database/pebble/db_test.go b/database/pebble/db_test.go index c72a9d687c88..043caa23c813 100644 --- a/database/pebble/db_test.go +++ b/database/pebble/db_test.go @@ -18,7 +18,7 @@ func newDB(t testing.TB) *Database { folder := t.TempDir() db, err := New(folder, DefaultConfigBytes, logging.NoLog{}, "pebble", prometheus.NewRegistry()) require.NoError(t, err) - return db + return db.(*Database) } func TestInterface(t *testing.T) { @@ -41,6 +41,12 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { _ = db.Close() } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := newDB(f) + database.FuzzNewIteratorWithStartAndPrefix(f, db) + _ = db.Close() +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/database/prefixdb/db_test.go b/database/prefixdb/db_test.go index 065e1d7a00c8..a0539b8e0105 100644 --- a/database/prefixdb/db_test.go +++ b/database/prefixdb/db_test.go @@ -30,6 +30,10 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { database.FuzzNewIteratorWithPrefix(f, New([]byte(""), memdb.New())) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New([]byte(""), memdb.New())) +} + func BenchmarkInterface(b *testing.B) { for _, size := range 
database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/database/rpcdb/db_test.go b/database/rpcdb/db_test.go index 763b95b83f75..baabc9a69fbf 100644 --- a/database/rpcdb/db_test.go +++ b/database/rpcdb/db_test.go @@ -75,6 +75,13 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { db.closeFn() } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := setupDB(f) + database.FuzzNewIteratorWithStartAndPrefix(f, db.client) + + db.closeFn() +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/database/test_database.go b/database/test_database.go index 2e68f53341b8..fc7e644ae5fd 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -1266,7 +1266,74 @@ func FuzzNewIteratorWithPrefix(f *testing.F, db Database) { require.Equal(expected[string(iter.Key())], val) numIterElts++ } - require.Equal(len(expectedList), numIterElts) + require.Len(expectedList, numIterElts) + + // Clear the database for the next fuzz iteration. 
+ require.NoError(AtomicClear(db, db)) + }) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F, db Database) { + const ( + maxKeyLen = 32 + maxValueLen = 32 + ) + + f.Fuzz(func( + t *testing.T, + randSeed int64, + start []byte, + prefix []byte, + numKeyValues uint, + ) { + require := require.New(t) + r := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + expected := map[string][]byte{} + + // Put a bunch of key-values + for i := 0; i < int(numKeyValues); i++ { + key := make([]byte, r.Intn(maxKeyLen)) + _, _ = r.Read(key) // #nosec G404 + + value := make([]byte, r.Intn(maxValueLen)) + _, _ = r.Read(value) // #nosec G404 + + if len(value) == 0 { + // Consistently treat zero length values as nil + // so that we can compare [expected] and [got] with + // require.Equal, which treats nil and empty byte + // as being unequal, whereas the database treats + // them as being equal. + value = nil + } + + if bytes.HasPrefix(key, prefix) && bytes.Compare(key, start) >= 0 { + expected[string(key)] = value + } + + require.NoError(db.Put(key, value)) + } + + expectedList := maps.Keys(expected) + slices.Sort(expectedList) + + iter := db.NewIteratorWithStartAndPrefix(start, prefix) + defer iter.Release() + + // Assert the iterator returns the expected key-values. + numIterElts := 0 + for iter.Next() { + val := iter.Value() + if len(val) == 0 { + val = nil + } + keyStr := string(iter.Key()) + require.Equal(expectedList[numIterElts], keyStr) + require.Equal(expected[keyStr], val) + numIterElts++ + } + require.Len(expectedList, numIterElts) // Clear the database for the next fuzz iteration. 
require.NoError(AtomicClear(db, db)) diff --git a/database/versiondb/db_test.go b/database/versiondb/db_test.go index fdea2934b8d2..c2f57caacf61 100644 --- a/database/versiondb/db_test.go +++ b/database/versiondb/db_test.go @@ -27,6 +27,10 @@ func FuzzNewIteratorWithPrefix(f *testing.F) { database.FuzzNewIteratorWithPrefix(f, New(memdb.New())) } +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New(memdb.New())) +} + func TestIterate(t *testing.T) { require := require.New(t) diff --git a/genesis/config.go b/genesis/config.go index c14b3b77f771..2a7063f87940 100644 --- a/genesis/config.go +++ b/genesis/config.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( @@ -172,30 +171,28 @@ func init() { unparsedFujiConfig := UnparsedConfig{} unparsedLocalConfig := UnparsedConfig{} - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( json.Unmarshal(mainnetGenesisConfigJSON, &unparsedMainnetConfig), json.Unmarshal(fujiGenesisConfigJSON, &unparsedFujiConfig), json.Unmarshal(localGenesisConfigJSON, &unparsedLocalConfig), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } - mainnetConfig, err := unparsedMainnetConfig.Parse() - errs.Add(err) - MainnetConfig = mainnetConfig - - fujiConfig, err := unparsedFujiConfig.Parse() - errs.Add(err) - FujiConfig = fujiConfig + MainnetConfig, err = unparsedMainnetConfig.Parse() + if err != nil { + panic(err) + } - localConfig, err := unparsedLocalConfig.Parse() - errs.Add(err) - LocalConfig = localConfig + FujiConfig, err = unparsedFujiConfig.Parse() + if err != nil { + panic(err) + } - if errs.Errored() { - panic(errs.Err) + LocalConfig, err = unparsedLocalConfig.Parse() + if err != nil { + panic(err) } } diff --git a/genesis/genesis.go 
b/genesis/genesis.go index 821f72502488..e59b8c6fbbbc 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -418,8 +418,8 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { delegationFee := json.Uint32(staker.DelegationFee) platformvmArgs.Validators = append(platformvmArgs.Validators, - api.PermissionlessValidator{ - Staker: api.Staker{ + api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(genesisTime.Unix()), EndTime: json.Uint64(endStakingTime.Unix()), NodeID: staker.NodeID, diff --git a/genesis/genesis_local.go b/genesis/genesis_local.go index 7a136360e79a..de650009fd26 100644 --- a/genesis/genesis_local.go +++ b/genesis/genesis_local.go @@ -72,10 +72,9 @@ func init() { ewoqBytes, err := cb58.Decode(EWOQKeyStr) errs.Add(err) - factory := secp256k1.Factory{} - VMRQKey, err = factory.ToPrivateKey(vmrqBytes) + VMRQKey, err = secp256k1.ToPrivateKey(vmrqBytes) errs.Add(err) - EWOQKey, err = factory.ToPrivateKey(ewoqBytes) + EWOQKey, err = secp256k1.ToPrivateKey(ewoqBytes) errs.Add(err) if errs.Err != nil { diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go index 54cd7891787e..b18bc7f521a3 100644 --- a/genesis/genesis_test.go +++ b/genesis/genesis_test.go @@ -5,6 +5,7 @@ package genesis import ( "encoding/base64" + "encoding/hex" "encoding/json" "fmt" "os" @@ -244,7 +245,7 @@ func TestGenesisFromFile(t *testing.T) { genesisBytes, _, err := FromFile(test.networkID, customFile, genesisStakingCfg) require.ErrorIs(err, test.expectedErr) if test.expectedErr == nil { - genesisHash := fmt.Sprintf("%x", hashing.ComputeHash256(genesisBytes)) + genesisHash := hex.EncodeToString(hashing.ComputeHash256(genesisBytes)) require.Equal(test.expectedHash, genesisHash, "genesis hash mismatch") _, err = genesis.Parse(genesisBytes) @@ -330,7 +331,7 @@ func TestGenesisFromFlag(t *testing.T) { genesisBytes, _, err := FromFlag(test.networkID, content, genesisStakingCfg) require.ErrorIs(err, test.expectedErr) if 
test.expectedErr == nil { - genesisHash := fmt.Sprintf("%x", hashing.ComputeHash256(genesisBytes)) + genesisHash := hex.EncodeToString(hashing.ComputeHash256(genesisBytes)) require.Equal(test.expectedHash, genesisHash, "genesis hash mismatch") _, err = genesis.Parse(genesisBytes) diff --git a/go.mod b/go.mod index f351efbef090..bf06bbc74e83 100644 --- a/go.mod +++ b/go.mod @@ -11,8 +11,8 @@ require ( github.com/DataDog/zstd v1.5.2 github.com/Microsoft/go-winio v0.5.2 github.com/NYTimes/gziphandler v1.1.1 - github.com/ava-labs/coreth v0.12.7-rc.1 - github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 + github.com/ava-labs/coreth v0.12.9-rc.0 + github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 @@ -44,10 +44,11 @@ require ( github.com/spf13/cobra v1.0.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.4 github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a github.com/thepudds/fzgen v0.4.2 + github.com/tyler-smith/go-bip32 v1.0.0 go.opentelemetry.io/otel v1.11.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 @@ -56,7 +57,7 @@ require ( go.opentelemetry.io/otel/trace v1.11.0 go.uber.org/goleak v1.2.1 go.uber.org/mock v0.2.0 - go.uber.org/zap v1.24.0 + go.uber.org/zap v1.26.0 golang.org/x/crypto v0.14.0 golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df golang.org/x/net v0.17.0 @@ -72,8 +73,9 @@ require ( require ( github.com/BurntSushi/toml v1.2.1 // indirect + github.com/FactomProject/basen 
v0.0.0-20150613233007-fe3947df716e // indirect + github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec // indirect github.com/VictoriaMetrics/fastcache v1.10.0 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect @@ -138,12 +140,11 @@ require ( github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - github.com/zondax/hid v0.9.1 // indirect - github.com/zondax/ledger-go v0.14.1 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.8.0 // indirect + go.uber.org/multierr v1.10.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect diff --git a/go.sum b/go.sum index d301d4b6be81..573d618f19cb 100644 --- a/go.sum +++ b/go.sum @@ -45,6 +45,10 @@ github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3 github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e h1:ahyvB3q25YnZWly5Gq1ekg6jcmWaGj/vG/MhF4aisoc= +github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e/go.mod 
h1:kGUqhHd//musdITWjFvNTHn90WG9bMLBEPQZ17Cmlpw= +github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec h1:1Qb69mGp/UtRPn422BH4/Y4Q3SLUrD9KHuDkm8iodFc= +github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec/go.mod h1:CD8UlnlLDiqb36L110uqiP2iSflVjx9g/3U9hCI4q2U= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= @@ -62,13 +66,11 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/coreth v0.12.7-rc.1 h1:fvjow2Jqkq1RNtW4v2Kx0DdTVp+3+fCY421TxpDDRfM= -github.com/ava-labs/coreth v0.12.7-rc.1/go.mod h1:sNbwitXv4AhLvWpSqy6V8yzkhGFeWBQFD31/xiRDJ5M= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 h1:EdxD90j5sClfL5Ngpz2TlnbnkNYdFPDXa0jDOjam65c= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7/go.mod h1:XhiXSrh90sHUbkERzaxEftCmUz53eCijshDLZ4fByVM= +github.com/ava-labs/coreth v0.12.9-rc.0 h1:Xvk/iJTY2MSBkkiOs9Eo92nxd67VXzRjaC/WmQXRIb0= +github.com/ava-labs/coreth v0.12.9-rc.0/go.mod h1:rECKQfGFDeodrwGPlJSvFUJDbVr30jSMIVjQLi6pNX4= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 h1:mg9Uw6oZFJKytJxgxnl3uxZOs/SB8CVHg6Io4Tf99Zc= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= 
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -113,6 +115,8 @@ github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86c github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e h1:0XBUw73chJ1VYSsfvcPvVT7auykAJce9FpRr10L6Qhw= +github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:P13beTBKr5Q18lJe1rIoLUqjM+CB1zYrRg44ZqGuQSA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -573,8 +577,8 @@ github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -583,8 +587,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= @@ -599,6 +603,8 @@ github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITn github.com/tklauser/numcpus v0.2.2 
h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tyler-smith/go-bip32 v1.0.0 h1:sDR9juArbUgX+bO/iblgZnMPeWY1KZMUC2AFUJdv5KE= +github.com/tyler-smith/go-bip32 v1.0.0/go.mod h1:onot+eHknzV4BVPwrzqY5OoVpyCvnwD7lMawL5aQupE= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -632,10 +638,10 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= -github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= -github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c= -github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= 
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -661,19 +667,17 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU= go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20170613210332-850760c427c5/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1107,6 +1111,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/ids/test_generator.go b/ids/test_generator.go index 2f8edf3567d8..29ecfa8d409c 100644 --- a/ids/test_generator.go +++ b/ids/test_generator.go @@ -23,3 +23,12 @@ func GenerateTestShortID() ShortID { func GenerateTestNodeID() NodeID { return NodeID(GenerateTestShortID()) } + +// BuildTestNodeID is an utility to build NodeID from bytes in UTs +// It must not be used in production code. In production code we should +// use ToNodeID, which performs proper length checking. 
+func BuildTestNodeID(src []byte) NodeID { + res := NodeID{} + copy(res[:], src) + return res +} diff --git a/indexer/index.go b/indexer/index.go index fae7ebcb12e1..5361754bf73d 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -17,10 +17,10 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" ) // Maximum number of containers IDs that can be fetched at a time in a call to @@ -114,14 +114,12 @@ func newIndex( // Close this index func (i *index) Close() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( i.indexToContainer.Close(), i.containerToIndex.Close(), i.vDB.Close(), i.baseDB.Close(), ) - return errs.Err } // Index that the given transaction is accepted diff --git a/ipcs/eventsocket.go b/ipcs/eventsocket.go index 37b370c36918..109b42bb34af 100644 --- a/ipcs/eventsocket.go +++ b/ipcs/eventsocket.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ipcs/socket" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -133,12 +134,10 @@ func newEventIPCSocket( url: url, socket: socket.NewSocket(url, ctx.log), unregisterFn: func() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( snowmanAcceptorGroup.DeregisterAcceptor(chainID, ipcName), avalancheAcceptorGroup.DeregisterAcceptor(chainID, ipcName), ) - return errs.Err }, } @@ -175,9 +174,10 @@ func (eis *eventSocket) Accept(_ *snow.ConsensusContext, _ ids.ID, container []b 
// stop unregisters the event handler and closes the eventSocket func (eis *eventSocket) stop() error { eis.log.Info("closing Chain IPC") - errs := wrappers.Errs{} - errs.Add(eis.unregisterFn(), eis.socket.Close()) - return errs.Err + return utils.Err( + eis.unregisterFn(), + eis.socket.Close(), + ) } // URL returns the URL of the socket diff --git a/ipcs/socket/socket_test.go b/ipcs/socket/socket_test.go index 4204d032285a..a56329b28c3e 100644 --- a/ipcs/socket/socket_test.go +++ b/ipcs/socket/socket_test.go @@ -8,6 +8,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/logging" ) func TestSocketSendAndReceive(t *testing.T) { @@ -21,7 +23,7 @@ func TestSocketSendAndReceive(t *testing.T) { ) // Create socket and client; wait for client to connect - socket := NewSocket(socketName, nil) + socket := NewSocket(socketName, logging.NoLog{}) socket.accept, connCh = newTestAcceptFn(t) require.NoError(socket.Listen()) diff --git a/main/main.go b/main/main.go index 5d85530177dd..2b07898f9072 100644 --- a/main/main.go +++ b/main/main.go @@ -41,11 +41,16 @@ func main() { os.Exit(1) } - nodeApp := app.New(nodeConfig) // Create node wrapper if term.IsTerminal(int(os.Stdout.Fd())) { fmt.Println(app.Header) } + nodeApp, err := app.New(nodeConfig) + if err != nil { + fmt.Printf("couldn't start node: %s\n", err) + os.Exit(1) + } + exitCode := app.Run(nodeApp) os.Exit(exitCode) } diff --git a/network/metrics.go b/network/metrics.go index b35c60f0a602..3e566a31c99f 100644 --- a/network/metrics.go +++ b/network/metrics.go @@ -11,9 +11,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/peer" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" ) type metrics struct { @@ -147,8 +147,7 @@ func 
newMetrics(namespace string, registerer prometheus.Registerer, initialSubne peerConnectedStartTimes: make(map[ids.NodeID]float64), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(m.numTracked), registerer.Register(m.numPeers), registerer.Register(m.numSubnetPeers), @@ -182,7 +181,7 @@ func newMetrics(namespace string, registerer prometheus.Registerer, initialSubne m.nodeSubnetUptimeRewardingStake.WithLabelValues(subnetIDStr).Set(0) } - return m, errs.Err + return m, err } func (m *metrics) markConnected(peer peer.Peer) { diff --git a/network/network.go b/network/network.go index a0fd6cced124..3f89e0ea00ef 100644 --- a/network/network.go +++ b/network/network.go @@ -732,7 +732,6 @@ func (n *network) Peers(peerID ids.NodeID) ([]ips.ClaimedIPPort, error) { func (n *network) Dispatch() error { go n.runTimers() // Periodically perform operations go n.inboundConnUpgradeThrottler.Dispatch() - errs := wrappers.Errs{} for { // Continuously accept new connections if n.onCloseCtx.Err() != nil { break @@ -798,6 +797,7 @@ func (n *network) Dispatch() error { connected := n.connectedPeers.Sample(n.connectedPeers.Len(), peer.NoPrecondition) n.peersLock.RUnlock() + errs := wrappers.Errs{} for _, peer := range append(connecting, connected...) 
{ errs.Add(peer.AwaitClosed(context.TODO())) } diff --git a/network/p2p/gossip/gossip.go b/network/p2p/gossip/gossip.go index 2e987e529dbe..94d49260da40 100644 --- a/network/p2p/gossip/gossip.go +++ b/network/p2p/gossip/gossip.go @@ -16,8 +16,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( @@ -83,13 +83,11 @@ func NewPullGossiper[T any, U GossipableAny[T]]( }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( metrics.Register(p.receivedN), metrics.Register(p.receivedBytes), ) - - return p, errs.Err + return p, err } type PullGossiper[T any, U GossipableAny[T]] struct { diff --git a/network/p2p/gossip/handler.go b/network/p2p/gossip/handler.go index 987fa2e2ed41..ecaf58434bc2 100644 --- a/network/p2p/gossip/handler.go +++ b/network/p2p/gossip/handler.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/proto/pb/sdk" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) var ( @@ -52,13 +52,11 @@ func NewHandler[T Gossipable]( }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( metrics.Register(h.sentN), metrics.Register(h.sentBytes), ) - - return h, errs.Err + return h, err } type Handler[T Gossipable] struct { diff --git a/network/p2p/handler.go b/network/p2p/handler.go index 9212864e5436..b85195a5255c 100644 --- a/network/p2p/handler.go +++ b/network/p2p/handler.go @@ -30,7 +30,7 @@ type Handler interface { ctx context.Context, nodeID ids.NodeID, gossipBytes []byte, - ) error + ) // AppRequest is called when handling an AppRequest message. 
// Returns the bytes for the response corresponding to [requestBytes] AppRequest( @@ -50,11 +50,10 @@ type Handler interface { ) ([]byte, error) } +// NoOpHandler drops all messages type NoOpHandler struct{} -func (NoOpHandler) AppGossip(context.Context, ids.NodeID, []byte) error { - return nil -} +func (NoOpHandler) AppGossip(context.Context, ids.NodeID, []byte) {} func (NoOpHandler) AppRequest(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { return nil, nil @@ -68,14 +67,16 @@ func (NoOpHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []by type ValidatorHandler struct { Handler ValidatorSet ValidatorSet + Log logging.Logger } -func (v ValidatorHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { +func (v ValidatorHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { if !v.ValidatorSet.Has(ctx, nodeID) { - return ErrNotValidator + v.Log.Debug("dropping message", zap.Stringer("nodeID", nodeID)) + return } - return v.Handler.AppGossip(ctx, nodeID, gossipBytes) + v.Handler.AppGossip(ctx, nodeID, gossipBytes) } func (v ValidatorHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { @@ -88,14 +89,15 @@ func (v ValidatorHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, dea // responder automatically sends the response for a given request type responder struct { + Handler handlerID uint64 - handler Handler log logging.Logger sender common.AppSender } +// AppRequest calls the underlying handler and sends back the response to nodeID func (r *responder) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - appResponse, err := r.handler.AppRequest(ctx, nodeID, deadline, request) + appResponse, err := r.Handler.AppRequest(ctx, nodeID, deadline, request) if err != nil { r.log.Debug("failed to handle message", zap.Stringer("messageOp", 
message.AppRequestOp), @@ -111,21 +113,10 @@ func (r *responder) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID return r.sender.SendAppResponse(ctx, nodeID, requestID, appResponse) } -func (r *responder) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { - err := r.handler.AppGossip(ctx, nodeID, msg) - if err != nil { - r.log.Debug("failed to handle message", - zap.Stringer("messageOp", message.AppGossipOp), - zap.Stringer("nodeID", nodeID), - zap.Uint64("handlerID", r.handlerID), - zap.Binary("message", msg), - ) - } - return nil -} - +// CrossChainAppRequest calls the underlying handler and sends back the response +// to chainID func (r *responder) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { - appResponse, err := r.handler.CrossChainAppRequest(ctx, chainID, deadline, request) + appResponse, err := r.Handler.CrossChainAppRequest(ctx, chainID, deadline, request) if err != nil { r.log.Debug("failed to handle message", zap.Stringer("messageOp", message.CrossChainAppRequestOp), diff --git a/network/p2p/handler_test.go b/network/p2p/handler_test.go index 539076cb7062..b7a1b6bec899 100644 --- a/network/p2p/handler_test.go +++ b/network/p2p/handler_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) @@ -32,20 +33,20 @@ func TestValidatorHandlerAppGossip(t *testing.T) { name string validatorSet ValidatorSet nodeID ids.NodeID - expected error + expected bool }{ { name: "message dropped", validatorSet: testValidatorSet{}, nodeID: nodeID, - expected: ErrNotValidator, }, { name: "message handled", validatorSet: testValidatorSet{ validators: validatorSet, }, - nodeID: nodeID, + nodeID: nodeID, + expected: true, }, } @@ -53,13 +54,19 @@ func TestValidatorHandlerAppGossip(t *testing.T) { 
t.Run(tt.name, func(t *testing.T) { require := require.New(t) + called := false handler := ValidatorHandler{ - Handler: NoOpHandler{}, + Handler: testHandler{ + appGossipF: func(context.Context, ids.NodeID, []byte) { + called = true + }, + }, ValidatorSet: tt.validatorSet, + Log: logging.NoLog{}, } - err := handler.AppGossip(context.Background(), tt.nodeID, []byte("foobar")) - require.ErrorIs(err, tt.expected) + handler.AppGossip(context.Background(), tt.nodeID, []byte("foobar")) + require.Equal(tt.expected, called) }) } } @@ -96,6 +103,7 @@ func TestValidatorHandlerAppRequest(t *testing.T) { handler := ValidatorHandler{ Handler: NoOpHandler{}, ValidatorSet: tt.validatorSet, + Log: logging.NoLog{}, } _, err := handler.AppRequest(context.Background(), tt.nodeID, time.Time{}, []byte("foobar")) diff --git a/network/p2p/mocks/mock_handler.go b/network/p2p/mocks/mock_handler.go index b15c77b79896..0d4147d23183 100644 --- a/network/p2p/mocks/mock_handler.go +++ b/network/p2p/mocks/mock_handler.go @@ -40,11 +40,9 @@ func (m *MockHandler) EXPECT() *MockHandlerMockRecorder { } // AppGossip mocks base method. -func (m *MockHandler) AppGossip(arg0 context.Context, arg1 ids.NodeID, arg2 []byte) error { +func (m *MockHandler) AppGossip(arg0 context.Context, arg1 ids.NodeID, arg2 []byte) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppGossip", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 + m.ctrl.Call(m, "AppGossip", arg0, arg1, arg2) } // AppGossip indicates an expected call of AppGossip. 
diff --git a/network/p2p/router.go b/network/p2p/router.go index bd1f334cda7c..b689b7ae1a17 100644 --- a/network/p2p/router.go +++ b/network/p2p/router.go @@ -49,6 +49,7 @@ type pendingCrossChainAppRequest struct { CrossChainAppResponseCallback } +// meteredHandler emits metrics for a Handler type meteredHandler struct { *responder *metrics @@ -173,8 +174,8 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp r.handlers[handlerID] = &meteredHandler{ responder: &responder{ + Handler: handler, handlerID: handlerID, - handler: handler, log: r.log, sender: r.sender, }, @@ -198,6 +199,11 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp }, nil } +// AppRequest routes an AppRequest to a Handler based on the handler prefix. The +// message is dropped if no matching handler can be found. +// +// Any error condition propagated outside Handler application logic is +// considered fatal func (r *Router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { start := time.Now() parsedMsg, handler, ok := r.parse(request) @@ -212,6 +218,7 @@ func (r *Router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID ui return nil } + // call the corresponding handler and send back a response to nodeID if err := handler.AppRequest(ctx, nodeID, requestID, deadline, parsedMsg); err != nil { return err } @@ -220,10 +227,16 @@ func (r *Router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID ui return nil } +// AppRequestFailed routes an AppRequestFailed message to the callback +// corresponding to requestID. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal func (r *Router) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { start := time.Now() pending, ok := r.clearAppRequest(requestID) if !ok { + // we should never receive a timeout without a corresponding requestID return ErrUnrequestedResponse } @@ -232,10 +245,16 @@ func (r *Router) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, reques return nil } +// AppResponse routes an AppResponse message to the callback corresponding to +// requestID. +// +// Any error condition propagated outside Handler application logic is +// considered fatal func (r *Router) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { start := time.Now() pending, ok := r.clearAppRequest(requestID) if !ok { + // we should never receive a timeout without a corresponding requestID return ErrUnrequestedResponse } @@ -244,6 +263,11 @@ func (r *Router) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID u return nil } +// AppGossip routes an AppGossip message to a Handler based on the handler +// prefix. The message is dropped if no matching handler can be found. +// +// Any error condition propagated outside Handler application logic is +// considered fatal func (r *Router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte) error { start := time.Now() parsedMsg, handler, ok := r.parse(gossip) @@ -256,14 +280,18 @@ func (r *Router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte return nil } - if err := handler.AppGossip(ctx, nodeID, parsedMsg); err != nil { - return err - } + handler.AppGossip(ctx, nodeID, parsedMsg) handler.metrics.appGossipTime.Observe(float64(time.Since(start))) return nil } +// CrossChainAppRequest routes a CrossChainAppRequest message to a Handler +// based on the handler prefix. The message is dropped if no matching handler +// can be found. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal func (r *Router) CrossChainAppRequest( ctx context.Context, chainID ids.ID, @@ -292,10 +320,16 @@ func (r *Router) CrossChainAppRequest( return nil } +// CrossChainAppRequestFailed routes a CrossChainAppRequestFailed message to +// the callback corresponding to requestID. +// +// Any error condition propagated outside Handler application logic is +// considered fatal func (r *Router) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { start := time.Now() pending, ok := r.clearCrossChainAppRequest(requestID) if !ok { + // we should never receive a timeout without a corresponding requestID return ErrUnrequestedResponse } @@ -304,10 +338,16 @@ func (r *Router) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, return nil } +// CrossChainAppResponse routes a CrossChainAppResponse message to the callback +// corresponding to requestID. +// +// Any error condition propagated outside Handler application logic is +// considered fatal func (r *Router) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { start := time.Now() pending, ok := r.clearCrossChainAppRequest(requestID) if !ok { + // we should never receive a timeout without a corresponding requestID return ErrUnrequestedResponse } diff --git a/network/p2p/throttler_handler.go b/network/p2p/throttler_handler.go index e7b4d8f26082..4dd142c400c1 100644 --- a/network/p2p/throttler_handler.go +++ b/network/p2p/throttler_handler.go @@ -9,7 +9,10 @@ import ( "fmt" "time" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" ) var ( @@ -20,14 +23,16 @@ var ( type ThrottlerHandler struct { Handler Throttler Throttler + Log logging.Logger } -func (t ThrottlerHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { +func (t 
ThrottlerHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { if !t.Throttler.Handle(nodeID) { - return fmt.Errorf("dropping message from %s: %w", nodeID, ErrThrottled) + t.Log.Debug("dropping message", zap.Stringer("nodeID", nodeID)) + return } - return t.Handler.AppGossip(ctx, nodeID, gossipBytes) + t.Handler.AppGossip(ctx, nodeID, gossipBytes) } func (t ThrottlerHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { diff --git a/network/p2p/throttler_handler_test.go b/network/p2p/throttler_handler_test.go index af9c3fda7194..1e4ed9578bf6 100644 --- a/network/p2p/throttler_handler_test.go +++ b/network/p2p/throttler_handler_test.go @@ -11,34 +11,44 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" ) +var _ Handler = (*testHandler)(nil) + func TestThrottlerHandlerAppGossip(t *testing.T) { tests := []struct { - name string - Throttler Throttler - expectedErr error + name string + Throttler Throttler + expected bool }{ { - name: "throttled", + name: "not throttled", Throttler: NewSlidingWindowThrottler(time.Second, 1), + expected: true, }, { - name: "throttler errors", - Throttler: NewSlidingWindowThrottler(time.Second, 0), - expectedErr: ErrThrottled, + name: "throttled", + Throttler: NewSlidingWindowThrottler(time.Second, 0), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) + called := false handler := ThrottlerHandler{ - Handler: NoOpHandler{}, + Handler: testHandler{ + appGossipF: func(context.Context, ids.NodeID, []byte) { + called = true + }, + }, Throttler: tt.Throttler, + Log: logging.NoLog{}, } - err := handler.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte("foobar")) - require.ErrorIs(err, tt.expectedErr) + + handler.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte("foobar")) + 
require.Equal(tt.expected, called) }) } } @@ -50,11 +60,11 @@ func TestThrottlerHandlerAppRequest(t *testing.T) { expectedErr error }{ { - name: "throttled", + name: "not throttled", Throttler: NewSlidingWindowThrottler(time.Second, 1), }, { - name: "throttler errors", + name: "throttled", Throttler: NewSlidingWindowThrottler(time.Second, 0), expectedErr: ErrThrottled, }, @@ -66,9 +76,40 @@ func TestThrottlerHandlerAppRequest(t *testing.T) { handler := ThrottlerHandler{ Handler: NoOpHandler{}, Throttler: tt.Throttler, + Log: logging.NoLog{}, } _, err := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, []byte("foobar")) require.ErrorIs(err, tt.expectedErr) }) } } + +type testHandler struct { + appGossipF func(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) + appRequestF func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) + crossChainAppRequestF func(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) +} + +func (t testHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + if t.appGossipF == nil { + return + } + + t.appGossipF(ctx, nodeID, gossipBytes) +} + +func (t testHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { + if t.appRequestF == nil { + return nil, nil + } + + return t.appRequestF(ctx, nodeID, deadline, requestBytes) +} + +func (t testHandler) CrossChainAppRequest(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) { + if t.crossChainAppRequestF == nil { + return nil, nil + } + + return t.crossChainAppRequestF(ctx, chainID, deadline, requestBytes) +} diff --git a/network/peer/gossip_tracker_metrics.go b/network/peer/gossip_tracker_metrics.go index be167ebfec3d..e80f31765b9c 100644 --- a/network/peer/gossip_tracker_metrics.go +++ b/network/peer/gossip_tracker_metrics.go @@ -6,7 +6,7 @@ 
package peer import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type gossipTrackerMetrics struct { @@ -32,11 +32,9 @@ func newGossipTrackerMetrics(registerer prometheus.Registerer, namespace string) ), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(m.trackedPeersSize), registerer.Register(m.validatorsSize), ) - - return m, errs.Err + return m, err } diff --git a/network/peer/set_test.go b/network/peer/set_test.go index f26b1d19f8ec..fb67d25ef05f 100644 --- a/network/peer/set_test.go +++ b/network/peer/set_test.go @@ -18,24 +18,24 @@ func TestSet(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 0}, } updatedPeer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 1}, } peer2 := &peer{ - id: ids.NodeID{0x02}, + id: ids.BuildTestNodeID([]byte{0x02}), } unknownPeer := &peer{ - id: ids.NodeID{0xff}, + id: ids.BuildTestNodeID([]byte{0xff}), } peer3 := &peer{ - id: ids.NodeID{0x03}, + id: ids.BuildTestNodeID([]byte{0x03}), } peer4 := &peer{ - id: ids.NodeID{0x04}, + id: ids.BuildTestNodeID([]byte{0x04}), } // add of first peer is handled @@ -105,10 +105,10 @@ func TestSetSample(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), } peer2 := &peer{ - id: ids.NodeID{0x02}, + id: ids.BuildTestNodeID([]byte{0x02}), } // Case: Empty diff --git a/network/peer/upgrader.go b/network/peer/upgrader.go index 47a6d0bc7c68..b601ee370947 100644 --- a/network/peer/upgrader.go +++ b/network/peer/upgrader.go @@ -60,12 +60,12 @@ func (t *tlsClientUpgrader) Upgrade(conn net.Conn) (ids.NodeID, net.Conn, *staki func connToIDAndCert(conn *tls.Conn, invalidCerts 
prometheus.Counter) (ids.NodeID, net.Conn, *staking.Certificate, error) { if err := conn.Handshake(); err != nil { - return ids.NodeID{}, nil, nil, err + return ids.EmptyNodeID, nil, nil, err } state := conn.ConnectionState() if len(state.PeerCertificates) == 0 { - return ids.NodeID{}, nil, nil, errNoCert + return ids.EmptyNodeID, nil, nil, errNoCert } tlsCert := state.PeerCertificates[0] @@ -75,7 +75,7 @@ func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter) (ids.NodeI peerCert, err := staking.ParseCertificate(tlsCert.Raw) if err != nil { invalidCerts.Inc() - return ids.NodeID{}, nil, nil, err + return ids.EmptyNodeID, nil, nil, err } // We validate the certificate here to attempt to make the validity of the @@ -84,7 +84,7 @@ func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter) (ids.NodeI // healthy. if err := staking.ValidateCertificate(peerCert); err != nil { invalidCerts.Inc() - return ids.NodeID{}, nil, nil, err + return ids.EmptyNodeID, nil, nil, err } nodeID := ids.NodeIDFromCert(peerCert) diff --git a/network/throttling/inbound_resource_throttler.go b/network/throttling/inbound_resource_throttler.go index a12e8562dde4..42873fe42d6a 100644 --- a/network/throttling/inbound_resource_throttler.go +++ b/network/throttling/inbound_resource_throttler.go @@ -13,8 +13,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const epsilon = time.Millisecond @@ -80,13 +80,12 @@ func newSystemThrottlerMetrics(namespace string, reg prometheus.Registerer) (*sy Help: "Number of nodes we're waiting to read a message from because their usage is too high", }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.totalWaits), reg.Register(m.totalNoWaits), 
reg.Register(m.awaitingAcquire), ) - return m, errs.Err + return m, err } func NewSystemThrottler( diff --git a/network/throttling/outbound_msg_throttler.go b/network/throttling/outbound_msg_throttler.go index 62e8821660bf..6f5ad24561f3 100644 --- a/network/throttling/outbound_msg_throttler.go +++ b/network/throttling/outbound_msg_throttler.go @@ -11,10 +11,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( @@ -204,15 +204,13 @@ func (m *outboundMsgThrottlerMetrics) initialize(namespace string, registerer pr Name: "throttler_outbound_awaiting_release", Help: "Number of messages waiting to be sent", }) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( registerer.Register(m.acquireSuccesses), registerer.Register(m.acquireFailures), registerer.Register(m.remainingAtLargeBytes), registerer.Register(m.remainingVdrBytes), registerer.Register(m.awaitingRelease), ) - return errs.Err } func NewNoOutboundThrottler() OutboundMsgThrottler { diff --git a/node/config.go b/node/config.go index f78987822c33..736bf08b74dc 100644 --- a/node/config.go +++ b/node/config.go @@ -131,6 +131,9 @@ type BootstrapConfig struct { } type DatabaseConfig struct { + // If true, all writes are to memory and are discarded at node shutdown. 
+ ReadOnly bool `json:"readOnly"` + // Path to database Path string `json:"path"` diff --git a/node/insecure_validator_manager.go b/node/insecure_validator_manager.go index bd69529619dc..d2cdab94cc89 100644 --- a/node/insecure_validator_manager.go +++ b/node/insecure_validator_manager.go @@ -27,7 +27,7 @@ func (i *insecureValidatorManager) Connected(vdrID ids.NodeID, nodeVersion *vers // peer as a validator. Because each validator needs a txID associated // with it, we hack one together by padding the nodeID with zeroes. dummyTxID := ids.Empty - copy(dummyTxID[:], vdrID[:]) + copy(dummyTxID[:], vdrID.Bytes()) err := i.vdrs.AddStaker(constants.PrimaryNetworkID, vdrID, nil, dummyTxID, i.weight) if err != nil { diff --git a/node/node.go b/node/node.go index 4200a33699ab..ba876d09b2d4 100644 --- a/node/node.go +++ b/node/node.go @@ -15,6 +15,7 @@ import ( "net" "os" "path/filepath" + "strconv" "sync" "time" @@ -37,9 +38,11 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/leveldb" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/meterdb" + "github.com/ava-labs/avalanchego/database/pebble" "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/indexer" @@ -71,7 +74,6 @@ import ( "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" 
"github.com/ava-labs/avalanchego/vms/avm" @@ -92,12 +94,164 @@ var ( genesisHashKey = []byte("genesisID") ungracefulShutdown = []byte("ungracefulShutdown") - indexerDBPrefix = []byte{0x00} + indexerDBPrefix = []byte{0x00} + keystoreDBPrefix = []byte("keystore") errInvalidTLSKey = errors.New("invalid TLS key") errShuttingDown = errors.New("server shutting down") ) +// New returns an instance of Node +func New( + config *Config, + logFactory logging.Factory, + logger logging.Logger, +) (*Node, error) { + tlsCert := config.StakingTLSCert.Leaf + stakingCert := staking.CertificateFromX509(tlsCert) + if err := staking.ValidateCertificate(stakingCert); err != nil { + return nil, fmt.Errorf("invalid staking certificate: %w", err) + } + + n := &Node{ + Log: logger, + LogFactory: logFactory, + ID: ids.NodeIDFromCert(stakingCert), + Config: config, + } + + n.DoneShuttingDown.Add(1) + + pop := signer.NewProofOfPossession(n.Config.StakingSigningKey) + logger.Info("initializing node", + zap.Stringer("version", version.CurrentApp), + zap.Stringer("nodeID", n.ID), + zap.Stringer("stakingKeyType", tlsCert.PublicKeyAlgorithm), + zap.Reflect("nodePOP", pop), + zap.Reflect("providedFlags", n.Config.ProvidedFlags), + zap.Reflect("config", n.Config), + ) + + var err error + n.VMFactoryLog, err = logFactory.Make("vm-factory") + if err != nil { + return nil, fmt.Errorf("problem creating vm logger: %w", err) + } + + n.VMManager = vms.NewManager(n.VMFactoryLog, config.VMAliaser) + + if err := n.initBootstrappers(); err != nil { // Configure the bootstrappers + return nil, fmt.Errorf("problem initializing node beacons: %w", err) + } + + // Set up tracer + n.tracer, err = trace.New(n.Config.TraceConfig) + if err != nil { + return nil, fmt.Errorf("couldn't initialize tracer: %w", err) + } + + if n.Config.TraceConfig.Enabled { + n.Config.ConsensusRouter = router.Trace(n.Config.ConsensusRouter, n.tracer) + } + + n.initMetrics() + + if err := n.initAPIServer(); err != nil { // Start the 
API Server + return nil, fmt.Errorf("couldn't initialize API server: %w", err) + } + + if err := n.initMetricsAPI(); err != nil { // Start the Metrics API + return nil, fmt.Errorf("couldn't initialize metrics API: %w", err) + } + + if err := n.initDatabase(); err != nil { // Set up the node's database + return nil, fmt.Errorf("problem initializing database: %w", err) + } + + if err := n.initKeystoreAPI(); err != nil { // Start the Keystore API + return nil, fmt.Errorf("couldn't initialize keystore API: %w", err) + } + + n.initSharedMemory() // Initialize shared memory + + // message.Creator is shared between networking, chainManager and the engine. + // It must be initiated before networking (initNetworking), chain manager (initChainManager) + // and the engine (initChains) but after the metrics (initMetricsAPI) + // message.Creator currently record metrics under network namespace + n.networkNamespace = "network" + n.msgCreator, err = message.NewCreator( + n.Log, + n.MetricsRegisterer, + n.networkNamespace, + n.Config.NetworkConfig.CompressionType, + n.Config.NetworkConfig.MaximumInboundMessageTimeout, + ) + if err != nil { + return nil, fmt.Errorf("problem initializing message creator: %w", err) + } + + n.vdrs = validators.NewManager() + if !n.Config.SybilProtectionEnabled { + n.vdrs = newOverriddenManager(constants.PrimaryNetworkID, n.vdrs) + } + if err := n.initResourceManager(n.MetricsRegisterer); err != nil { + return nil, fmt.Errorf("problem initializing resource manager: %w", err) + } + n.initCPUTargeter(&config.CPUTargeterConfig) + n.initDiskTargeter(&config.DiskTargeterConfig) + if err := n.initNetworking(); err != nil { // Set up networking layer. 
+ return nil, fmt.Errorf("problem initializing networking: %w", err) + } + + n.initEventDispatchers() + + // Start the Health API + // Has to be initialized before chain manager + // [n.Net] must already be set + if err := n.initHealthAPI(); err != nil { + return nil, fmt.Errorf("couldn't initialize health API: %w", err) + } + if err := n.addDefaultVMAliases(); err != nil { + return nil, fmt.Errorf("couldn't initialize API aliases: %w", err) + } + if err := n.initChainManager(n.Config.AvaxAssetID); err != nil { // Set up the chain manager + return nil, fmt.Errorf("couldn't initialize chain manager: %w", err) + } + if err := n.initVMs(); err != nil { // Initialize the VM registry. + return nil, fmt.Errorf("couldn't initialize VM registry: %w", err) + } + if err := n.initAdminAPI(); err != nil { // Start the Admin API + return nil, fmt.Errorf("couldn't initialize admin API: %w", err) + } + if err := n.initInfoAPI(); err != nil { // Start the Info API + return nil, fmt.Errorf("couldn't initialize info API: %w", err) + } + if err := n.initIPCs(); err != nil { // Start the IPCs + return nil, fmt.Errorf("couldn't initialize IPCs: %w", err) + } + if err := n.initIPCAPI(); err != nil { // Start the IPC API + return nil, fmt.Errorf("couldn't initialize the IPC API: %w", err) + } + if err := n.initChainAliases(n.Config.GenesisBytes); err != nil { + return nil, fmt.Errorf("couldn't initialize chain aliases: %w", err) + } + if err := n.initAPIAliases(n.Config.GenesisBytes); err != nil { + return nil, fmt.Errorf("couldn't initialize API aliases: %w", err) + } + if err := n.initIndexer(); err != nil { + return nil, fmt.Errorf("couldn't initialize indexer: %w", err) + } + + n.health.Start(context.TODO(), n.Config.HealthCheckFreq) + n.initProfiler() + + // Start the Platform chain + if err := n.initChains(n.Config.GenesisBytes); err != nil { + return nil, fmt.Errorf("couldn't initialize chains: %w", err) + } + return n, nil +} + // Node is an instance of an Avalanche node. 
type Node struct { Log logging.Logger @@ -109,8 +263,7 @@ type Node struct { ID ids.NodeID // Storage for this node - DBManager manager.Manager - DB database.Database + DB database.Database // Profiles the process. Nil if continuous profiling is disabled. profiler profiler.ContinuousProfiler @@ -245,7 +398,7 @@ func (n *Node) initNetworking() error { // // 1: https://apple.stackexchange.com/questions/393715/do-you-want-the-application-main-to-accept-incoming-network-connections-pop // 2: https://github.com/golang/go/issues/56998 - listenAddress := net.JoinHostPort(n.Config.ListenHost, fmt.Sprintf("%d", currentIPPort.Port)) + listenAddress := net.JoinHostPort(n.Config.ListenHost, strconv.FormatUint(uint64(currentIPPort.Port), 10)) listener, err := net.Listen(constants.NetworkType, listenAddress) if err != nil { @@ -302,7 +455,7 @@ func (n *Node) initNetworking() error { // a validator. Because each validator needs a txID associated with it, // we hack one together by just padding our nodeID with zeroes. dummyTxID := ids.Empty - copy(dummyTxID[:], n.ID[:]) + copy(dummyTxID[:], n.ID.Bytes()) err := n.vdrs.AddStaker( constants.PrimaryNetworkID, @@ -500,41 +653,46 @@ func (n *Node) Dispatch() error { */ func (n *Node) initDatabase() error { - // start the db manager - var ( - dbManager manager.Manager - err error - ) + // start the db switch n.Config.DatabaseConfig.Name { case leveldb.Name: - dbManager, err = manager.NewLevelDB(n.Config.DatabaseConfig.Path, n.Config.DatabaseConfig.Config, n.Log, version.CurrentDatabase, "db_internal", n.MetricsRegisterer) + // Prior to v1.10.15, the only on-disk database was leveldb, and its + // files went to [dbPath]/[networkID]/v1.4.5. 
+ dbPath := filepath.Join(n.Config.DatabaseConfig.Path, version.CurrentDatabase.String()) + var err error + n.DB, err = leveldb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) + if err != nil { + return fmt.Errorf("couldn't create leveldb at %s: %w", dbPath, err) + } case memdb.Name: - dbManager = manager.NewMemDB(version.CurrentDatabase) + n.DB = memdb.New() + case pebble.Name: + dbPath := filepath.Join(n.Config.DatabaseConfig.Path, pebble.Name) + var err error + n.DB, err = pebble.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) + if err != nil { + return fmt.Errorf("couldn't create pebbledb at %s: %w", dbPath, err) + } default: - err = fmt.Errorf( - "db-type was %q but should have been one of {%s, %s}", + return fmt.Errorf( + "db-type was %q but should have been one of {%s, %s, %s}", n.Config.DatabaseConfig.Name, leveldb.Name, memdb.Name, + pebble.Name, ) } - if err != nil { - return err + + if n.Config.ReadOnly && n.Config.DatabaseConfig.Name != memdb.Name { + n.DB = versiondb.New(n.DB) } - meterDBManager, err := dbManager.NewMeterDBManager("db", n.MetricsRegisterer) + var err error + n.DB, err = meterdb.New("db", n.MetricsRegisterer, n.DB) if err != nil { return err } - n.DBManager = meterDBManager - - currentDB := dbManager.Current() - n.Log.Info("initializing database", - zap.Stringer("dbVersion", currentDB.Version), - ) - n.DB = currentDB.Database - rawExpectedGenesisHash := hashing.ComputeHash256(n.Config.GenesisBytes) rawGenesisHash, err := n.DB.Get(genesisHashKey) @@ -559,6 +717,10 @@ func (n *Node) initDatabase() error { return fmt.Errorf("db contains invalid genesis hash. 
DB Genesis: %s Generated Genesis: %s", genesisHash, expectedGenesisHash) } + n.Log.Info("initializing database", + zap.Stringer("genesisHash", genesisHash), + ) + ok, err := n.DB.Has(ungracefulShutdown) if err != nil { return fmt.Errorf("failed to read ungraceful shutdown key: %w", err) @@ -679,7 +841,7 @@ func (n *Node) initMetrics() { func (n *Node) initAPIServer() error { n.Log.Info("initializing API server") - listenAddress := net.JoinHostPort(n.Config.HTTPHost, fmt.Sprintf("%d", n.Config.HTTPPort)) + listenAddress := net.JoinHostPort(n.Config.HTTPHost, strconv.FormatUint(uint64(n.Config.HTTPPort), 10)) listener, err := net.Listen("tcp", listenAddress) if err != nil { return err @@ -830,7 +992,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { BlockAcceptorGroup: n.BlockAcceptorGroup, TxAcceptorGroup: n.TxAcceptorGroup, VertexAcceptorGroup: n.VertexAcceptorGroup, - DBManager: n.DBManager, + DB: n.DB, MsgCreator: n.msgCreator, Router: n.Config.ConsensusRouter, Net: n.Net, @@ -860,7 +1022,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { BootstrapAncestorsMaxContainersSent: n.Config.BootstrapAncestorsMaxContainersSent, BootstrapAncestorsMaxContainersReceived: n.Config.BootstrapAncestorsMaxContainersReceived, ApricotPhase4Time: version.GetApricotPhase4Time(n.Config.NetworkID), - ApricotPhase4MinPChainHeight: version.GetApricotPhase4MinPChainHeight(n.Config.NetworkID), + ApricotPhase4MinPChainHeight: version.ApricotPhase4MinPChainHeight[n.Config.NetworkID], ResourceTracker: n.resourceTracker, StateSyncBeacons: n.Config.StateSyncIDs, TracingEnabled: n.Config.TraceConfig.Enabled, @@ -894,8 +1056,7 @@ func (n *Node) initVMs() error { }) // Register the VMs that Avalanche supports - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( vmRegisterer.Register(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ Config: platformconfig.Config{ Chains: n.chainManager, @@ -940,8 +1101,8 @@ func (n *Node) initVMs() error { 
n.VMManager.RegisterFactory(context.TODO(), nftfx.ID, &nftfx.Factory{}), n.VMManager.RegisterFactory(context.TODO(), propertyfx.ID, &propertyfx.Factory{}), ) - if errs.Errored() { - return errs.Err + if err != nil { + return err } // initialize vm runtime manager @@ -981,8 +1142,7 @@ func (n *Node) initSharedMemory() { // Assumes n.APIServer is already set func (n *Node) initKeystoreAPI() error { n.Log.Info("initializing keystore") - keystoreDB := n.DBManager.NewPrefixDBManager([]byte("keystore")) - n.keystore = keystore.New(n.Log, keystoreDB) + n.keystore = keystore.New(n.Log, prefixdb.New(keystoreDBPrefix, n.DB)) handler, err := n.keystore.CreateHandler() if err != nil { return err @@ -1332,154 +1492,6 @@ func (n *Node) initDiskTargeter( ) } -// Initialize this node -func (n *Node) Initialize( - config *Config, - logger logging.Logger, - logFactory logging.Factory, -) error { - tlsCert := config.StakingTLSCert.Leaf - stakingCert := staking.CertificateFromX509(tlsCert) - if err := staking.ValidateCertificate(stakingCert); err != nil { - return fmt.Errorf("invalid staking certificate: %w", err) - } - - n.Log = logger - n.Config = config - n.ID = ids.NodeIDFromCert(stakingCert) - n.LogFactory = logFactory - n.DoneShuttingDown.Add(1) - - pop := signer.NewProofOfPossession(n.Config.StakingSigningKey) - n.Log.Info("initializing node", - zap.Stringer("version", version.CurrentApp), - zap.Stringer("nodeID", n.ID), - zap.Stringer("stakingKeyType", tlsCert.PublicKeyAlgorithm), - zap.Reflect("nodePOP", pop), - zap.Reflect("providedFlags", n.Config.ProvidedFlags), - zap.Reflect("config", n.Config), - ) - - var err error - n.VMFactoryLog, err = logFactory.Make("vm-factory") - if err != nil { - return fmt.Errorf("problem creating vm logger: %w", err) - } - - n.VMManager = vms.NewManager(n.VMFactoryLog, config.VMAliaser) - - if err := n.initBootstrappers(); err != nil { // Configure the bootstrappers - return fmt.Errorf("problem initializing node beacons: %w", err) - } - - // 
Set up tracer - n.tracer, err = trace.New(n.Config.TraceConfig) - if err != nil { - return fmt.Errorf("couldn't initialize tracer: %w", err) - } - - if n.Config.TraceConfig.Enabled { - n.Config.ConsensusRouter = router.Trace(n.Config.ConsensusRouter, n.tracer) - } - - n.initMetrics() - - if err := n.initAPIServer(); err != nil { // Start the API Server - return fmt.Errorf("couldn't initialize API server: %w", err) - } - - if err := n.initMetricsAPI(); err != nil { // Start the Metrics API - return fmt.Errorf("couldn't initialize metrics API: %w", err) - } - - if err := n.initDatabase(); err != nil { // Set up the node's database - return fmt.Errorf("problem initializing database: %w", err) - } - - if err := n.initKeystoreAPI(); err != nil { // Start the Keystore API - return fmt.Errorf("couldn't initialize keystore API: %w", err) - } - - n.initSharedMemory() // Initialize shared memory - - // message.Creator is shared between networking, chainManager and the engine. - // It must be initiated before networking (initNetworking), chain manager (initChainManager) - // and the engine (initChains) but after the metrics (initMetricsAPI) - // message.Creator currently record metrics under network namespace - n.networkNamespace = "network" - n.msgCreator, err = message.NewCreator( - n.Log, - n.MetricsRegisterer, - n.networkNamespace, - n.Config.NetworkConfig.CompressionType, - n.Config.NetworkConfig.MaximumInboundMessageTimeout, - ) - if err != nil { - return fmt.Errorf("problem initializing message creator: %w", err) - } - - n.vdrs = validators.NewManager() - if !n.Config.SybilProtectionEnabled { - n.vdrs = newOverriddenManager(constants.PrimaryNetworkID, n.vdrs) - } - if err := n.initResourceManager(n.MetricsRegisterer); err != nil { - return fmt.Errorf("problem initializing resource manager: %w", err) - } - n.initCPUTargeter(&config.CPUTargeterConfig) - n.initDiskTargeter(&config.DiskTargeterConfig) - if err := n.initNetworking(); err != nil { // Set up networking layer. 
- return fmt.Errorf("problem initializing networking: %w", err) - } - - n.initEventDispatchers() - - // Start the Health API - // Has to be initialized before chain manager - // [n.Net] must already be set - if err := n.initHealthAPI(); err != nil { - return fmt.Errorf("couldn't initialize health API: %w", err) - } - if err := n.addDefaultVMAliases(); err != nil { - return fmt.Errorf("couldn't initialize API aliases: %w", err) - } - if err := n.initChainManager(n.Config.AvaxAssetID); err != nil { // Set up the chain manager - return fmt.Errorf("couldn't initialize chain manager: %w", err) - } - if err := n.initVMs(); err != nil { // Initialize the VM registry. - return fmt.Errorf("couldn't initialize VM registry: %w", err) - } - if err := n.initAdminAPI(); err != nil { // Start the Admin API - return fmt.Errorf("couldn't initialize admin API: %w", err) - } - if err := n.initInfoAPI(); err != nil { // Start the Info API - return fmt.Errorf("couldn't initialize info API: %w", err) - } - if err := n.initIPCs(); err != nil { // Start the IPCs - return fmt.Errorf("couldn't initialize IPCs: %w", err) - } - if err := n.initIPCAPI(); err != nil { // Start the IPC API - return fmt.Errorf("couldn't initialize the IPC API: %w", err) - } - if err := n.initChainAliases(n.Config.GenesisBytes); err != nil { - return fmt.Errorf("couldn't initialize chain aliases: %w", err) - } - if err := n.initAPIAliases(n.Config.GenesisBytes); err != nil { - return fmt.Errorf("couldn't initialize API aliases: %w", err) - } - if err := n.initIndexer(); err != nil { - return fmt.Errorf("couldn't initialize indexer: %w", err) - } - - n.health.Start(context.TODO(), n.Config.HealthCheckFreq) - n.initProfiler() - - // Start the Platform chain - if err := n.initChains(n.Config.GenesisBytes); err != nil { - return fmt.Errorf("couldn't initialize chains: %w", err) - } - return nil -} - // Shutdown this node // May be called multiple times func (n *Node) Shutdown(exitCode int) { @@ -1548,7 +1560,7 @@ func 
(n *Node) shutdown() { n.Log.Info("cleaning up plugin runtimes") n.runtimeManager.Stop(context.TODO()) - if n.DBManager != nil { + if n.DB != nil { if err := n.DB.Delete(ungracefulShutdown); err != nil { n.Log.Error( "failed to delete ungraceful shutdown key", @@ -1556,7 +1568,7 @@ func (n *Node) shutdown() { ) } - if err := n.DBManager.Close(); err != nil { + if err := n.DB.Close(); err != nil { n.Log.Warn("error during DB shutdown", zap.Error(err), ) diff --git a/proto/README.md b/proto/README.md index 51cf916dd7d8..34d4228571eb 100644 --- a/proto/README.md +++ b/proto/README.md @@ -1,6 +1,6 @@ # Avalanche gRPC -Now Serving: **Protocol Version 29** +Now Serving: **Protocol Version 30** Protobuf files are hosted at [https://buf.build/ava-labs/avalanche](https://buf.build/ava-labs/avalanche) and diff --git a/proto/p2p/p2p.proto b/proto/p2p/p2p.proto index 14d52ed9a203..4f37af848fdb 100644 --- a/proto/p2p/p2p.proto +++ b/proto/p2p/p2p.proto @@ -58,6 +58,7 @@ message Message { AppGossip app_gossip = 32; PeerListAck peer_list_ack = 33; + AppError app_error = 34; } } @@ -396,7 +397,8 @@ message Chits { // AppRequest is a VM-defined request. // -// Remote peers must respond to AppRequest with corresponding AppResponse +// Remote peers must respond to AppRequest with a corresponding AppResponse or +// AppError message AppRequest { // Chain being requested from bytes chain_id = 1; @@ -418,6 +420,18 @@ message AppResponse { bytes app_bytes = 3; } +// AppError is a VM-defined error sent in response to AppRequest +message AppError { + // Chain the message is for + bytes chain_id = 1; + // Request id of the original AppRequest + uint32 request_id = 2; + // VM defined error code. VMs may define error codes > 0. 
+ sint32 error_code = 3; + // VM defined error message + string error_message = 4; +} + // AppGossip is a VM-defined message message AppGossip { // Chain the message is for diff --git a/proto/pb/p2p/p2p.pb.go b/proto/pb/p2p/p2p.pb.go index 9990de1f5c30..235637d1dd0c 100644 --- a/proto/pb/p2p/p2p.pb.go +++ b/proto/pb/p2p/p2p.pb.go @@ -109,6 +109,7 @@ type Message struct { // *Message_AppResponse // *Message_AppGossip // *Message_PeerListAck + // *Message_AppError Message isMessage_Message `protobuf_oneof:"message"` } @@ -326,6 +327,13 @@ func (x *Message) GetPeerListAck() *PeerListAck { return nil } +func (x *Message) GetAppError() *AppError { + if x, ok := x.GetMessage().(*Message_AppError); ok { + return x.AppError + } + return nil +} + type isMessage_Message interface { isMessage_Message() } @@ -441,6 +449,10 @@ type Message_PeerListAck struct { PeerListAck *PeerListAck `protobuf:"bytes,33,opt,name=peer_list_ack,json=peerListAck,proto3,oneof"` } +type Message_AppError struct { + AppError *AppError `protobuf:"bytes,34,opt,name=app_error,json=appError,proto3,oneof"` +} + func (*Message_CompressedGzip) isMessage_Message() {} func (*Message_CompressedZstd) isMessage_Message() {} @@ -491,6 +503,8 @@ func (*Message_AppGossip) isMessage_Message() {} func (*Message_PeerListAck) isMessage_Message() {} +func (*Message_AppError) isMessage_Message() {} + // Ping reports a peer's perceived uptime percentage. // // Peers should respond to Ping with a Pong. @@ -2308,7 +2322,8 @@ func (x *Chits) GetPreferredIdAtHeight() []byte { // AppRequest is a VM-defined request. 
// -// Remote peers must respond to AppRequest with corresponding AppResponse +// Remote peers must respond to AppRequest with a corresponding AppResponse or +// AppError type AppRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2451,6 +2466,82 @@ func (x *AppResponse) GetAppBytes() []byte { return nil } +// AppError is a VM-defined error sent in response to AppRequest +type AppError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Chain the message is for + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original AppRequest + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // VM defined error code. VMs may define error codes > 0. + ErrorCode int32 `protobuf:"zigzag32,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // VM defined error message + ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *AppError) Reset() { + *x = AppError{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_p2p_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppError) ProtoMessage() {} + +func (x *AppError) ProtoReflect() protoreflect.Message { + mi := &file_p2p_p2p_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppError.ProtoReflect.Descriptor instead. 
+func (*AppError) Descriptor() ([]byte, []int) { + return file_p2p_p2p_proto_rawDescGZIP(), []int{27} +} + +func (x *AppError) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} + +func (x *AppError) GetRequestId() uint32 { + if x != nil { + return x.RequestId + } + return 0 +} + +func (x *AppError) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *AppError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + // AppGossip is a VM-defined message type AppGossip struct { state protoimpl.MessageState @@ -2466,7 +2557,7 @@ type AppGossip struct { func (x *AppGossip) Reset() { *x = AppGossip{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[27] + mi := &file_p2p_p2p_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2479,7 +2570,7 @@ func (x *AppGossip) String() string { func (*AppGossip) ProtoMessage() {} func (x *AppGossip) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[27] + mi := &file_p2p_p2p_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2492,7 +2583,7 @@ func (x *AppGossip) ProtoReflect() protoreflect.Message { // Deprecated: Use AppGossip.ProtoReflect.Descriptor instead. 
func (*AppGossip) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{27} + return file_p2p_p2p_proto_rawDescGZIP(), []int{28} } func (x *AppGossip) GetChainId() []byte { @@ -2513,7 +2604,7 @@ var File_p2p_p2p_proto protoreflect.FileDescriptor var file_p2p_p2p_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x70, 0x32, 0x70, 0x2f, 0x70, 0x32, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x03, 0x70, 0x32, 0x70, 0x22, 0xde, 0x0a, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x03, 0x70, 0x32, 0x70, 0x22, 0x8c, 0x0b, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x67, 0x7a, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x47, 0x7a, 0x69, 0x70, 0x12, 0x29, 0x0a, 0x0f, 0x63, @@ -2598,245 +2689,256 @@ var file_p2p_p2p_proto_rawDesc = []byte{ 0x69, 0x70, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x6b, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0b, 0x70, - 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, - 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, - 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x52, 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, - 0x43, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 
0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, - 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, - 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, - 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, - 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0x9a, - 0x02, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x79, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, - 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, - 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x79, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x79, - 
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, - 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, 0x27, 0x0a, - 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x53, - 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x5e, 0x0a, 0x06, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, - 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, - 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0xbd, 0x01, 0x0a, 0x0d, - 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, - 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, - 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 
0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x08, 0x50, - 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, - 0x65, 0x64, 0x5f, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, - 0x70, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, - 0x50, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x6b, - 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x22, 0x3e, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, - 0x63, 0x6b, 0x12, 0x29, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x6b, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, - 0x41, 0x63, 0x6b, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x41, 0x63, 0x6b, 0x73, 0x4a, 0x04, 0x08, - 0x01, 0x10, 0x02, 0x22, 0x6f, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, - 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 
0x0a, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, - 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, - 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x6a, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x12, 0x2c, 0x0a, 0x09, 0x61, 0x70, + 0x70, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x08, + 0x61, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x75, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0d, + 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0x43, 0x0a, + 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, + 0x6d, 0x65, 
0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, + 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x73, + 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0x9a, 0x02, 0x0a, + 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x79, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, + 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x67, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x74, + 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x08, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x53, 0x75, 0x62, + 0x6e, 0x65, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 
0x32, 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x5e, 0x0a, 0x06, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, + 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0xbd, 0x01, 0x0a, 0x0d, 0x43, 0x6c, + 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x78, + 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, + 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, 0x65, + 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 
0x65, 0x64, + 0x5f, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, + 0x72, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x6b, 0x12, 0x13, + 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, + 0x78, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x22, 0x3e, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, + 0x12, 0x29, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, + 0x6b, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x41, 0x63, 0x6b, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0x6f, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x22, 0x89, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, - 0x63, 0x68, 0x61, 0x69, 0x6e, 
0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, - 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, 0x14, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x6e, 0x65, 0x22, 0x6a, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x89, + 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 
0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, 0x14, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, 0x22, 0x9d, 0x01, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, + 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x30, 0x0a, 0x0b, 0x65, + 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x52, 
0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x75, 0x0a, + 0x10, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, + 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x22, 0xba, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, 0x22, - 0x9d, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 
0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x30, 0x0a, - 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, - 0x75, 0x0a, 0x10, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, - 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, + 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x6f, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 
0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x22, 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, + 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, + 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6b, + 0x0a, 0x09, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xb0, 0x01, 0x0a, 0x03, + 0x47, 0x65, 0x74, 0x12, 0x19, 
0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, - 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xba, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, + 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, + 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x8f, + 0x01, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 
0x03, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, - 0x73, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, - 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x22, 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, - 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, - 0x6f, 0x6e, 0x74, 
0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, - 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, + 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, + 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x22, 0x6b, 0x0a, 0x09, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, + 0x22, 0xdc, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, + 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, + 0xe1, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xb0, 0x01, - 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, - 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, - 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x22, 
0x8f, 0x01, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x22, 0xdc, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x22, 0xba, 0x01, 0x0a, 
0x05, 0x43, 0x68, 0x69, 0x74, 0x73, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0a, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x68, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, + 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x22, 0xe1, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, - 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, - 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, - 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x5f, 0x69, 
0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, - 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x74, 0x73, 0x12, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, + 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, + 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x08, 0x41, 0x70, 0x70, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 
0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x65, - 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0b, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, 0x33, 0x0a, - 0x16, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x61, 0x74, - 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 
0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, - 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, - 0x0a, 0x0a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, - 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, - 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, - 0x48, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4e, 0x4f, 0x57, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, - 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, - 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, + 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, + 0x70, 0x70, 
0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, 0x0a, 0x0a, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x48, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, + 0x13, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4e, 0x4f, + 0x57, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2852,7 +2954,7 @@ func file_p2p_p2p_proto_rawDescGZIP() []byte { } var file_p2p_p2p_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 29) var file_p2p_p2p_proto_goTypes = []interface{}{ (EngineType)(0), // 0: p2p.EngineType (*Message)(nil), // 1: p2p.Message @@ -2882,7 +2984,8 @@ var file_p2p_p2p_proto_goTypes = []interface{}{ (*Chits)(nil), // 25: p2p.Chits (*AppRequest)(nil), // 26: p2p.AppRequest (*AppResponse)(nil), // 27: p2p.AppResponse - (*AppGossip)(nil), // 28: p2p.AppGossip + (*AppError)(nil), // 28: p2p.AppError + (*AppGossip)(nil), // 29: p2p.AppGossip } var file_p2p_p2p_proto_depIdxs = []int32{ 2, // 0: p2p.Message.ping:type_name -> p2p.Ping @@ -2906,25 +3009,26 @@ var file_p2p_p2p_proto_depIdxs = []int32{ 25, // 18: p2p.Message.chits:type_name -> p2p.Chits 26, // 19: p2p.Message.app_request:type_name -> p2p.AppRequest 27, // 20: p2p.Message.app_response:type_name -> 
p2p.AppResponse - 28, // 21: p2p.Message.app_gossip:type_name -> p2p.AppGossip + 29, // 21: p2p.Message.app_gossip:type_name -> p2p.AppGossip 10, // 22: p2p.Message.peer_list_ack:type_name -> p2p.PeerListAck - 3, // 23: p2p.Ping.subnet_uptimes:type_name -> p2p.SubnetUptime - 3, // 24: p2p.Pong.subnet_uptimes:type_name -> p2p.SubnetUptime - 6, // 25: p2p.Version.client:type_name -> p2p.Client - 7, // 26: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort - 9, // 27: p2p.PeerListAck.peer_acks:type_name -> p2p.PeerAck - 0, // 28: p2p.GetAcceptedFrontier.engine_type:type_name -> p2p.EngineType - 0, // 29: p2p.GetAccepted.engine_type:type_name -> p2p.EngineType - 0, // 30: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType - 0, // 31: p2p.Get.engine_type:type_name -> p2p.EngineType - 0, // 32: p2p.Put.engine_type:type_name -> p2p.EngineType - 0, // 33: p2p.PushQuery.engine_type:type_name -> p2p.EngineType - 0, // 34: p2p.PullQuery.engine_type:type_name -> p2p.EngineType - 35, // [35:35] is the sub-list for method output_type - 35, // [35:35] is the sub-list for method input_type - 35, // [35:35] is the sub-list for extension type_name - 35, // [35:35] is the sub-list for extension extendee - 0, // [0:35] is the sub-list for field type_name + 28, // 23: p2p.Message.app_error:type_name -> p2p.AppError + 3, // 24: p2p.Ping.subnet_uptimes:type_name -> p2p.SubnetUptime + 3, // 25: p2p.Pong.subnet_uptimes:type_name -> p2p.SubnetUptime + 6, // 26: p2p.Version.client:type_name -> p2p.Client + 7, // 27: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort + 9, // 28: p2p.PeerListAck.peer_acks:type_name -> p2p.PeerAck + 0, // 29: p2p.GetAcceptedFrontier.engine_type:type_name -> p2p.EngineType + 0, // 30: p2p.GetAccepted.engine_type:type_name -> p2p.EngineType + 0, // 31: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType + 0, // 32: p2p.Get.engine_type:type_name -> p2p.EngineType + 0, // 33: p2p.Put.engine_type:type_name -> p2p.EngineType + 0, // 34: 
p2p.PushQuery.engine_type:type_name -> p2p.EngineType + 0, // 35: p2p.PullQuery.engine_type:type_name -> p2p.EngineType + 36, // [36:36] is the sub-list for method output_type + 36, // [36:36] is the sub-list for method input_type + 36, // [36:36] is the sub-list for extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name } func init() { file_p2p_p2p_proto_init() } @@ -3258,6 +3362,18 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_p2p_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppGossip); i { case 0: return &v.state @@ -3296,6 +3412,7 @@ func file_p2p_p2p_proto_init() { (*Message_AppResponse)(nil), (*Message_AppGossip)(nil), (*Message_PeerListAck)(nil), + (*Message_AppError)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -3303,7 +3420,7 @@ func file_p2p_p2p_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2p_p2p_proto_rawDesc, NumEnums: 1, - NumMessages: 28, + NumMessages: 29, NumExtensions: 0, NumServices: 0, }, diff --git a/proto/pb/sync/sync.pb.go b/proto/pb/sync/sync.pb.go index 92cd3d88351e..eb72e145420a 100644 --- a/proto/pb/sync/sync.pb.go +++ b/proto/pb/sync/sync.pb.go @@ -1666,44 +1666,47 @@ var file_sync_sync_proto_rawDesc = []byte{ 0x4e, 0x6f, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0x8a, 0x04, 0x0a, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x32, 0xc3, 0x04, 0x0a, 0x02, 0x44, 0x42, 0x12, 0x44, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x15, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x79, - 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x54, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x2e, 0x73, 
0x79, - 0x6e, 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1a, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, - 0x10, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x12, 0x1d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, - 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x43, 0x6c, 0x65, 0x61, + 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x39, 0x0a, 
0x08, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x15, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0e, + 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1b, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x79, + 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x4b, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0d, + 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1a, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 
0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, + 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1d, 0x2e, 0x73, 0x79, 0x6e, + 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x79, + 0x6e, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1782,21 +1785,23 @@ var file_sync_sync_proto_depIdxs = []int32{ 23, // 32: sync.ProofNode.children:type_name -> sync.ProofNode.ChildrenEntry 21, // 33: sync.KeyChange.value:type_name -> sync.MaybeBytes 24, // 34: sync.DB.GetMerkleRoot:input_type -> google.protobuf.Empty - 2, // 35: sync.DB.GetProof:input_type -> sync.GetProofRequest - 7, // 36: sync.DB.GetChangeProof:input_type -> sync.GetChangeProofRequest - 9, // 37: sync.DB.VerifyChangeProof:input_type -> sync.VerifyChangeProofRequest - 11, // 38: sync.DB.CommitChangeProof:input_type -> sync.CommitChangeProofRequest - 13, // 39: sync.DB.GetRangeProof:input_type -> sync.GetRangeProofRequest - 15, // 40: sync.DB.CommitRangeProof:input_type -> sync.CommitRangeProofRequest - 1, // 41: sync.DB.GetMerkleRoot:output_type -> sync.GetMerkleRootResponse - 3, // 42: sync.DB.GetProof:output_type -> sync.GetProofResponse - 8, // 43: 
sync.DB.GetChangeProof:output_type -> sync.GetChangeProofResponse - 10, // 44: sync.DB.VerifyChangeProof:output_type -> sync.VerifyChangeProofResponse - 24, // 45: sync.DB.CommitChangeProof:output_type -> google.protobuf.Empty - 14, // 46: sync.DB.GetRangeProof:output_type -> sync.GetRangeProofResponse - 24, // 47: sync.DB.CommitRangeProof:output_type -> google.protobuf.Empty - 41, // [41:48] is the sub-list for method output_type - 34, // [34:41] is the sub-list for method input_type + 24, // 35: sync.DB.Clear:input_type -> google.protobuf.Empty + 2, // 36: sync.DB.GetProof:input_type -> sync.GetProofRequest + 7, // 37: sync.DB.GetChangeProof:input_type -> sync.GetChangeProofRequest + 9, // 38: sync.DB.VerifyChangeProof:input_type -> sync.VerifyChangeProofRequest + 11, // 39: sync.DB.CommitChangeProof:input_type -> sync.CommitChangeProofRequest + 13, // 40: sync.DB.GetRangeProof:input_type -> sync.GetRangeProofRequest + 15, // 41: sync.DB.CommitRangeProof:input_type -> sync.CommitRangeProofRequest + 1, // 42: sync.DB.GetMerkleRoot:output_type -> sync.GetMerkleRootResponse + 24, // 43: sync.DB.Clear:output_type -> google.protobuf.Empty + 3, // 44: sync.DB.GetProof:output_type -> sync.GetProofResponse + 8, // 45: sync.DB.GetChangeProof:output_type -> sync.GetChangeProofResponse + 10, // 46: sync.DB.VerifyChangeProof:output_type -> sync.VerifyChangeProofResponse + 24, // 47: sync.DB.CommitChangeProof:output_type -> google.protobuf.Empty + 14, // 48: sync.DB.GetRangeProof:output_type -> sync.GetRangeProofResponse + 24, // 49: sync.DB.CommitRangeProof:output_type -> google.protobuf.Empty + 42, // [42:50] is the sub-list for method output_type + 34, // [34:42] is the sub-list for method input_type 34, // [34:34] is the sub-list for extension type_name 34, // [34:34] is the sub-list for extension extendee 0, // [0:34] is the sub-list for field type_name diff --git a/proto/pb/sync/sync_grpc.pb.go b/proto/pb/sync/sync_grpc.pb.go index 3fb420c7273a..5f79687b4d01 100644 --- 
a/proto/pb/sync/sync_grpc.pb.go +++ b/proto/pb/sync/sync_grpc.pb.go @@ -21,6 +21,7 @@ const _ = grpc.SupportPackageIsVersion7 const ( DB_GetMerkleRoot_FullMethodName = "/sync.DB/GetMerkleRoot" + DB_Clear_FullMethodName = "/sync.DB/Clear" DB_GetProof_FullMethodName = "/sync.DB/GetProof" DB_GetChangeProof_FullMethodName = "/sync.DB/GetChangeProof" DB_VerifyChangeProof_FullMethodName = "/sync.DB/VerifyChangeProof" @@ -34,6 +35,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type DBClient interface { GetMerkleRoot(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMerkleRootResponse, error) + Clear(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) GetProof(ctx context.Context, in *GetProofRequest, opts ...grpc.CallOption) (*GetProofResponse, error) GetChangeProof(ctx context.Context, in *GetChangeProofRequest, opts ...grpc.CallOption) (*GetChangeProofResponse, error) VerifyChangeProof(ctx context.Context, in *VerifyChangeProofRequest, opts ...grpc.CallOption) (*VerifyChangeProofResponse, error) @@ -59,6 +61,15 @@ func (c *dBClient) GetMerkleRoot(ctx context.Context, in *emptypb.Empty, opts .. return out, nil } +func (c *dBClient) Clear(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, DB_Clear_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *dBClient) GetProof(ctx context.Context, in *GetProofRequest, opts ...grpc.CallOption) (*GetProofResponse, error) { out := new(GetProofResponse) err := c.cc.Invoke(ctx, DB_GetProof_FullMethodName, in, out, opts...) 
@@ -118,6 +129,7 @@ func (c *dBClient) CommitRangeProof(ctx context.Context, in *CommitRangeProofReq // for forward compatibility type DBServer interface { GetMerkleRoot(context.Context, *emptypb.Empty) (*GetMerkleRootResponse, error) + Clear(context.Context, *emptypb.Empty) (*emptypb.Empty, error) GetProof(context.Context, *GetProofRequest) (*GetProofResponse, error) GetChangeProof(context.Context, *GetChangeProofRequest) (*GetChangeProofResponse, error) VerifyChangeProof(context.Context, *VerifyChangeProofRequest) (*VerifyChangeProofResponse, error) @@ -134,6 +146,9 @@ type UnimplementedDBServer struct { func (UnimplementedDBServer) GetMerkleRoot(context.Context, *emptypb.Empty) (*GetMerkleRootResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetMerkleRoot not implemented") } +func (UnimplementedDBServer) Clear(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Clear not implemented") +} func (UnimplementedDBServer) GetProof(context.Context, *GetProofRequest) (*GetProofResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetProof not implemented") } @@ -183,6 +198,24 @@ func _DB_GetMerkleRoot_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _DB_Clear_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Clear(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_Clear_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Clear(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + func _DB_GetProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetProofRequest) if err := dec(in); err != nil { @@ -302,6 +335,10 @@ var DB_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetMerkleRoot", Handler: _DB_GetMerkleRoot_Handler, }, + { + MethodName: "Clear", + Handler: _DB_Clear_Handler, + }, { MethodName: "GetProof", Handler: _DB_GetProof_Handler, diff --git a/proto/pb/vm/vm.pb.go b/proto/pb/vm/vm.pb.go index 9bb4f759ff9d..ebc64f5c3a48 100644 --- a/proto/pb/vm/vm.pb.go +++ b/proto/pb/vm/vm.pb.go @@ -232,7 +232,7 @@ func (x StateSummaryAcceptResponse_Mode) Number() protoreflect.EnumNumber { // Deprecated: Use StateSummaryAcceptResponse_Mode.Descriptor instead. func (StateSummaryAcceptResponse_Mode) EnumDescriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{46, 0} + return file_vm_vm_proto_rawDescGZIP(), []int{45, 0} } type InitializeRequest struct { @@ -246,15 +246,15 @@ type InitializeRequest struct { NodeId []byte `protobuf:"bytes,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` // public_key is the BLS public key that would correspond with any signatures // produced by the warp messaging signer - PublicKey []byte `protobuf:"bytes,5,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` - XChainId []byte `protobuf:"bytes,6,opt,name=x_chain_id,json=xChainId,proto3" json:"x_chain_id,omitempty"` - CChainId []byte `protobuf:"bytes,7,opt,name=c_chain_id,json=cChainId,proto3" json:"c_chain_id,omitempty"` - AvaxAssetId []byte `protobuf:"bytes,8,opt,name=avax_asset_id,json=avaxAssetId,proto3" json:"avax_asset_id,omitempty"` - ChainDataDir string `protobuf:"bytes,9,opt,name=chain_data_dir,json=chainDataDir,proto3" json:"chain_data_dir,omitempty"` - GenesisBytes []byte `protobuf:"bytes,10,opt,name=genesis_bytes,json=genesisBytes,proto3" json:"genesis_bytes,omitempty"` - UpgradeBytes []byte `protobuf:"bytes,11,opt,name=upgrade_bytes,json=upgradeBytes,proto3" json:"upgrade_bytes,omitempty"` - 
ConfigBytes []byte `protobuf:"bytes,12,opt,name=config_bytes,json=configBytes,proto3" json:"config_bytes,omitempty"` - DbServers []*VersionedDBServer `protobuf:"bytes,13,rep,name=db_servers,json=dbServers,proto3" json:"db_servers,omitempty"` + PublicKey []byte `protobuf:"bytes,5,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + XChainId []byte `protobuf:"bytes,6,opt,name=x_chain_id,json=xChainId,proto3" json:"x_chain_id,omitempty"` + CChainId []byte `protobuf:"bytes,7,opt,name=c_chain_id,json=cChainId,proto3" json:"c_chain_id,omitempty"` + AvaxAssetId []byte `protobuf:"bytes,8,opt,name=avax_asset_id,json=avaxAssetId,proto3" json:"avax_asset_id,omitempty"` + ChainDataDir string `protobuf:"bytes,9,opt,name=chain_data_dir,json=chainDataDir,proto3" json:"chain_data_dir,omitempty"` + GenesisBytes []byte `protobuf:"bytes,10,opt,name=genesis_bytes,json=genesisBytes,proto3" json:"genesis_bytes,omitempty"` + UpgradeBytes []byte `protobuf:"bytes,11,opt,name=upgrade_bytes,json=upgradeBytes,proto3" json:"upgrade_bytes,omitempty"` + ConfigBytes []byte `protobuf:"bytes,12,opt,name=config_bytes,json=configBytes,proto3" json:"config_bytes,omitempty"` + DbServerAddr string `protobuf:"bytes,13,opt,name=db_server_addr,json=dbServerAddr,proto3" json:"db_server_addr,omitempty"` // server_addr is the address of the gRPC server which serves // the messenger, keystore, shared memory, blockchain alias, // subnet alias, and appSender services @@ -377,11 +377,11 @@ func (x *InitializeRequest) GetConfigBytes() []byte { return nil } -func (x *InitializeRequest) GetDbServers() []*VersionedDBServer { +func (x *InitializeRequest) GetDbServerAddr() string { if x != nil { - return x.DbServers + return x.DbServerAddr } - return nil + return "" } func (x *InitializeRequest) GetServerAddr() string { @@ -470,63 +470,6 @@ func (x *InitializeResponse) GetTimestamp() *timestamppb.Timestamp { return nil } -type VersionedDBServer struct { - state protoimpl.MessageState - sizeCache 
protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // server_addr is the address of the gRPC server which serves the - // Database service - ServerAddr string `protobuf:"bytes,2,opt,name=server_addr,json=serverAddr,proto3" json:"server_addr,omitempty"` -} - -func (x *VersionedDBServer) Reset() { - *x = VersionedDBServer{} - if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VersionedDBServer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VersionedDBServer) ProtoMessage() {} - -func (x *VersionedDBServer) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VersionedDBServer.ProtoReflect.Descriptor instead. 
-func (*VersionedDBServer) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{2} -} - -func (x *VersionedDBServer) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *VersionedDBServer) GetServerAddr() string { - if x != nil { - return x.ServerAddr - } - return "" -} - type SetStateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -538,7 +481,7 @@ type SetStateRequest struct { func (x *SetStateRequest) Reset() { *x = SetStateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[3] + mi := &file_vm_vm_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -551,7 +494,7 @@ func (x *SetStateRequest) String() string { func (*SetStateRequest) ProtoMessage() {} func (x *SetStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[3] + mi := &file_vm_vm_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -564,7 +507,7 @@ func (x *SetStateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetStateRequest.ProtoReflect.Descriptor instead. 
func (*SetStateRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{3} + return file_vm_vm_proto_rawDescGZIP(), []int{2} } func (x *SetStateRequest) GetState() State { @@ -589,7 +532,7 @@ type SetStateResponse struct { func (x *SetStateResponse) Reset() { *x = SetStateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[4] + mi := &file_vm_vm_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -602,7 +545,7 @@ func (x *SetStateResponse) String() string { func (*SetStateResponse) ProtoMessage() {} func (x *SetStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[4] + mi := &file_vm_vm_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -615,7 +558,7 @@ func (x *SetStateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetStateResponse.ProtoReflect.Descriptor instead. 
func (*SetStateResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{4} + return file_vm_vm_proto_rawDescGZIP(), []int{3} } func (x *SetStateResponse) GetLastAcceptedId() []byte { @@ -664,7 +607,7 @@ type CreateHandlersResponse struct { func (x *CreateHandlersResponse) Reset() { *x = CreateHandlersResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[5] + mi := &file_vm_vm_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -677,7 +620,7 @@ func (x *CreateHandlersResponse) String() string { func (*CreateHandlersResponse) ProtoMessage() {} func (x *CreateHandlersResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[5] + mi := &file_vm_vm_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -690,7 +633,7 @@ func (x *CreateHandlersResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHandlersResponse.ProtoReflect.Descriptor instead. 
func (*CreateHandlersResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{5} + return file_vm_vm_proto_rawDescGZIP(), []int{4} } func (x *CreateHandlersResponse) GetHandlers() []*Handler { @@ -711,7 +654,7 @@ type CreateStaticHandlersResponse struct { func (x *CreateStaticHandlersResponse) Reset() { *x = CreateStaticHandlersResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[6] + mi := &file_vm_vm_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -724,7 +667,7 @@ func (x *CreateStaticHandlersResponse) String() string { func (*CreateStaticHandlersResponse) ProtoMessage() {} func (x *CreateStaticHandlersResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[6] + mi := &file_vm_vm_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -737,7 +680,7 @@ func (x *CreateStaticHandlersResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateStaticHandlersResponse.ProtoReflect.Descriptor instead. 
func (*CreateStaticHandlersResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{6} + return file_vm_vm_proto_rawDescGZIP(), []int{5} } func (x *CreateStaticHandlersResponse) GetHandlers() []*Handler { @@ -761,7 +704,7 @@ type Handler struct { func (x *Handler) Reset() { *x = Handler{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[7] + mi := &file_vm_vm_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -774,7 +717,7 @@ func (x *Handler) String() string { func (*Handler) ProtoMessage() {} func (x *Handler) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[7] + mi := &file_vm_vm_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -787,7 +730,7 @@ func (x *Handler) ProtoReflect() protoreflect.Message { // Deprecated: Use Handler.ProtoReflect.Descriptor instead. func (*Handler) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{7} + return file_vm_vm_proto_rawDescGZIP(), []int{6} } func (x *Handler) GetPrefix() string { @@ -815,7 +758,7 @@ type BuildBlockRequest struct { func (x *BuildBlockRequest) Reset() { *x = BuildBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[8] + mi := &file_vm_vm_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -828,7 +771,7 @@ func (x *BuildBlockRequest) String() string { func (*BuildBlockRequest) ProtoMessage() {} func (x *BuildBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[8] + mi := &file_vm_vm_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -841,7 +784,7 @@ func (x *BuildBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use 
BuildBlockRequest.ProtoReflect.Descriptor instead. func (*BuildBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{8} + return file_vm_vm_proto_rawDescGZIP(), []int{7} } func (x *BuildBlockRequest) GetPChainHeight() uint64 { @@ -868,7 +811,7 @@ type BuildBlockResponse struct { func (x *BuildBlockResponse) Reset() { *x = BuildBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[9] + mi := &file_vm_vm_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -881,7 +824,7 @@ func (x *BuildBlockResponse) String() string { func (*BuildBlockResponse) ProtoMessage() {} func (x *BuildBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[9] + mi := &file_vm_vm_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -894,7 +837,7 @@ func (x *BuildBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BuildBlockResponse.ProtoReflect.Descriptor instead. 
func (*BuildBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{9} + return file_vm_vm_proto_rawDescGZIP(), []int{8} } func (x *BuildBlockResponse) GetId() []byte { @@ -950,7 +893,7 @@ type ParseBlockRequest struct { func (x *ParseBlockRequest) Reset() { *x = ParseBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[10] + mi := &file_vm_vm_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -963,7 +906,7 @@ func (x *ParseBlockRequest) String() string { func (*ParseBlockRequest) ProtoMessage() {} func (x *ParseBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[10] + mi := &file_vm_vm_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -976,7 +919,7 @@ func (x *ParseBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseBlockRequest.ProtoReflect.Descriptor instead. 
func (*ParseBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{10} + return file_vm_vm_proto_rawDescGZIP(), []int{9} } func (x *ParseBlockRequest) GetBytes() []byte { @@ -1002,7 +945,7 @@ type ParseBlockResponse struct { func (x *ParseBlockResponse) Reset() { *x = ParseBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[11] + mi := &file_vm_vm_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1015,7 +958,7 @@ func (x *ParseBlockResponse) String() string { func (*ParseBlockResponse) ProtoMessage() {} func (x *ParseBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[11] + mi := &file_vm_vm_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1028,7 +971,7 @@ func (x *ParseBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseBlockResponse.ProtoReflect.Descriptor instead. 
func (*ParseBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{11} + return file_vm_vm_proto_rawDescGZIP(), []int{10} } func (x *ParseBlockResponse) GetId() []byte { @@ -1084,7 +1027,7 @@ type GetBlockRequest struct { func (x *GetBlockRequest) Reset() { *x = GetBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[12] + mi := &file_vm_vm_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1097,7 +1040,7 @@ func (x *GetBlockRequest) String() string { func (*GetBlockRequest) ProtoMessage() {} func (x *GetBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[12] + mi := &file_vm_vm_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1110,7 +1053,7 @@ func (x *GetBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockRequest.ProtoReflect.Descriptor instead. 
func (*GetBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{12} + return file_vm_vm_proto_rawDescGZIP(), []int{11} } func (x *GetBlockRequest) GetId() []byte { @@ -1138,7 +1081,7 @@ type GetBlockResponse struct { func (x *GetBlockResponse) Reset() { *x = GetBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[13] + mi := &file_vm_vm_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1151,7 +1094,7 @@ func (x *GetBlockResponse) String() string { func (*GetBlockResponse) ProtoMessage() {} func (x *GetBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[13] + mi := &file_vm_vm_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1164,7 +1107,7 @@ func (x *GetBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockResponse.ProtoReflect.Descriptor instead. 
func (*GetBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{13} + return file_vm_vm_proto_rawDescGZIP(), []int{12} } func (x *GetBlockResponse) GetParentId() []byte { @@ -1227,7 +1170,7 @@ type SetPreferenceRequest struct { func (x *SetPreferenceRequest) Reset() { *x = SetPreferenceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[14] + mi := &file_vm_vm_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1240,7 +1183,7 @@ func (x *SetPreferenceRequest) String() string { func (*SetPreferenceRequest) ProtoMessage() {} func (x *SetPreferenceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[14] + mi := &file_vm_vm_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1253,7 +1196,7 @@ func (x *SetPreferenceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetPreferenceRequest.ProtoReflect.Descriptor instead. 
func (*SetPreferenceRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{14} + return file_vm_vm_proto_rawDescGZIP(), []int{13} } func (x *SetPreferenceRequest) GetId() []byte { @@ -1277,7 +1220,7 @@ type BlockVerifyRequest struct { func (x *BlockVerifyRequest) Reset() { *x = BlockVerifyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[15] + mi := &file_vm_vm_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1290,7 +1233,7 @@ func (x *BlockVerifyRequest) String() string { func (*BlockVerifyRequest) ProtoMessage() {} func (x *BlockVerifyRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[15] + mi := &file_vm_vm_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1303,7 +1246,7 @@ func (x *BlockVerifyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockVerifyRequest.ProtoReflect.Descriptor instead. 
func (*BlockVerifyRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{15} + return file_vm_vm_proto_rawDescGZIP(), []int{14} } func (x *BlockVerifyRequest) GetBytes() []byte { @@ -1331,7 +1274,7 @@ type BlockVerifyResponse struct { func (x *BlockVerifyResponse) Reset() { *x = BlockVerifyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[16] + mi := &file_vm_vm_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1344,7 +1287,7 @@ func (x *BlockVerifyResponse) String() string { func (*BlockVerifyResponse) ProtoMessage() {} func (x *BlockVerifyResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[16] + mi := &file_vm_vm_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1357,7 +1300,7 @@ func (x *BlockVerifyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockVerifyResponse.ProtoReflect.Descriptor instead. 
func (*BlockVerifyResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{16} + return file_vm_vm_proto_rawDescGZIP(), []int{15} } func (x *BlockVerifyResponse) GetTimestamp() *timestamppb.Timestamp { @@ -1378,7 +1321,7 @@ type BlockAcceptRequest struct { func (x *BlockAcceptRequest) Reset() { *x = BlockAcceptRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[17] + mi := &file_vm_vm_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1391,7 +1334,7 @@ func (x *BlockAcceptRequest) String() string { func (*BlockAcceptRequest) ProtoMessage() {} func (x *BlockAcceptRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[17] + mi := &file_vm_vm_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1404,7 +1347,7 @@ func (x *BlockAcceptRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockAcceptRequest.ProtoReflect.Descriptor instead. 
func (*BlockAcceptRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{17} + return file_vm_vm_proto_rawDescGZIP(), []int{16} } func (x *BlockAcceptRequest) GetId() []byte { @@ -1425,7 +1368,7 @@ type BlockRejectRequest struct { func (x *BlockRejectRequest) Reset() { *x = BlockRejectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[18] + mi := &file_vm_vm_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1438,7 +1381,7 @@ func (x *BlockRejectRequest) String() string { func (*BlockRejectRequest) ProtoMessage() {} func (x *BlockRejectRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[18] + mi := &file_vm_vm_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1451,7 +1394,7 @@ func (x *BlockRejectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockRejectRequest.ProtoReflect.Descriptor instead. 
func (*BlockRejectRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{18} + return file_vm_vm_proto_rawDescGZIP(), []int{17} } func (x *BlockRejectRequest) GetId() []byte { @@ -1472,7 +1415,7 @@ type HealthResponse struct { func (x *HealthResponse) Reset() { *x = HealthResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[19] + mi := &file_vm_vm_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1485,7 +1428,7 @@ func (x *HealthResponse) String() string { func (*HealthResponse) ProtoMessage() {} func (x *HealthResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[19] + mi := &file_vm_vm_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1498,7 +1441,7 @@ func (x *HealthResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead. 
func (*HealthResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{19} + return file_vm_vm_proto_rawDescGZIP(), []int{18} } func (x *HealthResponse) GetDetails() []byte { @@ -1519,7 +1462,7 @@ type VersionResponse struct { func (x *VersionResponse) Reset() { *x = VersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[20] + mi := &file_vm_vm_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1532,7 +1475,7 @@ func (x *VersionResponse) String() string { func (*VersionResponse) ProtoMessage() {} func (x *VersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[20] + mi := &file_vm_vm_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1545,7 +1488,7 @@ func (x *VersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead. 
func (*VersionResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{20} + return file_vm_vm_proto_rawDescGZIP(), []int{19} } func (x *VersionResponse) GetVersion() string { @@ -1573,7 +1516,7 @@ type AppRequestMsg struct { func (x *AppRequestMsg) Reset() { *x = AppRequestMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[21] + mi := &file_vm_vm_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1586,7 +1529,7 @@ func (x *AppRequestMsg) String() string { func (*AppRequestMsg) ProtoMessage() {} func (x *AppRequestMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[21] + mi := &file_vm_vm_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1599,7 +1542,7 @@ func (x *AppRequestMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequestMsg.ProtoReflect.Descriptor instead. 
func (*AppRequestMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{21} + return file_vm_vm_proto_rawDescGZIP(), []int{20} } func (x *AppRequestMsg) GetNodeId() []byte { @@ -1644,7 +1587,7 @@ type AppRequestFailedMsg struct { func (x *AppRequestFailedMsg) Reset() { *x = AppRequestFailedMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[22] + mi := &file_vm_vm_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1657,7 +1600,7 @@ func (x *AppRequestFailedMsg) String() string { func (*AppRequestFailedMsg) ProtoMessage() {} func (x *AppRequestFailedMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[22] + mi := &file_vm_vm_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1670,7 +1613,7 @@ func (x *AppRequestFailedMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequestFailedMsg.ProtoReflect.Descriptor instead. 
func (*AppRequestFailedMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{22} + return file_vm_vm_proto_rawDescGZIP(), []int{21} } func (x *AppRequestFailedMsg) GetNodeId() []byte { @@ -1703,7 +1646,7 @@ type AppResponseMsg struct { func (x *AppResponseMsg) Reset() { *x = AppResponseMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[23] + mi := &file_vm_vm_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1716,7 +1659,7 @@ func (x *AppResponseMsg) String() string { func (*AppResponseMsg) ProtoMessage() {} func (x *AppResponseMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[23] + mi := &file_vm_vm_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1729,7 +1672,7 @@ func (x *AppResponseMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppResponseMsg.ProtoReflect.Descriptor instead. 
func (*AppResponseMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{23} + return file_vm_vm_proto_rawDescGZIP(), []int{22} } func (x *AppResponseMsg) GetNodeId() []byte { @@ -1767,7 +1710,7 @@ type AppGossipMsg struct { func (x *AppGossipMsg) Reset() { *x = AppGossipMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[24] + mi := &file_vm_vm_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1780,7 +1723,7 @@ func (x *AppGossipMsg) String() string { func (*AppGossipMsg) ProtoMessage() {} func (x *AppGossipMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[24] + mi := &file_vm_vm_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1793,7 +1736,7 @@ func (x *AppGossipMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppGossipMsg.ProtoReflect.Descriptor instead. 
func (*AppGossipMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{24} + return file_vm_vm_proto_rawDescGZIP(), []int{23} } func (x *AppGossipMsg) GetNodeId() []byte { @@ -1828,7 +1771,7 @@ type CrossChainAppRequestMsg struct { func (x *CrossChainAppRequestMsg) Reset() { *x = CrossChainAppRequestMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[25] + mi := &file_vm_vm_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1841,7 +1784,7 @@ func (x *CrossChainAppRequestMsg) String() string { func (*CrossChainAppRequestMsg) ProtoMessage() {} func (x *CrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[25] + mi := &file_vm_vm_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1854,7 +1797,7 @@ func (x *CrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use CrossChainAppRequestMsg.ProtoReflect.Descriptor instead. 
func (*CrossChainAppRequestMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{25} + return file_vm_vm_proto_rawDescGZIP(), []int{24} } func (x *CrossChainAppRequestMsg) GetChainId() []byte { @@ -1899,7 +1842,7 @@ type CrossChainAppRequestFailedMsg struct { func (x *CrossChainAppRequestFailedMsg) Reset() { *x = CrossChainAppRequestFailedMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[26] + mi := &file_vm_vm_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1912,7 +1855,7 @@ func (x *CrossChainAppRequestFailedMsg) String() string { func (*CrossChainAppRequestFailedMsg) ProtoMessage() {} func (x *CrossChainAppRequestFailedMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[26] + mi := &file_vm_vm_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1925,7 +1868,7 @@ func (x *CrossChainAppRequestFailedMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use CrossChainAppRequestFailedMsg.ProtoReflect.Descriptor instead. 
func (*CrossChainAppRequestFailedMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{26} + return file_vm_vm_proto_rawDescGZIP(), []int{25} } func (x *CrossChainAppRequestFailedMsg) GetChainId() []byte { @@ -1958,7 +1901,7 @@ type CrossChainAppResponseMsg struct { func (x *CrossChainAppResponseMsg) Reset() { *x = CrossChainAppResponseMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[27] + mi := &file_vm_vm_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1971,7 +1914,7 @@ func (x *CrossChainAppResponseMsg) String() string { func (*CrossChainAppResponseMsg) ProtoMessage() {} func (x *CrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[27] + mi := &file_vm_vm_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1984,7 +1927,7 @@ func (x *CrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use CrossChainAppResponseMsg.ProtoReflect.Descriptor instead. 
func (*CrossChainAppResponseMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{27} + return file_vm_vm_proto_rawDescGZIP(), []int{26} } func (x *CrossChainAppResponseMsg) GetChainId() []byte { @@ -2020,7 +1963,7 @@ type ConnectedRequest struct { func (x *ConnectedRequest) Reset() { *x = ConnectedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[28] + mi := &file_vm_vm_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2033,7 +1976,7 @@ func (x *ConnectedRequest) String() string { func (*ConnectedRequest) ProtoMessage() {} func (x *ConnectedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[28] + mi := &file_vm_vm_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2046,7 +1989,7 @@ func (x *ConnectedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ConnectedRequest.ProtoReflect.Descriptor instead. 
func (*ConnectedRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{28} + return file_vm_vm_proto_rawDescGZIP(), []int{27} } func (x *ConnectedRequest) GetNodeId() []byte { @@ -2074,7 +2017,7 @@ type DisconnectedRequest struct { func (x *DisconnectedRequest) Reset() { *x = DisconnectedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[29] + mi := &file_vm_vm_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2087,7 +2030,7 @@ func (x *DisconnectedRequest) String() string { func (*DisconnectedRequest) ProtoMessage() {} func (x *DisconnectedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[29] + mi := &file_vm_vm_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2100,7 +2043,7 @@ func (x *DisconnectedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DisconnectedRequest.ProtoReflect.Descriptor instead. 
func (*DisconnectedRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{29} + return file_vm_vm_proto_rawDescGZIP(), []int{28} } func (x *DisconnectedRequest) GetNodeId() []byte { @@ -2124,7 +2067,7 @@ type GetAncestorsRequest struct { func (x *GetAncestorsRequest) Reset() { *x = GetAncestorsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[30] + mi := &file_vm_vm_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2137,7 +2080,7 @@ func (x *GetAncestorsRequest) String() string { func (*GetAncestorsRequest) ProtoMessage() {} func (x *GetAncestorsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[30] + mi := &file_vm_vm_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2150,7 +2093,7 @@ func (x *GetAncestorsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestorsRequest.ProtoReflect.Descriptor instead. 
func (*GetAncestorsRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{30} + return file_vm_vm_proto_rawDescGZIP(), []int{29} } func (x *GetAncestorsRequest) GetBlkId() []byte { @@ -2192,7 +2135,7 @@ type GetAncestorsResponse struct { func (x *GetAncestorsResponse) Reset() { *x = GetAncestorsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[31] + mi := &file_vm_vm_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2205,7 +2148,7 @@ func (x *GetAncestorsResponse) String() string { func (*GetAncestorsResponse) ProtoMessage() {} func (x *GetAncestorsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[31] + mi := &file_vm_vm_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2218,7 +2161,7 @@ func (x *GetAncestorsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestorsResponse.ProtoReflect.Descriptor instead. 
func (*GetAncestorsResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{31} + return file_vm_vm_proto_rawDescGZIP(), []int{30} } func (x *GetAncestorsResponse) GetBlksBytes() [][]byte { @@ -2239,7 +2182,7 @@ type BatchedParseBlockRequest struct { func (x *BatchedParseBlockRequest) Reset() { *x = BatchedParseBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[32] + mi := &file_vm_vm_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2252,7 +2195,7 @@ func (x *BatchedParseBlockRequest) String() string { func (*BatchedParseBlockRequest) ProtoMessage() {} func (x *BatchedParseBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[32] + mi := &file_vm_vm_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2265,7 +2208,7 @@ func (x *BatchedParseBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchedParseBlockRequest.ProtoReflect.Descriptor instead. 
func (*BatchedParseBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{32} + return file_vm_vm_proto_rawDescGZIP(), []int{31} } func (x *BatchedParseBlockRequest) GetRequest() [][]byte { @@ -2286,7 +2229,7 @@ type BatchedParseBlockResponse struct { func (x *BatchedParseBlockResponse) Reset() { *x = BatchedParseBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2299,7 +2242,7 @@ func (x *BatchedParseBlockResponse) String() string { func (*BatchedParseBlockResponse) ProtoMessage() {} func (x *BatchedParseBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2312,7 +2255,7 @@ func (x *BatchedParseBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchedParseBlockResponse.ProtoReflect.Descriptor instead. 
func (*BatchedParseBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{33} + return file_vm_vm_proto_rawDescGZIP(), []int{32} } func (x *BatchedParseBlockResponse) GetResponse() []*ParseBlockResponse { @@ -2333,7 +2276,7 @@ type VerifyHeightIndexResponse struct { func (x *VerifyHeightIndexResponse) Reset() { *x = VerifyHeightIndexResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2346,7 +2289,7 @@ func (x *VerifyHeightIndexResponse) String() string { func (*VerifyHeightIndexResponse) ProtoMessage() {} func (x *VerifyHeightIndexResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2359,7 +2302,7 @@ func (x *VerifyHeightIndexResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifyHeightIndexResponse.ProtoReflect.Descriptor instead. 
func (*VerifyHeightIndexResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{34} + return file_vm_vm_proto_rawDescGZIP(), []int{33} } func (x *VerifyHeightIndexResponse) GetErr() Error { @@ -2380,7 +2323,7 @@ type GetBlockIDAtHeightRequest struct { func (x *GetBlockIDAtHeightRequest) Reset() { *x = GetBlockIDAtHeightRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2393,7 +2336,7 @@ func (x *GetBlockIDAtHeightRequest) String() string { func (*GetBlockIDAtHeightRequest) ProtoMessage() {} func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2406,7 +2349,7 @@ func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightRequest.ProtoReflect.Descriptor instead. 
func (*GetBlockIDAtHeightRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{35} + return file_vm_vm_proto_rawDescGZIP(), []int{34} } func (x *GetBlockIDAtHeightRequest) GetHeight() uint64 { @@ -2428,7 +2371,7 @@ type GetBlockIDAtHeightResponse struct { func (x *GetBlockIDAtHeightResponse) Reset() { *x = GetBlockIDAtHeightResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2441,7 +2384,7 @@ func (x *GetBlockIDAtHeightResponse) String() string { func (*GetBlockIDAtHeightResponse) ProtoMessage() {} func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2454,7 +2397,7 @@ func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightResponse.ProtoReflect.Descriptor instead. 
func (*GetBlockIDAtHeightResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{36} + return file_vm_vm_proto_rawDescGZIP(), []int{35} } func (x *GetBlockIDAtHeightResponse) GetBlkId() []byte { @@ -2482,7 +2425,7 @@ type GatherResponse struct { func (x *GatherResponse) Reset() { *x = GatherResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2495,7 +2438,7 @@ func (x *GatherResponse) String() string { func (*GatherResponse) ProtoMessage() {} func (x *GatherResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2508,7 +2451,7 @@ func (x *GatherResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GatherResponse.ProtoReflect.Descriptor instead. 
func (*GatherResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{37} + return file_vm_vm_proto_rawDescGZIP(), []int{36} } func (x *GatherResponse) GetMetricFamilies() []*_go.MetricFamily { @@ -2530,7 +2473,7 @@ type StateSyncEnabledResponse struct { func (x *StateSyncEnabledResponse) Reset() { *x = StateSyncEnabledResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2543,7 +2486,7 @@ func (x *StateSyncEnabledResponse) String() string { func (*StateSyncEnabledResponse) ProtoMessage() {} func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2556,7 +2499,7 @@ func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSyncEnabledResponse.ProtoReflect.Descriptor instead. 
func (*StateSyncEnabledResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{38} + return file_vm_vm_proto_rawDescGZIP(), []int{37} } func (x *StateSyncEnabledResponse) GetEnabled() bool { @@ -2587,7 +2530,7 @@ type GetOngoingSyncStateSummaryResponse struct { func (x *GetOngoingSyncStateSummaryResponse) Reset() { *x = GetOngoingSyncStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2600,7 +2543,7 @@ func (x *GetOngoingSyncStateSummaryResponse) String() string { func (*GetOngoingSyncStateSummaryResponse) ProtoMessage() {} func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2613,7 +2556,7 @@ func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message // Deprecated: Use GetOngoingSyncStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetOngoingSyncStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{39} + return file_vm_vm_proto_rawDescGZIP(), []int{38} } func (x *GetOngoingSyncStateSummaryResponse) GetId() []byte { @@ -2658,7 +2601,7 @@ type GetLastStateSummaryResponse struct { func (x *GetLastStateSummaryResponse) Reset() { *x = GetLastStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2671,7 +2614,7 @@ func (x *GetLastStateSummaryResponse) String() string { func (*GetLastStateSummaryResponse) ProtoMessage() {} func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2684,7 +2627,7 @@ func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLastStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetLastStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{40} + return file_vm_vm_proto_rawDescGZIP(), []int{39} } func (x *GetLastStateSummaryResponse) GetId() []byte { @@ -2726,7 +2669,7 @@ type ParseStateSummaryRequest struct { func (x *ParseStateSummaryRequest) Reset() { *x = ParseStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2739,7 +2682,7 @@ func (x *ParseStateSummaryRequest) String() string { func (*ParseStateSummaryRequest) ProtoMessage() {} func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2752,7 +2695,7 @@ func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryRequest.ProtoReflect.Descriptor instead. 
func (*ParseStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{41} + return file_vm_vm_proto_rawDescGZIP(), []int{40} } func (x *ParseStateSummaryRequest) GetBytes() []byte { @@ -2775,7 +2718,7 @@ type ParseStateSummaryResponse struct { func (x *ParseStateSummaryResponse) Reset() { *x = ParseStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2788,7 +2731,7 @@ func (x *ParseStateSummaryResponse) String() string { func (*ParseStateSummaryResponse) ProtoMessage() {} func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2801,7 +2744,7 @@ func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*ParseStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{42} + return file_vm_vm_proto_rawDescGZIP(), []int{41} } func (x *ParseStateSummaryResponse) GetId() []byte { @@ -2836,7 +2779,7 @@ type GetStateSummaryRequest struct { func (x *GetStateSummaryRequest) Reset() { *x = GetStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[43] + mi := &file_vm_vm_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2849,7 +2792,7 @@ func (x *GetStateSummaryRequest) String() string { func (*GetStateSummaryRequest) ProtoMessage() {} func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[43] + mi := &file_vm_vm_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2862,7 +2805,7 @@ func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryRequest.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{43} + return file_vm_vm_proto_rawDescGZIP(), []int{42} } func (x *GetStateSummaryRequest) GetHeight() uint64 { @@ -2885,7 +2828,7 @@ type GetStateSummaryResponse struct { func (x *GetStateSummaryResponse) Reset() { *x = GetStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[44] + mi := &file_vm_vm_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2898,7 +2841,7 @@ func (x *GetStateSummaryResponse) String() string { func (*GetStateSummaryResponse) ProtoMessage() {} func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[44] + mi := &file_vm_vm_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2911,7 +2854,7 @@ func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{44} + return file_vm_vm_proto_rawDescGZIP(), []int{43} } func (x *GetStateSummaryResponse) GetId() []byte { @@ -2946,7 +2889,7 @@ type StateSummaryAcceptRequest struct { func (x *StateSummaryAcceptRequest) Reset() { *x = StateSummaryAcceptRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[45] + mi := &file_vm_vm_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2959,7 +2902,7 @@ func (x *StateSummaryAcceptRequest) String() string { func (*StateSummaryAcceptRequest) ProtoMessage() {} func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[45] + mi := &file_vm_vm_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2972,7 +2915,7 @@ func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptRequest.ProtoReflect.Descriptor instead. 
func (*StateSummaryAcceptRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{45} + return file_vm_vm_proto_rawDescGZIP(), []int{44} } func (x *StateSummaryAcceptRequest) GetBytes() []byte { @@ -2994,7 +2937,7 @@ type StateSummaryAcceptResponse struct { func (x *StateSummaryAcceptResponse) Reset() { *x = StateSummaryAcceptResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[46] + mi := &file_vm_vm_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3007,7 +2950,7 @@ func (x *StateSummaryAcceptResponse) String() string { func (*StateSummaryAcceptResponse) ProtoMessage() {} func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[46] + mi := &file_vm_vm_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3020,7 +2963,7 @@ func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptResponse.ProtoReflect.Descriptor instead. 
func (*StateSummaryAcceptResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{46} + return file_vm_vm_proto_rawDescGZIP(), []int{45} } func (x *StateSummaryAcceptResponse) GetMode() StateSummaryAcceptResponse_Mode { @@ -3047,7 +2990,7 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xec, 0x03, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x6f, 0x74, 0x6f, 0x22, 0xdc, 0x03, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, @@ -3072,11 +3015,10 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x0a, 0x64, - 0x62, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, 0x42, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x09, 0x64, 0x62, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 
0x24, 0x0a, 0x0e, 0x64, + 0x62, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x62, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, + 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, 0xdd, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, @@ -3092,12 +3034,7 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x22, 0x4e, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, - 0x42, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, - 0x64, 0x72, 0x22, 0x32, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x6d, 0x70, 0x22, 0x32, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x74, 0x53, 0x74, @@ -3545,7 +3482,7 @@ func file_vm_vm_proto_rawDescGZIP() []byte { } 
var file_vm_vm_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 47) +var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 46) var file_vm_vm_proto_goTypes = []interface{}{ (State)(0), // 0: vm.State (Status)(0), // 1: vm.Status @@ -3553,155 +3490,153 @@ var file_vm_vm_proto_goTypes = []interface{}{ (StateSummaryAcceptResponse_Mode)(0), // 3: vm.StateSummaryAcceptResponse.Mode (*InitializeRequest)(nil), // 4: vm.InitializeRequest (*InitializeResponse)(nil), // 5: vm.InitializeResponse - (*VersionedDBServer)(nil), // 6: vm.VersionedDBServer - (*SetStateRequest)(nil), // 7: vm.SetStateRequest - (*SetStateResponse)(nil), // 8: vm.SetStateResponse - (*CreateHandlersResponse)(nil), // 9: vm.CreateHandlersResponse - (*CreateStaticHandlersResponse)(nil), // 10: vm.CreateStaticHandlersResponse - (*Handler)(nil), // 11: vm.Handler - (*BuildBlockRequest)(nil), // 12: vm.BuildBlockRequest - (*BuildBlockResponse)(nil), // 13: vm.BuildBlockResponse - (*ParseBlockRequest)(nil), // 14: vm.ParseBlockRequest - (*ParseBlockResponse)(nil), // 15: vm.ParseBlockResponse - (*GetBlockRequest)(nil), // 16: vm.GetBlockRequest - (*GetBlockResponse)(nil), // 17: vm.GetBlockResponse - (*SetPreferenceRequest)(nil), // 18: vm.SetPreferenceRequest - (*BlockVerifyRequest)(nil), // 19: vm.BlockVerifyRequest - (*BlockVerifyResponse)(nil), // 20: vm.BlockVerifyResponse - (*BlockAcceptRequest)(nil), // 21: vm.BlockAcceptRequest - (*BlockRejectRequest)(nil), // 22: vm.BlockRejectRequest - (*HealthResponse)(nil), // 23: vm.HealthResponse - (*VersionResponse)(nil), // 24: vm.VersionResponse - (*AppRequestMsg)(nil), // 25: vm.AppRequestMsg - (*AppRequestFailedMsg)(nil), // 26: vm.AppRequestFailedMsg - (*AppResponseMsg)(nil), // 27: vm.AppResponseMsg - (*AppGossipMsg)(nil), // 28: vm.AppGossipMsg - (*CrossChainAppRequestMsg)(nil), // 29: vm.CrossChainAppRequestMsg - (*CrossChainAppRequestFailedMsg)(nil), // 30: 
vm.CrossChainAppRequestFailedMsg - (*CrossChainAppResponseMsg)(nil), // 31: vm.CrossChainAppResponseMsg - (*ConnectedRequest)(nil), // 32: vm.ConnectedRequest - (*DisconnectedRequest)(nil), // 33: vm.DisconnectedRequest - (*GetAncestorsRequest)(nil), // 34: vm.GetAncestorsRequest - (*GetAncestorsResponse)(nil), // 35: vm.GetAncestorsResponse - (*BatchedParseBlockRequest)(nil), // 36: vm.BatchedParseBlockRequest - (*BatchedParseBlockResponse)(nil), // 37: vm.BatchedParseBlockResponse - (*VerifyHeightIndexResponse)(nil), // 38: vm.VerifyHeightIndexResponse - (*GetBlockIDAtHeightRequest)(nil), // 39: vm.GetBlockIDAtHeightRequest - (*GetBlockIDAtHeightResponse)(nil), // 40: vm.GetBlockIDAtHeightResponse - (*GatherResponse)(nil), // 41: vm.GatherResponse - (*StateSyncEnabledResponse)(nil), // 42: vm.StateSyncEnabledResponse - (*GetOngoingSyncStateSummaryResponse)(nil), // 43: vm.GetOngoingSyncStateSummaryResponse - (*GetLastStateSummaryResponse)(nil), // 44: vm.GetLastStateSummaryResponse - (*ParseStateSummaryRequest)(nil), // 45: vm.ParseStateSummaryRequest - (*ParseStateSummaryResponse)(nil), // 46: vm.ParseStateSummaryResponse - (*GetStateSummaryRequest)(nil), // 47: vm.GetStateSummaryRequest - (*GetStateSummaryResponse)(nil), // 48: vm.GetStateSummaryResponse - (*StateSummaryAcceptRequest)(nil), // 49: vm.StateSummaryAcceptRequest - (*StateSummaryAcceptResponse)(nil), // 50: vm.StateSummaryAcceptResponse - (*timestamppb.Timestamp)(nil), // 51: google.protobuf.Timestamp - (*_go.MetricFamily)(nil), // 52: io.prometheus.client.MetricFamily - (*emptypb.Empty)(nil), // 53: google.protobuf.Empty + (*SetStateRequest)(nil), // 6: vm.SetStateRequest + (*SetStateResponse)(nil), // 7: vm.SetStateResponse + (*CreateHandlersResponse)(nil), // 8: vm.CreateHandlersResponse + (*CreateStaticHandlersResponse)(nil), // 9: vm.CreateStaticHandlersResponse + (*Handler)(nil), // 10: vm.Handler + (*BuildBlockRequest)(nil), // 11: vm.BuildBlockRequest + (*BuildBlockResponse)(nil), // 12: 
vm.BuildBlockResponse + (*ParseBlockRequest)(nil), // 13: vm.ParseBlockRequest + (*ParseBlockResponse)(nil), // 14: vm.ParseBlockResponse + (*GetBlockRequest)(nil), // 15: vm.GetBlockRequest + (*GetBlockResponse)(nil), // 16: vm.GetBlockResponse + (*SetPreferenceRequest)(nil), // 17: vm.SetPreferenceRequest + (*BlockVerifyRequest)(nil), // 18: vm.BlockVerifyRequest + (*BlockVerifyResponse)(nil), // 19: vm.BlockVerifyResponse + (*BlockAcceptRequest)(nil), // 20: vm.BlockAcceptRequest + (*BlockRejectRequest)(nil), // 21: vm.BlockRejectRequest + (*HealthResponse)(nil), // 22: vm.HealthResponse + (*VersionResponse)(nil), // 23: vm.VersionResponse + (*AppRequestMsg)(nil), // 24: vm.AppRequestMsg + (*AppRequestFailedMsg)(nil), // 25: vm.AppRequestFailedMsg + (*AppResponseMsg)(nil), // 26: vm.AppResponseMsg + (*AppGossipMsg)(nil), // 27: vm.AppGossipMsg + (*CrossChainAppRequestMsg)(nil), // 28: vm.CrossChainAppRequestMsg + (*CrossChainAppRequestFailedMsg)(nil), // 29: vm.CrossChainAppRequestFailedMsg + (*CrossChainAppResponseMsg)(nil), // 30: vm.CrossChainAppResponseMsg + (*ConnectedRequest)(nil), // 31: vm.ConnectedRequest + (*DisconnectedRequest)(nil), // 32: vm.DisconnectedRequest + (*GetAncestorsRequest)(nil), // 33: vm.GetAncestorsRequest + (*GetAncestorsResponse)(nil), // 34: vm.GetAncestorsResponse + (*BatchedParseBlockRequest)(nil), // 35: vm.BatchedParseBlockRequest + (*BatchedParseBlockResponse)(nil), // 36: vm.BatchedParseBlockResponse + (*VerifyHeightIndexResponse)(nil), // 37: vm.VerifyHeightIndexResponse + (*GetBlockIDAtHeightRequest)(nil), // 38: vm.GetBlockIDAtHeightRequest + (*GetBlockIDAtHeightResponse)(nil), // 39: vm.GetBlockIDAtHeightResponse + (*GatherResponse)(nil), // 40: vm.GatherResponse + (*StateSyncEnabledResponse)(nil), // 41: vm.StateSyncEnabledResponse + (*GetOngoingSyncStateSummaryResponse)(nil), // 42: vm.GetOngoingSyncStateSummaryResponse + (*GetLastStateSummaryResponse)(nil), // 43: vm.GetLastStateSummaryResponse + 
(*ParseStateSummaryRequest)(nil), // 44: vm.ParseStateSummaryRequest + (*ParseStateSummaryResponse)(nil), // 45: vm.ParseStateSummaryResponse + (*GetStateSummaryRequest)(nil), // 46: vm.GetStateSummaryRequest + (*GetStateSummaryResponse)(nil), // 47: vm.GetStateSummaryResponse + (*StateSummaryAcceptRequest)(nil), // 48: vm.StateSummaryAcceptRequest + (*StateSummaryAcceptResponse)(nil), // 49: vm.StateSummaryAcceptResponse + (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp + (*_go.MetricFamily)(nil), // 51: io.prometheus.client.MetricFamily + (*emptypb.Empty)(nil), // 52: google.protobuf.Empty } var file_vm_vm_proto_depIdxs = []int32{ - 6, // 0: vm.InitializeRequest.db_servers:type_name -> vm.VersionedDBServer - 51, // 1: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp - 0, // 2: vm.SetStateRequest.state:type_name -> vm.State - 51, // 3: vm.SetStateResponse.timestamp:type_name -> google.protobuf.Timestamp - 11, // 4: vm.CreateHandlersResponse.handlers:type_name -> vm.Handler - 11, // 5: vm.CreateStaticHandlersResponse.handlers:type_name -> vm.Handler - 51, // 6: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 1, // 7: vm.ParseBlockResponse.status:type_name -> vm.Status - 51, // 8: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 1, // 9: vm.GetBlockResponse.status:type_name -> vm.Status - 51, // 10: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 2, // 11: vm.GetBlockResponse.err:type_name -> vm.Error - 51, // 12: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp - 51, // 13: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp - 51, // 14: vm.CrossChainAppRequestMsg.deadline:type_name -> google.protobuf.Timestamp - 15, // 15: vm.BatchedParseBlockResponse.response:type_name -> vm.ParseBlockResponse - 2, // 16: vm.VerifyHeightIndexResponse.err:type_name -> vm.Error - 2, // 17: vm.GetBlockIDAtHeightResponse.err:type_name -> 
vm.Error - 52, // 18: vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily - 2, // 19: vm.StateSyncEnabledResponse.err:type_name -> vm.Error - 2, // 20: vm.GetOngoingSyncStateSummaryResponse.err:type_name -> vm.Error - 2, // 21: vm.GetLastStateSummaryResponse.err:type_name -> vm.Error - 2, // 22: vm.ParseStateSummaryResponse.err:type_name -> vm.Error - 2, // 23: vm.GetStateSummaryResponse.err:type_name -> vm.Error - 3, // 24: vm.StateSummaryAcceptResponse.mode:type_name -> vm.StateSummaryAcceptResponse.Mode - 2, // 25: vm.StateSummaryAcceptResponse.err:type_name -> vm.Error - 4, // 26: vm.VM.Initialize:input_type -> vm.InitializeRequest - 7, // 27: vm.VM.SetState:input_type -> vm.SetStateRequest - 53, // 28: vm.VM.Shutdown:input_type -> google.protobuf.Empty - 53, // 29: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty - 53, // 30: vm.VM.CreateStaticHandlers:input_type -> google.protobuf.Empty - 32, // 31: vm.VM.Connected:input_type -> vm.ConnectedRequest - 33, // 32: vm.VM.Disconnected:input_type -> vm.DisconnectedRequest - 12, // 33: vm.VM.BuildBlock:input_type -> vm.BuildBlockRequest - 14, // 34: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest - 16, // 35: vm.VM.GetBlock:input_type -> vm.GetBlockRequest - 18, // 36: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest - 53, // 37: vm.VM.Health:input_type -> google.protobuf.Empty - 53, // 38: vm.VM.Version:input_type -> google.protobuf.Empty - 25, // 39: vm.VM.AppRequest:input_type -> vm.AppRequestMsg - 26, // 40: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg - 27, // 41: vm.VM.AppResponse:input_type -> vm.AppResponseMsg - 28, // 42: vm.VM.AppGossip:input_type -> vm.AppGossipMsg - 53, // 43: vm.VM.Gather:input_type -> google.protobuf.Empty - 29, // 44: vm.VM.CrossChainAppRequest:input_type -> vm.CrossChainAppRequestMsg - 30, // 45: vm.VM.CrossChainAppRequestFailed:input_type -> vm.CrossChainAppRequestFailedMsg - 31, // 46: 
vm.VM.CrossChainAppResponse:input_type -> vm.CrossChainAppResponseMsg - 34, // 47: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest - 36, // 48: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest - 53, // 49: vm.VM.VerifyHeightIndex:input_type -> google.protobuf.Empty - 39, // 50: vm.VM.GetBlockIDAtHeight:input_type -> vm.GetBlockIDAtHeightRequest - 53, // 51: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty - 53, // 52: vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty - 53, // 53: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty - 45, // 54: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest - 47, // 55: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest - 19, // 56: vm.VM.BlockVerify:input_type -> vm.BlockVerifyRequest - 21, // 57: vm.VM.BlockAccept:input_type -> vm.BlockAcceptRequest - 22, // 58: vm.VM.BlockReject:input_type -> vm.BlockRejectRequest - 49, // 59: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest - 5, // 60: vm.VM.Initialize:output_type -> vm.InitializeResponse - 8, // 61: vm.VM.SetState:output_type -> vm.SetStateResponse - 53, // 62: vm.VM.Shutdown:output_type -> google.protobuf.Empty - 9, // 63: vm.VM.CreateHandlers:output_type -> vm.CreateHandlersResponse - 10, // 64: vm.VM.CreateStaticHandlers:output_type -> vm.CreateStaticHandlersResponse - 53, // 65: vm.VM.Connected:output_type -> google.protobuf.Empty - 53, // 66: vm.VM.Disconnected:output_type -> google.protobuf.Empty - 13, // 67: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse - 15, // 68: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse - 17, // 69: vm.VM.GetBlock:output_type -> vm.GetBlockResponse - 53, // 70: vm.VM.SetPreference:output_type -> google.protobuf.Empty - 23, // 71: vm.VM.Health:output_type -> vm.HealthResponse - 24, // 72: vm.VM.Version:output_type -> vm.VersionResponse - 53, // 73: vm.VM.AppRequest:output_type -> google.protobuf.Empty - 53, // 74: 
vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty - 53, // 75: vm.VM.AppResponse:output_type -> google.protobuf.Empty - 53, // 76: vm.VM.AppGossip:output_type -> google.protobuf.Empty - 41, // 77: vm.VM.Gather:output_type -> vm.GatherResponse - 53, // 78: vm.VM.CrossChainAppRequest:output_type -> google.protobuf.Empty - 53, // 79: vm.VM.CrossChainAppRequestFailed:output_type -> google.protobuf.Empty - 53, // 80: vm.VM.CrossChainAppResponse:output_type -> google.protobuf.Empty - 35, // 81: vm.VM.GetAncestors:output_type -> vm.GetAncestorsResponse - 37, // 82: vm.VM.BatchedParseBlock:output_type -> vm.BatchedParseBlockResponse - 38, // 83: vm.VM.VerifyHeightIndex:output_type -> vm.VerifyHeightIndexResponse - 40, // 84: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse - 42, // 85: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse - 43, // 86: vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse - 44, // 87: vm.VM.GetLastStateSummary:output_type -> vm.GetLastStateSummaryResponse - 46, // 88: vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse - 48, // 89: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse - 20, // 90: vm.VM.BlockVerify:output_type -> vm.BlockVerifyResponse - 53, // 91: vm.VM.BlockAccept:output_type -> google.protobuf.Empty - 53, // 92: vm.VM.BlockReject:output_type -> google.protobuf.Empty - 50, // 93: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse - 60, // [60:94] is the sub-list for method output_type - 26, // [26:60] is the sub-list for method input_type - 26, // [26:26] is the sub-list for extension type_name - 26, // [26:26] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name + 50, // 0: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: vm.SetStateRequest.state:type_name -> vm.State + 50, // 2: vm.SetStateResponse.timestamp:type_name -> 
google.protobuf.Timestamp + 10, // 3: vm.CreateHandlersResponse.handlers:type_name -> vm.Handler + 10, // 4: vm.CreateStaticHandlersResponse.handlers:type_name -> vm.Handler + 50, // 5: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 1, // 6: vm.ParseBlockResponse.status:type_name -> vm.Status + 50, // 7: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 1, // 8: vm.GetBlockResponse.status:type_name -> vm.Status + 50, // 9: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 2, // 10: vm.GetBlockResponse.err:type_name -> vm.Error + 50, // 11: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp + 50, // 12: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 50, // 13: vm.CrossChainAppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 14, // 14: vm.BatchedParseBlockResponse.response:type_name -> vm.ParseBlockResponse + 2, // 15: vm.VerifyHeightIndexResponse.err:type_name -> vm.Error + 2, // 16: vm.GetBlockIDAtHeightResponse.err:type_name -> vm.Error + 51, // 17: vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily + 2, // 18: vm.StateSyncEnabledResponse.err:type_name -> vm.Error + 2, // 19: vm.GetOngoingSyncStateSummaryResponse.err:type_name -> vm.Error + 2, // 20: vm.GetLastStateSummaryResponse.err:type_name -> vm.Error + 2, // 21: vm.ParseStateSummaryResponse.err:type_name -> vm.Error + 2, // 22: vm.GetStateSummaryResponse.err:type_name -> vm.Error + 3, // 23: vm.StateSummaryAcceptResponse.mode:type_name -> vm.StateSummaryAcceptResponse.Mode + 2, // 24: vm.StateSummaryAcceptResponse.err:type_name -> vm.Error + 4, // 25: vm.VM.Initialize:input_type -> vm.InitializeRequest + 6, // 26: vm.VM.SetState:input_type -> vm.SetStateRequest + 52, // 27: vm.VM.Shutdown:input_type -> google.protobuf.Empty + 52, // 28: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty + 52, // 29: vm.VM.CreateStaticHandlers:input_type -> 
google.protobuf.Empty + 31, // 30: vm.VM.Connected:input_type -> vm.ConnectedRequest + 32, // 31: vm.VM.Disconnected:input_type -> vm.DisconnectedRequest + 11, // 32: vm.VM.BuildBlock:input_type -> vm.BuildBlockRequest + 13, // 33: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest + 15, // 34: vm.VM.GetBlock:input_type -> vm.GetBlockRequest + 17, // 35: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest + 52, // 36: vm.VM.Health:input_type -> google.protobuf.Empty + 52, // 37: vm.VM.Version:input_type -> google.protobuf.Empty + 24, // 38: vm.VM.AppRequest:input_type -> vm.AppRequestMsg + 25, // 39: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg + 26, // 40: vm.VM.AppResponse:input_type -> vm.AppResponseMsg + 27, // 41: vm.VM.AppGossip:input_type -> vm.AppGossipMsg + 52, // 42: vm.VM.Gather:input_type -> google.protobuf.Empty + 28, // 43: vm.VM.CrossChainAppRequest:input_type -> vm.CrossChainAppRequestMsg + 29, // 44: vm.VM.CrossChainAppRequestFailed:input_type -> vm.CrossChainAppRequestFailedMsg + 30, // 45: vm.VM.CrossChainAppResponse:input_type -> vm.CrossChainAppResponseMsg + 33, // 46: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest + 35, // 47: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest + 52, // 48: vm.VM.VerifyHeightIndex:input_type -> google.protobuf.Empty + 38, // 49: vm.VM.GetBlockIDAtHeight:input_type -> vm.GetBlockIDAtHeightRequest + 52, // 50: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty + 52, // 51: vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty + 52, // 52: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty + 44, // 53: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest + 46, // 54: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest + 18, // 55: vm.VM.BlockVerify:input_type -> vm.BlockVerifyRequest + 20, // 56: vm.VM.BlockAccept:input_type -> vm.BlockAcceptRequest + 21, // 57: vm.VM.BlockReject:input_type -> 
vm.BlockRejectRequest + 48, // 58: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest + 5, // 59: vm.VM.Initialize:output_type -> vm.InitializeResponse + 7, // 60: vm.VM.SetState:output_type -> vm.SetStateResponse + 52, // 61: vm.VM.Shutdown:output_type -> google.protobuf.Empty + 8, // 62: vm.VM.CreateHandlers:output_type -> vm.CreateHandlersResponse + 9, // 63: vm.VM.CreateStaticHandlers:output_type -> vm.CreateStaticHandlersResponse + 52, // 64: vm.VM.Connected:output_type -> google.protobuf.Empty + 52, // 65: vm.VM.Disconnected:output_type -> google.protobuf.Empty + 12, // 66: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse + 14, // 67: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse + 16, // 68: vm.VM.GetBlock:output_type -> vm.GetBlockResponse + 52, // 69: vm.VM.SetPreference:output_type -> google.protobuf.Empty + 22, // 70: vm.VM.Health:output_type -> vm.HealthResponse + 23, // 71: vm.VM.Version:output_type -> vm.VersionResponse + 52, // 72: vm.VM.AppRequest:output_type -> google.protobuf.Empty + 52, // 73: vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty + 52, // 74: vm.VM.AppResponse:output_type -> google.protobuf.Empty + 52, // 75: vm.VM.AppGossip:output_type -> google.protobuf.Empty + 40, // 76: vm.VM.Gather:output_type -> vm.GatherResponse + 52, // 77: vm.VM.CrossChainAppRequest:output_type -> google.protobuf.Empty + 52, // 78: vm.VM.CrossChainAppRequestFailed:output_type -> google.protobuf.Empty + 52, // 79: vm.VM.CrossChainAppResponse:output_type -> google.protobuf.Empty + 34, // 80: vm.VM.GetAncestors:output_type -> vm.GetAncestorsResponse + 36, // 81: vm.VM.BatchedParseBlock:output_type -> vm.BatchedParseBlockResponse + 37, // 82: vm.VM.VerifyHeightIndex:output_type -> vm.VerifyHeightIndexResponse + 39, // 83: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse + 41, // 84: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse + 42, // 85: 
vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse + 43, // 86: vm.VM.GetLastStateSummary:output_type -> vm.GetLastStateSummaryResponse + 45, // 87: vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse + 47, // 88: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse + 19, // 89: vm.VM.BlockVerify:output_type -> vm.BlockVerifyResponse + 52, // 90: vm.VM.BlockAccept:output_type -> google.protobuf.Empty + 52, // 91: vm.VM.BlockReject:output_type -> google.protobuf.Empty + 49, // 92: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse + 59, // [59:93] is the sub-list for method output_type + 25, // [25:59] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name } func init() { file_vm_vm_proto_init() } @@ -3735,18 +3670,6 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionedDBServer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_vm_vm_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetStateRequest); i { case 0: return &v.state @@ -3758,7 +3681,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetStateResponse); i { case 0: return &v.state @@ -3770,7 +3693,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateHandlersResponse); i { case 0: return &v.state @@ -3782,7 
+3705,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateStaticHandlersResponse); i { case 0: return &v.state @@ -3794,7 +3717,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Handler); i { case 0: return &v.state @@ -3806,7 +3729,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BuildBlockRequest); i { case 0: return &v.state @@ -3818,7 +3741,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BuildBlockResponse); i { case 0: return &v.state @@ -3830,7 +3753,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseBlockRequest); i { case 0: return &v.state @@ -3842,7 +3765,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseBlockResponse); i { case 0: return &v.state @@ -3854,7 +3777,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[11].Exporter = func(v interface{}, i 
int) interface{} { switch v := v.(*GetBlockRequest); i { case 0: return &v.state @@ -3866,7 +3789,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockResponse); i { case 0: return &v.state @@ -3878,7 +3801,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetPreferenceRequest); i { case 0: return &v.state @@ -3890,7 +3813,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockVerifyRequest); i { case 0: return &v.state @@ -3902,7 +3825,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockVerifyResponse); i { case 0: return &v.state @@ -3914,7 +3837,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockAcceptRequest); i { case 0: return &v.state @@ -3926,7 +3849,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockRejectRequest); i { case 0: return &v.state @@ -3938,7 +3861,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[19].Exporter = 
func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HealthResponse); i { case 0: return &v.state @@ -3950,7 +3873,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VersionResponse); i { case 0: return &v.state @@ -3962,7 +3885,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppRequestMsg); i { case 0: return &v.state @@ -3974,7 +3897,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppRequestFailedMsg); i { case 0: return &v.state @@ -3986,7 +3909,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppResponseMsg); i { case 0: return &v.state @@ -3998,7 +3921,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppGossipMsg); i { case 0: return &v.state @@ -4010,7 +3933,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CrossChainAppRequestMsg); i { case 0: return &v.state @@ -4022,7 +3945,7 
@@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CrossChainAppRequestFailedMsg); i { case 0: return &v.state @@ -4034,7 +3957,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CrossChainAppResponseMsg); i { case 0: return &v.state @@ -4046,7 +3969,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ConnectedRequest); i { case 0: return &v.state @@ -4058,7 +3981,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DisconnectedRequest); i { case 0: return &v.state @@ -4070,7 +3993,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAncestorsRequest); i { case 0: return &v.state @@ -4082,7 +4005,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAncestorsResponse); i { case 0: return &v.state @@ -4094,7 +4017,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[31].Exporter 
= func(v interface{}, i int) interface{} { switch v := v.(*BatchedParseBlockRequest); i { case 0: return &v.state @@ -4106,7 +4029,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BatchedParseBlockResponse); i { case 0: return &v.state @@ -4118,7 +4041,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VerifyHeightIndexResponse); i { case 0: return &v.state @@ -4130,7 +4053,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockIDAtHeightRequest); i { case 0: return &v.state @@ -4142,7 +4065,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockIDAtHeightResponse); i { case 0: return &v.state @@ -4154,7 +4077,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GatherResponse); i { case 0: return &v.state @@ -4166,7 +4089,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSyncEnabledResponse); i { case 0: return &v.state @@ -4178,7 +4101,7 @@ func 
file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetOngoingSyncStateSummaryResponse); i { case 0: return &v.state @@ -4190,7 +4113,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetLastStateSummaryResponse); i { case 0: return &v.state @@ -4202,7 +4125,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseStateSummaryRequest); i { case 0: return &v.state @@ -4214,7 +4137,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseStateSummaryResponse); i { case 0: return &v.state @@ -4226,7 +4149,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStateSummaryRequest); i { case 0: return &v.state @@ -4238,7 +4161,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStateSummaryResponse); i { case 0: return &v.state @@ -4250,7 +4173,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + 
file_vm_vm_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSummaryAcceptRequest); i { case 0: return &v.state @@ -4262,7 +4185,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSummaryAcceptResponse); i { case 0: return &v.state @@ -4275,15 +4198,15 @@ func file_vm_vm_proto_init() { } } } - file_vm_vm_proto_msgTypes[8].OneofWrappers = []interface{}{} - file_vm_vm_proto_msgTypes[15].OneofWrappers = []interface{}{} + file_vm_vm_proto_msgTypes[7].OneofWrappers = []interface{}{} + file_vm_vm_proto_msgTypes[14].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vm_vm_proto_rawDesc, NumEnums: 4, - NumMessages: 47, + NumMessages: 46, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/sync/sync.proto b/proto/sync/sync.proto index 4c4c6f434722..1a799433d7e7 100644 --- a/proto/sync/sync.proto +++ b/proto/sync/sync.proto @@ -21,6 +21,8 @@ message Request { service DB { rpc GetMerkleRoot(google.protobuf.Empty) returns (GetMerkleRootResponse); + rpc Clear(google.protobuf.Empty) returns (google.protobuf.Empty); + rpc GetProof(GetProofRequest) returns (GetProofResponse); rpc GetChangeProof(GetChangeProofRequest) returns (GetChangeProofResponse); diff --git a/proto/vm/vm.proto b/proto/vm/vm.proto index e005dabf66e9..0eca74b46041 100644 --- a/proto/vm/vm.proto +++ b/proto/vm/vm.proto @@ -127,7 +127,7 @@ message InitializeRequest { bytes genesis_bytes = 10; bytes upgrade_bytes = 11; bytes config_bytes = 12; - repeated VersionedDBServer db_servers = 13; + string db_server_addr = 13; // server_addr is the address of the gRPC server which serves // the messenger, keystore, shared memory, blockchain alias, // subnet alias, and 
appSender services @@ -142,13 +142,6 @@ message InitializeResponse { google.protobuf.Timestamp timestamp = 5; } -message VersionedDBServer { - string version = 1; - // server_addr is the address of the gRPC server which serves the - // Database service - string server_addr = 2; -} - message SetStateRequest { State state = 1; } diff --git a/snow/consensus/metrics/polls.go b/snow/consensus/metrics/polls.go index 188bb217ebe0..589833954f6b 100644 --- a/snow/consensus/metrics/polls.go +++ b/snow/consensus/metrics/polls.go @@ -6,7 +6,7 @@ package metrics import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) var _ Polls = (*polls)(nil) @@ -38,12 +38,11 @@ func NewPolls(namespace string, reg prometheus.Registerer) (Polls, error) { Help: "Number of failed polls", }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(p.numFailedPolls), reg.Register(p.numSuccessfulPolls), ) - return p, errs.Err + return p, err } func (p *polls) Failed() { diff --git a/snow/consensus/snowman/bootstrapper/majority.go b/snow/consensus/snowman/bootstrapper/majority.go new file mode 100644 index 000000000000..1decb837ef40 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/majority.go @@ -0,0 +1,110 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "go.uber.org/zap" + + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Poll = (*Majority)(nil) + +// Majority implements the bootstrapping poll to filter the initial set of +// potentially acceptable blocks into a set of accepted blocks to sync to. 
+// +// Once the last accepted blocks have been fetched from the initial set of +// peers, the set of blocks are sent to all peers. Each peer is expected to +// filter the provided blocks and report which of them they consider accepted. +// If a majority of the peers report that a block is accepted, then the node +// will consider that block to be accepted by the network. This assumes that a +// majority of the network is correct. If a majority of the network is +// malicious, the node may accept an incorrect block. +type Majority struct { + requests + + log logging.Logger + nodeWeights map[ids.NodeID]uint64 + + // received maps the blockID to the total sum of weight that has reported + // that block as accepted. + received map[ids.ID]uint64 + accepted []ids.ID +} + +func NewMajority( + log logging.Logger, + nodeWeights map[ids.NodeID]uint64, + maxOutstanding int, +) *Majority { + return &Majority{ + requests: requests{ + maxOutstanding: maxOutstanding, + pendingSend: set.Of(maps.Keys(nodeWeights)...), + }, + log: log, + nodeWeights: nodeWeights, + received: make(map[ids.ID]uint64), + } +} + +func (m *Majority) RecordOpinion(_ context.Context, nodeID ids.NodeID, blkIDs set.Set[ids.ID]) error { + if !m.recordResponse(nodeID) { + // The chain router should have already dropped unexpected messages. 
+ m.log.Error("received unexpected opinion", + zap.String("pollType", "majority"), + zap.Stringer("nodeID", nodeID), + zap.Reflect("blkIDs", blkIDs), + ) + return nil + } + + weight := m.nodeWeights[nodeID] + for blkID := range blkIDs { + newWeight, err := math.Add64(m.received[blkID], weight) + if err != nil { + return err + } + m.received[blkID] = newWeight + } + + if !m.finished() { + return nil + } + + var ( + totalWeight uint64 + err error + ) + for _, weight := range m.nodeWeights { + totalWeight, err = math.Add64(totalWeight, weight) + if err != nil { + return err + } + } + + requiredWeight := totalWeight/2 + 1 + for blkID, weight := range m.received { + if weight >= requiredWeight { + m.accepted = append(m.accepted, blkID) + } + } + + m.log.Debug("finalized bootstrapping poll", + zap.String("pollType", "majority"), + zap.Stringers("accepted", m.accepted), + ) + return nil +} + +func (m *Majority) Result(context.Context) ([]ids.ID, bool) { + return m.accepted, m.finished() +} diff --git a/snow/consensus/snowman/bootstrapper/majority_test.go b/snow/consensus/snowman/bootstrapper/majority_test.go new file mode 100644 index 000000000000..d276566fb910 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/majority_test.go @@ -0,0 +1,396 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrapper + +import ( + "context" + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +func TestNewMajority(t *testing.T) { + majority := NewMajority( + logging.NoLog{}, // log + map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, // nodeWeights + 2, // maxOutstanding + ) + + expectedMajority := &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + } + require.Equal(t, expectedMajority, majority) +} + +func TestMajorityGetPeers(t *testing.T) { + tests := []struct { + name string + majority Poll + expectedState Poll + expectedPeers set.Set[ids.NodeID] + }{ + { + name: "max outstanding", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedPeers: nil, + }, + { + name: "send until max outstanding", + majority: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: 
set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedPeers: set.Of(nodeID0, nodeID1), + }, + { + name: "send until no more to send", + majority: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedPeers: set.Of(nodeID0), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + peers := test.majority.GetPeers(context.Background()) + require.Equal(test.expectedState, test.majority) + require.Equal(test.expectedPeers, peers) + }) + } +} + +func TestMajorityRecordOpinion(t *testing.T) { + tests := []struct { + name string + majority Poll + nodeID ids.NodeID + blkIDs set.Set[ids.ID] + expectedState Poll + expectedErr error + }{ + { + name: "unexpected response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + nodeID: nodeID0, + blkIDs: nil, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedErr: nil, + }, + { + name: "unfinished after response", + majority: &Majority{ + 
requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 2, + nodeID1: 3, + }, + received: make(map[ids.ID]uint64), + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 2, + nodeID1: 3, + }, + received: map[ids.ID]uint64{ + blkID0: 3, + }, + }, + expectedErr: nil, + }, + { + name: "overflow during response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + }, + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + }, + }, + expectedErr: safemath.ErrOverflow, + }, + { + name: "overflow during final response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: make(map[ids.ID]uint64), + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: map[ids.ID]uint64{ + blkID0: math.MaxUint64, + }, + }, + expectedErr: safemath.ErrOverflow, + }, + { + name: "finished after response", + 
majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID2), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + nodeID2: 1, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + blkID1: 1, + }, + }, + nodeID: nodeID2, + blkIDs: set.Of(blkID1), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + nodeID2: 1, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + blkID1: 2, + }, + accepted: []ids.ID{blkID1}, + }, + expectedErr: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.majority.RecordOpinion(context.Background(), test.nodeID, test.blkIDs) + require.Equal(test.expectedState, test.majority) + require.ErrorIs(err, test.expectedErr) + }) + } +} + +func TestMajorityResult(t *testing.T) { + tests := []struct { + name string + majority Poll + expectedAccepted []ids.ID + expectedFinalized bool + }{ + { + name: "not finalized", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + accepted: nil, + }, + expectedAccepted: nil, + expectedFinalized: false, + }, + { + name: "finalized", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: map[ids.ID]uint64{ + blkID0: 2, + }, + accepted: []ids.ID{blkID0}, + }, + expectedAccepted: []ids.ID{blkID0}, + expectedFinalized: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + accepted, finalized := test.majority.Result(context.Background()) + 
require.Equal(test.expectedAccepted, accepted) + require.Equal(test.expectedFinalized, finalized) + }) + } +} diff --git a/snow/consensus/snowman/bootstrapper/minority.go b/snow/consensus/snowman/bootstrapper/minority.go new file mode 100644 index 000000000000..52b45c4407ba --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/minority.go @@ -0,0 +1,77 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Poll = (*Minority)(nil) + +// Minority implements the bootstrapping poll to determine the initial set of +// potentially acceptable blocks. +// +// This poll fetches the last accepted block from an initial set of peers. In +// order for the protocol to find a recently accepted block, there must be at +// least one correct node in this set of peers. If there is not a correct node +// in the set of peers, the node will not accept an incorrect block. However, +// the node may be unable to find an acceptable block. +type Minority struct { + requests + + log logging.Logger + + receivedSet set.Set[ids.ID] + received []ids.ID +} + +func NewMinority( + log logging.Logger, + frontierNodes set.Set[ids.NodeID], + maxOutstanding int, +) *Minority { + return &Minority{ + requests: requests{ + maxOutstanding: maxOutstanding, + pendingSend: frontierNodes, + }, + log: log, + } +} + +func (m *Minority) RecordOpinion(_ context.Context, nodeID ids.NodeID, blkIDs set.Set[ids.ID]) error { + if !m.recordResponse(nodeID) { + // The chain router should have already dropped unexpected messages. 
+ m.log.Error("received unexpected opinion", + zap.String("pollType", "minority"), + zap.Stringer("nodeID", nodeID), + zap.Reflect("blkIDs", blkIDs), + ) + return nil + } + + m.receivedSet.Union(blkIDs) + + if !m.finished() { + return nil + } + + m.received = m.receivedSet.List() + + m.log.Debug("finalized bootstrapping poll", + zap.String("pollType", "minority"), + zap.Stringers("frontier", m.received), + ) + return nil +} + +func (m *Minority) Result(context.Context) ([]ids.ID, bool) { + return m.received, m.finished() +} diff --git a/snow/consensus/snowman/bootstrapper/minority_test.go b/snow/consensus/snowman/bootstrapper/minority_test.go new file mode 100644 index 000000000000..f720ee18025a --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/minority_test.go @@ -0,0 +1,242 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" +) + +func TestNewMinority(t *testing.T) { + minority := NewMinority( + logging.NoLog{}, // log + set.Of(nodeID0), // frontierNodes + 2, // maxOutstanding + ) + + expectedMinority := &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0), + }, + log: logging.NoLog{}, + } + require.Equal(t, expectedMinority, minority) +} + +func TestMinorityGetPeers(t *testing.T) { + tests := []struct { + name string + minority Poll + expectedState Poll + expectedPeers set.Set[ids.NodeID] + }{ + { + name: "max outstanding", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + 
outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + expectedPeers: nil, + }, + { + name: "send until max outstanding", + minority: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + }, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + }, + expectedPeers: set.Of(nodeID0, nodeID1), + }, + { + name: "send until no more to send", + minority: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0), + }, + log: logging.NoLog{}, + }, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0), + }, + log: logging.NoLog{}, + }, + expectedPeers: set.Of(nodeID0), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + peers := test.minority.GetPeers(context.Background()) + require.Equal(test.expectedState, test.minority) + require.Equal(test.expectedPeers, peers) + }) + } +} + +func TestMinorityRecordOpinion(t *testing.T) { + tests := []struct { + name string + minority Poll + nodeID ids.NodeID + blkIDs set.Set[ids.ID] + expectedState Poll + expectedErr error + }{ + { + name: "unexpected response", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + nodeID: nodeID0, + blkIDs: nil, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + expectedErr: nil, + }, + { + name: "unfinished after response", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + nodeID: 
nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + receivedSet: set.Of(blkID0), + }, + expectedErr: nil, + }, + { + name: "finished after response", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID2), + }, + log: logging.NoLog{}, + }, + nodeID: nodeID2, + blkIDs: set.Of(blkID1), + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + receivedSet: set.Of(blkID1), + received: []ids.ID{blkID1}, + }, + expectedErr: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.minority.RecordOpinion(context.Background(), test.nodeID, test.blkIDs) + require.Equal(test.expectedState, test.minority) + require.ErrorIs(err, test.expectedErr) + }) + } +} + +func TestMinorityResult(t *testing.T) { + tests := []struct { + name string + minority Poll + expectedAccepted []ids.ID + expectedFinalized bool + }{ + { + name: "not finalized", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + received: nil, + }, + expectedAccepted: nil, + expectedFinalized: false, + }, + { + name: "finalized", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + }, + log: logging.NoLog{}, + receivedSet: set.Of(blkID0), + received: []ids.ID{blkID0}, + }, + expectedAccepted: []ids.ID{blkID0}, + expectedFinalized: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + accepted, finalized := test.minority.Result(context.Background()) + require.Equal(test.expectedAccepted, accepted) + require.Equal(test.expectedFinalized, finalized) + }) + } +} diff --git a/snow/consensus/snowman/bootstrapper/noop.go 
b/snow/consensus/snowman/bootstrapper/noop.go new file mode 100644 index 000000000000..1cd3bffd58b7 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/noop.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +var Noop Poll = noop{} + +type noop struct{} + +func (noop) GetPeers(context.Context) set.Set[ids.NodeID] { + return nil +} + +func (noop) RecordOpinion(context.Context, ids.NodeID, set.Set[ids.ID]) error { + return nil +} + +func (noop) Result(context.Context) ([]ids.ID, bool) { + return nil, false +} diff --git a/snow/consensus/snowman/bootstrapper/noop_test.go b/snow/consensus/snowman/bootstrapper/noop_test.go new file mode 100644 index 000000000000..0a485a8fae76 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/noop_test.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNoop(t *testing.T) { + require := require.New(t) + + require.Empty(Noop.GetPeers(context.Background())) + + require.NoError(Noop.RecordOpinion(context.Background(), nodeID0, nil)) + + blkIDs, finalized := Noop.Result(context.Background()) + require.Empty(blkIDs) + require.False(finalized) +} diff --git a/snow/consensus/snowman/bootstrapper/poll.go b/snow/consensus/snowman/bootstrapper/poll.go new file mode 100644 index 000000000000..450341d9d64d --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/poll.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrapper + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +type Poll interface { + // GetPeers returns the set of peers whose opinion should be requested. It + // is expected to repeatedly call this function along with [RecordOpinion] + // until [Result] returns finalized. + GetPeers(ctx context.Context) (peers set.Set[ids.NodeID]) + // RecordOpinion of a node whose opinion was requested. + RecordOpinion(ctx context.Context, nodeID ids.NodeID, blkIDs set.Set[ids.ID]) error + // Result returns the evaluation of all the peer's opinions along with a + // flag to identify that the result has finished being calculated. + Result(ctx context.Context) (blkIDs []ids.ID, finalized bool) +} diff --git a/snow/consensus/snowman/bootstrapper/poll_test.go b/snow/consensus/snowman/bootstrapper/poll_test.go new file mode 100644 index 000000000000..134867ae1822 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/poll_test.go @@ -0,0 +1,15 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import "github.com/ava-labs/avalanchego/ids" + +var ( + nodeID0 = ids.GenerateTestNodeID() + nodeID1 = ids.GenerateTestNodeID() + nodeID2 = ids.GenerateTestNodeID() + + blkID0 = ids.GenerateTestID() + blkID1 = ids.GenerateTestID() +) diff --git a/snow/consensus/snowman/bootstrapper/requests.go b/snow/consensus/snowman/bootstrapper/requests.go new file mode 100644 index 000000000000..28fc25ce1643 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/requests.go @@ -0,0 +1,48 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrapper + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" +) + +type requests struct { + maxOutstanding int + + pendingSend set.Set[ids.NodeID] + outstanding set.Set[ids.NodeID] +} + +func (r *requests) GetPeers(context.Context) set.Set[ids.NodeID] { + numPending := r.outstanding.Len() + if numPending >= r.maxOutstanding { + return nil + } + + numToSend := math.Min( + r.maxOutstanding-numPending, + r.pendingSend.Len(), + ) + nodeIDs := set.NewSet[ids.NodeID](numToSend) + for i := 0; i < numToSend; i++ { + nodeID, _ := r.pendingSend.Pop() + nodeIDs.Add(nodeID) + } + r.outstanding.Union(nodeIDs) + return nodeIDs +} + +func (r *requests) recordResponse(nodeID ids.NodeID) bool { + wasOutstanding := r.outstanding.Contains(nodeID) + r.outstanding.Remove(nodeID) + return wasOutstanding +} + +func (r *requests) finished() bool { + return r.pendingSend.Len() == 0 && r.outstanding.Len() == 0 +} diff --git a/snow/consensus/snowman/bootstrapper/sampler.go b/snow/consensus/snowman/bootstrapper/sampler.go new file mode 100644 index 000000000000..9511a1e4243f --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/sampler.go @@ -0,0 +1,49 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/utils/set" +) + +// Sample keys from [elements] uniformly by weight without replacement. The +// returned set will have size less than or equal to [maxSize]. This function +// will error if the sum of all weights overflows. 
+func Sample[T comparable](elements map[T]uint64, maxSize int) (set.Set[T], error) { + var ( + keys = make([]T, len(elements)) + weights = make([]uint64, len(elements)) + totalWeight uint64 + err error + ) + i := 0 + for key, weight := range elements { + keys[i] = key + weights[i] = weight + totalWeight, err = math.Add64(totalWeight, weight) + if err != nil { + return nil, err + } + i++ + } + + sampler := sampler.NewWeightedWithoutReplacement() + if err := sampler.Initialize(weights); err != nil { + return nil, err + } + + maxSize = int(math.Min(uint64(maxSize), totalWeight)) + indices, err := sampler.Sample(maxSize) + if err != nil { + return nil, err + } + + sampledElements := set.NewSet[T](maxSize) + for _, index := range indices { + sampledElements.Add(keys[index]) + } + return sampledElements, nil +} diff --git a/snow/consensus/snowman/bootstrapper/sampler_test.go b/snow/consensus/snowman/bootstrapper/sampler_test.go new file mode 100644 index 000000000000..1b9e366decc7 --- /dev/null +++ b/snow/consensus/snowman/bootstrapper/sampler_test.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrapper + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +func TestSample(t *testing.T) { + tests := []struct { + name string + elements map[ids.NodeID]uint64 + maxSize int + expectedSampled set.Set[ids.NodeID] + expectedErr error + }{ + { + name: "sample everything", + elements: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + maxSize: 2, + expectedSampled: set.Of(nodeID0, nodeID1), + expectedErr: nil, + }, + { + name: "limit sample due to too few elements", + elements: map[ids.NodeID]uint64{ + nodeID0: 1, + }, + maxSize: 2, + expectedSampled: set.Of(nodeID0), + expectedErr: nil, + }, + { + name: "limit sample", + elements: map[ids.NodeID]uint64{ + nodeID0: math.MaxUint64 - 1, + nodeID1: 1, + }, + maxSize: 1, + expectedSampled: set.Of(nodeID0), + expectedErr: nil, + }, + { + name: "overflow", + elements: map[ids.NodeID]uint64{ + nodeID0: math.MaxUint64, + nodeID1: 1, + }, + maxSize: 1, + expectedSampled: nil, + expectedErr: safemath.ErrOverflow, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + sampled, err := Sample(test.elements, test.maxSize) + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expectedSampled, sampled) + }) + } +} diff --git a/snow/consensus/snowman/poll/set.go b/snow/consensus/snowman/poll/set.go index e31821476bc8..e58059f20c3d 100644 --- a/snow/consensus/snowman/poll/set.go +++ b/snow/consensus/snowman/poll/set.go @@ -4,6 +4,7 @@ package poll import ( + "errors" "fmt" "strings" "time" @@ -19,6 +20,11 @@ import ( "github.com/ava-labs/avalanchego/utils/metric" ) +var ( + errFailedPollsMetric = errors.New("failed to register polls metric") + errFailedPollDurationMetrics = errors.New("failed to register poll_duration metrics") +) + type 
pollHolder interface { GetPoll() Poll StartTime() time.Time @@ -52,16 +58,14 @@ func NewSet( log logging.Logger, namespace string, reg prometheus.Registerer, -) Set { +) (Set, error) { numPolls := prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "polls", Help: "Number of pending network polls", }) if err := reg.Register(numPolls); err != nil { - log.Error("failed to register polls statistics", - zap.Error(err), - ) + return nil, fmt.Errorf("%w: %w", errFailedPollsMetric, err) } durPolls, err := metric.NewAverager( @@ -71,9 +75,7 @@ func NewSet( reg, ) if err != nil { - log.Error("failed to register poll_duration statistics", - zap.Error(err), - ) + return nil, fmt.Errorf("%w: %w", errFailedPollDurationMetrics, err) } return &set{ @@ -82,7 +84,7 @@ func NewSet( durPolls: durPolls, factory: factory, polls: linkedhashmap.New[uint32, pollHolder](), - } + }, nil } // Add to the current set of polls diff --git a/snow/consensus/snowman/poll/set_test.go b/snow/consensus/snowman/poll/set_test.go index 8200f25dc5f0..84ed8f7a5c8c 100644 --- a/snow/consensus/snowman/poll/set_test.go +++ b/snow/consensus/snowman/poll/set_test.go @@ -21,14 +21,14 @@ var ( blkID3 = ids.ID{3} blkID4 = ids.ID{4} - vdr1 = ids.NodeID{1} - vdr2 = ids.NodeID{2} - vdr3 = ids.NodeID{3} - vdr4 = ids.NodeID{4} - vdr5 = ids.NodeID{5} + vdr1 = ids.BuildTestNodeID([]byte{0x01}) + vdr2 = ids.BuildTestNodeID([]byte{0x02}) + vdr3 = ids.BuildTestNodeID([]byte{0x03}) + vdr4 = ids.BuildTestNodeID([]byte{0x04}) + vdr5 = ids.BuildTestNodeID([]byte{0x05}) // k = 5 ) -func TestNewSetErrorOnMetrics(t *testing.T) { +func TestNewSetErrorOnPollsMetrics(t *testing.T) { require := require.New(t) factory := NewEarlyTermNoTraversalFactory(1, 1) @@ -37,13 +37,29 @@ func TestNewSetErrorOnMetrics(t *testing.T) { registerer := prometheus.NewRegistry() require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "polls", + Namespace: namespace, + Name: "polls", }))) + + _, err := 
NewSet(factory, log, namespace, registerer) + require.ErrorIs(err, errFailedPollsMetric) +} + +func TestNewSetErrorOnPollDurationMetrics(t *testing.T) { + require := require.New(t) + + factory := NewEarlyTermNoTraversalFactory(1, 1) + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "poll_duration", + Namespace: namespace, + Name: "poll_duration_count", }))) - require.NotNil(NewSet(factory, log, namespace, registerer)) + _, err := NewSet(factory, log, namespace, registerer) + require.ErrorIs(err, errFailedPollDurationMetrics) } func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { @@ -56,7 +72,8 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) // create two polls for the two blocks vdrBag := bag.Of(vdrs...) @@ -92,7 +109,8 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) // create two polls for the two blocks vdrBag := bag.Of(vdrs...) @@ -128,7 +146,8 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) // create three polls for the two blocks vdrBag := bag.Of(vdrs...) 
@@ -172,7 +191,8 @@ func TestCreateAndFinishSuccessfulPoll(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) require.Zero(s.Len()) @@ -204,7 +224,8 @@ func TestCreateAndFinishFailedPoll(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) require.Zero(s.Len()) @@ -233,7 +254,8 @@ func TestSetString(t *testing.T) { log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) expected := `current polls: (Size = 1) RequestID 0: diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index 0f8a2484a4e2..c6ef6513b668 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -29,6 +29,10 @@ const ( stripeDistance = 2000 stripeWidth = 5 cacheSize = 100000 + + // maxOutstandingGetAncestorsRequests is the maximum number of GetAncestors + // sent but not yet responded to/failed + maxOutstandingGetAncestorsRequests = 10 ) var _ common.BootstrapableEngine = (*bootstrapper)(nil) @@ -42,6 +46,8 @@ func New( StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), AcceptedStateSummaryHandler: common.NewNoOpAcceptedStateSummaryHandler(config.Ctx.Log), + AcceptedFrontierHandler: common.NewNoOpAcceptedFrontierHandler(config.Ctx.Log), + AcceptedHandler: common.NewNoOpAcceptedHandler(config.Ctx.Log), PutHandler: common.NewNoOpPutHandler(config.Ctx.Log), QueryHandler: common.NewNoOpQueryHandler(config.Ctx.Log), ChitsHandler: 
common.NewNoOpChitsHandler(config.Ctx.Log), @@ -52,41 +58,41 @@ func New( OnFinished: onFinished, }, } - - if err := b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer); err != nil { - return nil, err - } - - config.Config.Bootstrapable = b - b.Bootstrapper = common.NewCommonBootstrapper(config.Config) - return b, nil + return b, b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer) } // Note: To align with the Snowman invariant, it should be guaranteed the VM is // not used until after the bootstrapper has been Started. type bootstrapper struct { Config + common.Halter // list of NoOpsHandler for messages dropped by bootstrapper common.StateSummaryFrontierHandler common.AcceptedStateSummaryHandler + common.AcceptedFrontierHandler + common.AcceptedHandler common.PutHandler common.QueryHandler common.ChitsHandler common.AppHandler - common.Bootstrapper common.Fetcher metrics - started bool - // IDs of vertices that we will send a GetAncestors request for once we are // not at the max number of outstanding requests needToFetch set.Set[ids.ID] // Contains IDs of vertices that have recently been processed processedCache *cache.LRU[ids.ID, struct{}] + + // Tracks the last requestID that was used in a request + requestID uint32 +} + +func (b *bootstrapper) Context() *snow.ConsensusContext { + return b.Ctx } func (b *bootstrapper) Clear(context.Context) error { @@ -256,16 +262,7 @@ func (b *bootstrapper) Connected( return err } - if err := b.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { - return err - } - - if b.started || !b.StartupTracker.ShouldStart() { - return nil - } - - b.started = true - return b.Startup(ctx) + return b.StartupTracker.Connected(ctx, nodeID, nodeVersion) } func (b *bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) error { @@ -327,7 +324,7 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { return err } - b.Config.SharedCfg.RequestID = startReqID + b.requestID = startReqID 
// If the network was already linearized, don't attempt to linearize it // again. @@ -336,38 +333,38 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { return fmt.Errorf("failed to get linearization status: %w", err) } if linearized { - edge := b.Manager.Edge(ctx) - return b.ForceAccepted(ctx, edge) + return b.ForceAccepted(ctx, nil) } - // If requested, assume the currently accepted state is what was linearized. - // - // Note: This is used to linearize networks that were created after the - // linearization occurred. - if b.Config.LinearizeOnStartup { - edge := b.Manager.Edge(ctx) - stopVertex, err := b.Manager.BuildStopVtx(ctx, edge) - if err != nil { - return fmt.Errorf("failed to create stop vertex: %w", err) - } - if err := stopVertex.Accept(ctx); err != nil { - return fmt.Errorf("failed to accept stop vertex: %w", err) - } - - stopVertexID := stopVertex.ID() - b.Ctx.Log.Info("accepted stop vertex", - zap.Stringer("vtxID", stopVertexID), + // If a stop vertex is well known, accept that. + if b.Config.StopVertexID != ids.Empty { + b.Ctx.Log.Info("using well known stop vertex", + zap.Stringer("vtxID", b.Config.StopVertexID), ) - return b.ForceAccepted(ctx, []ids.ID{stopVertexID}) + return b.ForceAccepted(ctx, []ids.ID{b.Config.StopVertexID}) } - if !b.StartupTracker.ShouldStart() { - return nil + // If a stop vertex isn't well known, treat the current state as the final + // DAG state. + // + // Note: This is used to linearize networks that were created after the + // linearization occurred. 
+ edge := b.Manager.Edge(ctx) + stopVertex, err := b.Manager.BuildStopVtx(ctx, edge) + if err != nil { + return fmt.Errorf("failed to create stop vertex: %w", err) + } + if err := stopVertex.Accept(ctx); err != nil { + return fmt.Errorf("failed to accept stop vertex: %w", err) } - b.started = true - return b.Startup(ctx) + stopVertexID := stopVertex.ID() + b.Ctx.Log.Info("generated stop vertex", + zap.Stringer("vtxID", stopVertexID), + ) + + return b.ForceAccepted(ctx, nil) } func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { @@ -391,7 +388,7 @@ func (b *bootstrapper) GetVM() common.VM { // to fetch or we are at the maximum number of outstanding requests. func (b *bootstrapper) fetch(ctx context.Context, vtxIDs ...ids.ID) error { b.needToFetch.Add(vtxIDs...) - for b.needToFetch.Len() > 0 && b.OutstandingRequests.Len() < common.MaxOutstandingGetAncestorsRequests { + for b.needToFetch.Len() > 0 && b.OutstandingRequests.Len() < maxOutstandingGetAncestorsRequests { vtxID := b.needToFetch.CappedList(1)[0] b.needToFetch.Remove(vtxID) @@ -410,10 +407,10 @@ func (b *bootstrapper) fetch(ctx context.Context, vtxIDs ...ids.ID) error { return fmt.Errorf("dropping request for %s as there are no validators", vtxID) } validatorID := validatorIDs[0] - b.Config.SharedCfg.RequestID++ + b.requestID++ - b.OutstandingRequests.Add(validatorID, b.Config.SharedCfg.RequestID, vtxID) - b.Config.Sender.SendGetAncestors(ctx, validatorID, b.Config.SharedCfg.RequestID, vtxID) // request vertex and ancestors + b.OutstandingRequests.Add(validatorID, b.requestID, vtxID) + b.Config.Sender.SendGetAncestors(ctx, validatorID, b.requestID, vtxID) // request vertex and ancestors } return b.checkFinish(ctx) } @@ -498,15 +495,9 @@ func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) er verticesFetchedSoFar := b.VtxBlocked.Jobs.PendingJobs() if verticesFetchedSoFar%common.StatusUpdateFrequency == 0 { // Periodically print progress - if 
!b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("fetched vertices", - zap.Uint64("numVerticesFetched", verticesFetchedSoFar), - ) - } else { - b.Ctx.Log.Debug("fetched vertices", - zap.Uint64("numVerticesFetched", verticesFetchedSoFar), - ) - } + b.Ctx.Log.Info("fetched vertices", + zap.Uint64("numVerticesFetched", verticesFetchedSoFar), + ) } parents, err := vtx.Parents() @@ -578,58 +569,36 @@ func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs [ // checkFinish repeatedly executes pending transactions and requests new frontier blocks until there aren't any new ones // after which it finishes the bootstrap process func (b *bootstrapper) checkFinish(ctx context.Context) error { - // If there are outstanding requests for vertices or we still need to fetch vertices, we can't finish - pendingJobs := b.VtxBlocked.MissingIDs() - if b.IsBootstrapped() || len(pendingJobs) > 0 { + // If we still need to fetch vertices, we can't finish + if len(b.VtxBlocked.MissingIDs()) > 0 { return nil } - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("executing transactions") - } else { - b.Ctx.Log.Debug("executing transactions") - } - + b.Ctx.Log.Info("executing transactions") _, err := b.TxBlocked.ExecuteAll( ctx, b.Config.Ctx, b, - b.Config.SharedCfg.Restarted, + false, b.Ctx.TxAcceptor, ) if err != nil || b.Halted() { return err } - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("executing vertices") - } else { - b.Ctx.Log.Debug("executing vertices") - } - + b.Ctx.Log.Info("executing vertices") _, err = b.VtxBlocked.ExecuteAll( ctx, b.Config.Ctx, b, - b.Config.SharedCfg.Restarted, + false, b.Ctx.VertexAcceptor, ) if err != nil || b.Halted() { return err } - // If the chain is linearized, we should immediately move on to start - // bootstrapping snowman. 
- linearized, err := b.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if !linearized { - b.Ctx.Log.Debug("checking for stop vertex before finishing bootstrapping") - return b.Restart(ctx, true) - } - - // Invariant: edge will only be the stop vertex after its acceptance. + // Invariant: edge will only be the stop vertex edge := b.Manager.Edge(ctx) stopVertexID := edge[0] if err := b.VM.Linearize(ctx, stopVertexID); err != nil { @@ -637,7 +606,7 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { } b.processedCache.Flush() - return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) + return b.OnFinished(ctx, b.requestID) } // A vertex is less than another vertex if it is unknown. Ties are broken by diff --git a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index d5bd51a9f78c..4e61e9b137ee 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -8,9 +8,12 @@ import ( "context" "errors" "testing" + "time" "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" @@ -61,23 +64,10 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te vm := &vertex.TestVM{} vm.T = t - isBootstrapped := false - bootstrapTracker := &common.BootstrapTrackerTest{ - T: t, - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, - } - sender.Default(true) manager.Default(true) vm.Default(true) - sender.CantSendGetAcceptedFrontier = false - peer := ids.GenerateTestNodeID() require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, peer, nil, ids.Empty, 1)) @@ -93,30 +83,20 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te 
startupTracker := tracker.NewStartup(peerTracker, totalWeight/2+1) vdrs.RegisterCallbackListener(constants.PrimaryNetworkID, startupTracker) - commonConfig := common.Config{ + avaGetHandler, err := getter.New(manager, sender, ctx.Log, time.Second, 2000, ctx.AvalancheRegisterer) + require.NoError(err) + + return Config{ + AllGetsServer: avaGetHandler, Ctx: ctx, Beacons: vdrs, - SampleK: vdrs.Count(constants.PrimaryNetworkID), - Alpha: totalWeight/2 + 1, StartupTracker: startupTracker, Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - avaGetHandler, err := getter.New(manager, commonConfig) - require.NoError(err) - - return Config{ - Config: commonConfig, - AllGetsServer: avaGetHandler, - VtxBlocked: vtxBlocker, - TxBlocked: txBlocker, - Manager: manager, - VM: vm, + VtxBlocked: vtxBlocker, + TxBlocked: txBlocker, + Manager: manager, + VM: vm, }, peer, sender, manager, vm } @@ -148,7 +128,10 @@ func TestBootstrapperSingleFrontier(t *testing.T) { IDV: vtxID1, StatusV: choices.Processing, }, - HeightV: 0, + ParentsV: []avalanche.Vertex{ + vtx0, + }, + HeightV: 1, BytesV: vtxBytes1, } vtx2 := &avalanche.TestVertex{ // vtx2 is the stop vertex @@ -156,10 +139,14 @@ func TestBootstrapperSingleFrontier(t *testing.T) { IDV: vtxID2, StatusV: choices.Processing, }, - HeightV: 0, + ParentsV: []avalanche.Vertex{ + vtx1, + }, + HeightV: 2, BytesV: vtxBytes2, } + config.StopVertexID = vtxID2 bs, err := New( config, func(context.Context, uint32) error { @@ -172,11 +159,6 @@ func TestBootstrapperSingleFrontier(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID0, vtxID1, vtxID2} - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: @@ -219,7 +201,8 @@ func 
TestBootstrapperSingleFrontier(t *testing.T) { return nil } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) require.Equal(choices.Accepted, vtx0.Status()) require.Equal(choices.Accepted, vtx1.Status()) @@ -269,6 +252,7 @@ func TestBootstrapperByzantineResponses(t *testing.T) { BytesV: vtxBytes2, } + config.StopVertexID = vtxID1 bs, err := New( config, func(context.Context, uint32) error { @@ -281,10 +265,6 @@ func TestBootstrapperByzantineResponses(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID1} manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID1: @@ -324,7 +304,8 @@ func TestBootstrapperByzantineResponses(t *testing.T) { } } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx0 + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) // should request vtx0 require.Equal(vtxID0, reqVtxID) oldReqID := *requestID @@ -437,6 +418,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { BytesV: vtxBytes1, } + config.StopVertexID = vtxID1 bs, err := New( config, func(context.Context, uint32) error { @@ -449,11 +431,6 @@ func TestBootstrapperTxDependencies(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID1} - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes1): @@ -485,7 +462,8 @@ func TestBootstrapperTxDependencies(t *testing.T) { *reqIDPtr = reqID } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx0 + vm.CantSetState = false + 
require.NoError(bs.Start(context.Background(), 0)) manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { @@ -563,6 +541,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { BytesV: vtxBytes2, } + config.StopVertexID = vtxID2 bs, err := New( config, func(context.Context, uint32) error { @@ -575,10 +554,6 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID2} manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch { case vtxID == vtxID0: @@ -617,7 +592,8 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { requested = vtxID } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx1 + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) // should request vtx1 require.Equal(vtxID1, requested) require.NoError(bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes1})) // Provide vtx1; should request vtx0 @@ -645,7 +621,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { require.Equal(choices.Accepted, vtx2.Status()) } -func TestBootstrapperFinalized(t *testing.T) { +func TestBootstrapperUnexpectedVertex(t *testing.T) { require := require.New(t) config, peerID, sender, manager, vm := newConfig(t) @@ -674,6 +650,7 @@ func TestBootstrapperFinalized(t *testing.T) { BytesV: vtxBytes1, } + config.StopVertexID = vtxID1 bs, err := New( config, func(context.Context, uint32) error { @@ -686,10 +663,6 @@ func TestBootstrapperFinalized(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID0, vtxID1} parsedVtx0 := false parsedVtx1 := false manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { @@ -728,20 +701,17 @@ func 
TestBootstrapperFinalized(t *testing.T) { requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { require.Equal(peerID, vdr) - requestIDs[vtxID] = reqID } - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx0 and vtx1 + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) // should request vtx1 require.Contains(requestIDs, vtxID1) reqID := requestIDs[vtxID1] - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes1, vtxBytes0})) - require.Contains(requestIDs, vtxID0) - - manager.StopVertexAcceptedF = func(context.Context) (bool, error) { - return vtx1.Status() == choices.Accepted, nil - } + maps.Clear(requestIDs) + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes0})) + require.Contains(requestIDs, vtxID1) manager.EdgeF = func(context.Context) []ids.ID { require.Equal(choices.Accepted, vtx1.Status()) @@ -753,356 +723,8 @@ func TestBootstrapperFinalized(t *testing.T) { return nil } - reqID = requestIDs[vtxID0] - require.NoError(bs.GetAncestorsFailed(context.Background(), peerID, reqID)) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, vtx0.Status()) - require.Equal(choices.Accepted, vtx1.Status()) -} - -// Test that Ancestors accepts the parents of the first vertex returned -func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { - require := require.New(t) - - config, peerID, sender, manager, vm := newConfig(t) - - vtxID0 := ids.Empty.Prefix(0) - vtxID1 := ids.Empty.Prefix(1) - vtxID2 := ids.Empty.Prefix(2) - - vtxBytes0 := []byte{0} - vtxBytes1 := []byte{1} - vtxBytes2 := []byte{2} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Unknown, - }, - HeightV: 0, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - 
IDV: vtxID1, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 1, - BytesV: vtxBytes1, - } - vtx2 := &avalanche.TestVertex{ // vtx2 is the stop vertex - TestDecidable: choices.TestDecidable{ - IDV: vtxID2, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx1}, - HeightV: 2, - BytesV: vtxBytes2, - } - - bs, err := New( - config, - func(context.Context, uint32) error { - config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - State: snow.NormalOp, - }) - return nil - }, - ) - require.NoError(err) - - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - acceptedIDs := []ids.ID{vtxID2} - parsedVtx0 := false - parsedVtx1 := false - parsedVtx2 := false - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtxID0: - if parsedVtx0 { - return vtx0, nil - } - case vtxID1: - if parsedVtx1 { - return vtx1, nil - } - case vtxID2: - if parsedVtx2 { - return vtx2, nil - } - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - return nil, errUnknownVertex - } - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { - switch { - case bytes.Equal(vtxBytes, vtxBytes0): - vtx0.StatusV = choices.Processing - parsedVtx0 = true - return vtx0, nil - case bytes.Equal(vtxBytes, vtxBytes1): - vtx1.StatusV = choices.Processing - parsedVtx1 = true - return vtx1, nil - case bytes.Equal(vtxBytes, vtxBytes2): - vtx2.StatusV = choices.Processing - parsedVtx2 = true - return vtx2, nil - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - } - - requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - require.Equal(peerID, vdr) - - requestIDs[vtxID] = reqID - } - - require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request vtx2 - 
require.Contains(requestIDs, vtxID2) - - manager.StopVertexAcceptedF = func(context.Context) (bool, error) { - return vtx2.Status() == choices.Accepted, nil - } - - manager.EdgeF = func(context.Context) []ids.ID { - require.Equal(choices.Accepted, vtx2.Status()) - return []ids.ID{vtxID2} - } - - vm.LinearizeF = func(_ context.Context, stopVertexID ids.ID) error { - require.Equal(vtxID2, stopVertexID) - return nil - } - - reqID := requestIDs[vtxID2] - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes2, vtxBytes1, vtxBytes0})) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes1, vtxBytes0})) require.Equal(choices.Accepted, vtx0.Status()) require.Equal(choices.Accepted, vtx1.Status()) - require.Equal(choices.Accepted, vtx2.Status()) -} - -func TestRestartBootstrapping(t *testing.T) { - require := require.New(t) - - config, peerID, sender, manager, vm := newConfig(t) - - vtxID0 := ids.GenerateTestID() - vtxID1 := ids.GenerateTestID() - vtxID2 := ids.GenerateTestID() - vtxID3 := ids.GenerateTestID() - vtxID4 := ids.GenerateTestID() - vtxID5 := ids.GenerateTestID() - - vtxBytes0 := []byte{0} - vtxBytes1 := []byte{1} - vtxBytes2 := []byte{2} - vtxBytes3 := []byte{3} - vtxBytes4 := []byte{4} - vtxBytes5 := []byte{5} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Unknown, - }, - HeightV: 0, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 1, - BytesV: vtxBytes1, - } - vtx2 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID2, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx1}, - HeightV: 2, - BytesV: vtxBytes2, - } - vtx3 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: 
vtxID3, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx2}, - HeightV: 3, - BytesV: vtxBytes3, - } - vtx4 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID4, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx2}, - HeightV: 3, - BytesV: vtxBytes4, - } - vtx5 := &avalanche.TestVertex{ // vtx5 is the stop vertex - TestDecidable: choices.TestDecidable{ - IDV: vtxID5, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx3, vtx4}, - HeightV: 4, - BytesV: vtxBytes5, - } - - bsIntf, err := New( - config, - func(context.Context, uint32) error { - config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - State: snow.NormalOp, - }) - return nil - }, - ) - require.NoError(err) - - bs := bsIntf.(*bootstrapper) - - vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) - - parsedVtx0 := false - parsedVtx1 := false - parsedVtx2 := false - parsedVtx3 := false - parsedVtx4 := false - parsedVtx5 := false - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtxID0: - if parsedVtx0 { - return vtx0, nil - } - case vtxID1: - if parsedVtx1 { - return vtx1, nil - } - case vtxID2: - if parsedVtx2 { - return vtx2, nil - } - case vtxID3: - if parsedVtx3 { - return vtx3, nil - } - case vtxID4: - if parsedVtx4 { - return vtx4, nil - } - case vtxID5: - if parsedVtx5 { - return vtx5, nil - } - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - return nil, errUnknownVertex - } - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { - switch { - case bytes.Equal(vtxBytes, vtxBytes0): - vtx0.StatusV = choices.Processing - parsedVtx0 = true - return vtx0, nil - case bytes.Equal(vtxBytes, vtxBytes1): - vtx1.StatusV = choices.Processing - parsedVtx1 = true - return vtx1, nil - case bytes.Equal(vtxBytes, vtxBytes2): - vtx2.StatusV = 
choices.Processing - parsedVtx2 = true - return vtx2, nil - case bytes.Equal(vtxBytes, vtxBytes3): - vtx3.StatusV = choices.Processing - parsedVtx3 = true - return vtx3, nil - case bytes.Equal(vtxBytes, vtxBytes4): - vtx4.StatusV = choices.Processing - parsedVtx4 = true - return vtx4, nil - case bytes.Equal(vtxBytes, vtxBytes5): - vtx5.StatusV = choices.Processing - parsedVtx5 = true - return vtx5, nil - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - } - - requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - require.Equal(peerID, vdr) - - requestIDs[vtxID] = reqID - } - - require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{vtxID3, vtxID4})) // should request vtx3 and vtx4 - require.Contains(requestIDs, vtxID3) - require.Contains(requestIDs, vtxID4) - - vtx3ReqID := requestIDs[vtxID3] - require.NoError(bs.Ancestors(context.Background(), peerID, vtx3ReqID, [][]byte{vtxBytes3, vtxBytes2})) - require.Contains(requestIDs, vtxID1) - require.True(bs.OutstandingRequests.RemoveAny(vtxID4)) - require.True(bs.OutstandingRequests.RemoveAny(vtxID1)) - - bs.needToFetch.Clear() - requestIDs = map[ids.ID]uint32{} - - require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{vtxID5, vtxID3})) - require.Contains(requestIDs, vtxID1) - require.Contains(requestIDs, vtxID4) - require.Contains(requestIDs, vtxID5) - require.NotContains(requestIDs, vtxID3) - - vtx5ReqID := requestIDs[vtxID5] - require.NoError(bs.Ancestors(context.Background(), peerID, vtx5ReqID, [][]byte{vtxBytes5, vtxBytes4, vtxBytes2, vtxBytes1})) - require.Contains(requestIDs, vtxID0) - - manager.StopVertexAcceptedF = func(context.Context) (bool, error) { - return vtx5.Status() == choices.Accepted, nil - } - - manager.EdgeF = func(context.Context) []ids.ID { - require.Equal(choices.Accepted, vtx5.Status()) - return []ids.ID{vtxID5} - } - - vm.LinearizeF = func(_ context.Context, 
stopVertexID ids.ID) error { - require.Equal(vtxID5, stopVertexID) - return nil - } - - vtx1ReqID := requestIDs[vtxID1] - require.NoError(bs.Ancestors(context.Background(), peerID, vtx1ReqID, [][]byte{vtxBytes1, vtxBytes0})) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, vtx0.Status()) - require.Equal(choices.Accepted, vtx1.Status()) - require.Equal(choices.Accepted, vtx2.Status()) - require.Equal(choices.Accepted, vtx3.Status()) - require.Equal(choices.Accepted, vtx4.Status()) - require.Equal(choices.Accepted, vtx5.Status()) } diff --git a/snow/engine/avalanche/bootstrap/config.go b/snow/engine/avalanche/bootstrap/config.go index 0569342cc328..54fe7f2e45fa 100644 --- a/snow/engine/avalanche/bootstrap/config.go +++ b/snow/engine/avalanche/bootstrap/config.go @@ -4,21 +4,37 @@ package bootstrap import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" + "github.com/ava-labs/avalanchego/snow/validators" ) type Config struct { - common.Config common.AllGetsServer + Ctx *snow.ConsensusContext + Beacons validators.Manager + + StartupTracker tracker.Startup + Sender common.Sender + + // This node will only consider the first [AncestorsMaxContainersReceived] + // containers in an ancestors message it receives. 
+ AncestorsMaxContainersReceived int + // VtxBlocked tracks operations that are blocked on vertices VtxBlocked *queue.JobsWithMissing // TxBlocked tracks operations that are blocked on transactions TxBlocked *queue.Jobs - Manager vertex.Manager - VM vertex.LinearizableVM - LinearizeOnStartup bool + Manager vertex.Manager + VM vertex.LinearizableVM + + // If StopVertexID is empty, the engine will generate the stop vertex based + // on the current state. + StopVertexID ids.ID } diff --git a/snow/engine/avalanche/bootstrap/metrics.go b/snow/engine/avalanche/bootstrap/metrics.go index 2033a7764afb..b9d5824ec95a 100644 --- a/snow/engine/avalanche/bootstrap/metrics.go +++ b/snow/engine/avalanche/bootstrap/metrics.go @@ -6,7 +6,7 @@ package bootstrap import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type metrics struct { @@ -50,8 +50,7 @@ func (m *metrics) Initialize( Help: "Number of transactions accepted during bootstrapping", }) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( registerer.Register(m.numFetchedVts), registerer.Register(m.numDroppedVts), registerer.Register(m.numAcceptedVts), @@ -59,5 +58,4 @@ func (m *metrics) Initialize( registerer.Register(m.numDroppedTxs), registerer.Register(m.numAcceptedTxs), ) - return errs.Err } diff --git a/snow/engine/avalanche/getter/getter.go b/snow/engine/avalanche/getter/getter.go index ebf6ef38b09f..a93d2f1d069e 100644 --- a/snow/engine/avalanche/getter/getter.go +++ b/snow/engine/avalanche/getter/getter.go @@ -7,6 +7,8 @@ import ( "context" "time" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -25,12 +27,20 @@ import ( // Get requests are always served, regardless node state (bootstrapping or normal operations). 
var _ common.AllGetsServer = (*getter)(nil) -func New(storage vertex.Storage, commonCfg common.Config) (common.AllGetsServer, error) { +func New( + storage vertex.Storage, + sender common.Sender, + log logging.Logger, + maxTimeGetAncestors time.Duration, + maxContainersGetAncestors int, + reg prometheus.Registerer, +) (common.AllGetsServer, error) { gh := &getter{ - storage: storage, - sender: commonCfg.Sender, - cfg: commonCfg, - log: commonCfg.Ctx.Log, + storage: storage, + sender: sender, + log: log, + maxTimeGetAncestors: maxTimeGetAncestors, + maxContainersGetAncestors: maxContainersGetAncestors, } var err error @@ -38,17 +48,18 @@ func New(storage vertex.Storage, commonCfg common.Config) (common.AllGetsServer, "bs", "get_ancestors_vtxs", "vertices fetched in a call to GetAncestors", - commonCfg.Ctx.AvalancheRegisterer, + reg, ) return gh, err } type getter struct { - storage vertex.Storage - sender common.Sender - cfg common.Config + storage vertex.Storage + sender common.Sender + log logging.Logger + maxTimeGetAncestors time.Duration + maxContainersGetAncestors int - log logging.Logger getAncestorsVtxs metric.Averager } @@ -62,7 +73,7 @@ func (gh *getter) GetStateSummaryFrontier(_ context.Context, nodeID ids.NodeID, return nil } -func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []uint64) error { +func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[uint64]) error { gh.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), @@ -72,6 +83,8 @@ func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, return nil } +// TODO: Remove support for GetAcceptedFrontier messages after v1.11.x is +// activated. 
func (gh *getter) GetAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error { acceptedFrontier := gh.storage.Edge(ctx) // Since all the DAGs are linearized, we only need to return the stop @@ -82,9 +95,10 @@ func (gh *getter) GetAcceptedFrontier(ctx context.Context, validatorID ids.NodeI return nil } -func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - acceptedVtxIDs := make([]ids.ID, 0, len(containerIDs)) - for _, vtxID := range containerIDs { +// TODO: Remove support for GetAccepted messages after v1.11.x is activated. +func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { + acceptedVtxIDs := make([]ids.ID, 0, containerIDs.Len()) + for vtxID := range containerIDs { if vtx, err := gh.storage.GetVtx(ctx, vtxID); err == nil && vtx.Status() == choices.Accepted { acceptedVtxIDs = append(acceptedVtxIDs, vtxID) } @@ -106,13 +120,13 @@ func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID return nil // Don't have the requested vertex. Drop message. 
} - queue := make([]avalanche.Vertex, 1, gh.cfg.AncestorsMaxContainersSent) // for BFS + queue := make([]avalanche.Vertex, 1, gh.maxContainersGetAncestors) // for BFS queue[0] = vertex - ancestorsBytesLen := 0 // length, in bytes, of vertex and its ancestors - ancestorsBytes := make([][]byte, 0, gh.cfg.AncestorsMaxContainersSent) // vertex and its ancestors in BFS order - visited := set.Of(vertex.ID()) // IDs of vertices that have been in queue before + ancestorsBytesLen := 0 // length, in bytes, of vertex and its ancestors + ancestorsBytes := make([][]byte, 0, gh.maxContainersGetAncestors) // vertex and its ancestors in BFS order + visited := set.Of(vertex.ID()) // IDs of vertices that have been in queue before - for len(ancestorsBytes) < gh.cfg.AncestorsMaxContainersSent && len(queue) > 0 && time.Since(startTime) < gh.cfg.MaxTimeGetAncestors { + for len(ancestorsBytes) < gh.maxContainersGetAncestors && len(queue) > 0 && time.Since(startTime) < gh.maxTimeGetAncestors { var vtx avalanche.Vertex vtx, queue = queue[0], queue[1:] // pop vtxBytes := vtx.Bytes() diff --git a/snow/engine/avalanche/getter/getter_test.go b/snow/engine/avalanche/getter/getter_test.go index 93694ed5bba0..4d25e29f296b 100644 --- a/snow/engine/avalanche/getter/getter_test.go +++ b/snow/engine/avalanche/getter/getter_test.go @@ -7,75 +7,50 @@ import ( "context" "errors" "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/constants" + 
"github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" ) var errUnknownVertex = errors.New("unknown vertex") -func testSetup(t *testing.T) (*vertex.TestManager, *common.SenderTest, common.Config) { - vdrs := validators.NewManager() - peer := ids.GenerateTestNodeID() - require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, peer, nil, ids.Empty, 1)) - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false +func newTest(t *testing.T) (common.AllGetsServer, *vertex.TestManager, *common.SenderTest) { + manager := vertex.NewTestManager(t) + manager.Default(true) - isBootstrapped := false - bootstrapTracker := &common.BootstrapTrackerTest{ + sender := &common.SenderTest{ T: t, - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, } + sender.Default(true) - totalWeight, err := vdrs.TotalWeight(constants.PrimaryNetworkID) + bs, err := New( + manager, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry(), + ) require.NoError(t, err) - commonConfig := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Count(constants.PrimaryNetworkID), - Alpha: totalWeight/2 + 1, - Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - manager := vertex.NewTestManager(t) - manager.Default(true) - - return manager, sender, commonConfig + return bs, manager, sender } func TestAcceptedFrontier(t *testing.T) { require := require.New(t) - - manager, sender, config := testSetup(t) + bs, manager, sender := newTest(t) vtxID := ids.GenerateTestID() - - bsIntf, err := New(manager, config) - require.NoError(err) - require.IsType(&getter{}, bsIntf) - bs := bsIntf.(*getter) - manager.EdgeF = func(context.Context) 
[]ids.ID { return []ids.ID{ vtxID, @@ -92,8 +67,7 @@ func TestAcceptedFrontier(t *testing.T) { func TestFilterAccepted(t *testing.T) { require := require.New(t) - - manager, sender, config := testSetup(t) + bs, manager, sender := newTest(t) vtxID0 := ids.GenerateTestID() vtxID1 := ids.GenerateTestID() @@ -108,13 +82,6 @@ func TestFilterAccepted(t *testing.T) { StatusV: choices.Accepted, }} - bsIntf, err := New(manager, config) - require.NoError(err) - require.IsType(&getter{}, bsIntf) - bs := bsIntf.(*getter) - - vtxIDs := []ids.ID{vtxID0, vtxID1, vtxID2} - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: @@ -133,6 +100,7 @@ func TestFilterAccepted(t *testing.T) { accepted = frontier } + vtxIDs := set.Of(vtxID0, vtxID1, vtxID2) require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, vtxIDs)) require.Contains(accepted, vtxID0) diff --git a/snow/engine/avalanche/vertex/mock_vm.go b/snow/engine/avalanche/vertex/mock_vm.go index 973ba6690801..b8d2637c7311 100644 --- a/snow/engine/avalanche/vertex/mock_vm.go +++ b/snow/engine/avalanche/vertex/mock_vm.go @@ -13,7 +13,7 @@ import ( reflect "reflect" time "time" - manager "github.com/ava-labs/avalanchego/database/manager" + database "github.com/ava-labs/avalanchego/database" ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" @@ -263,7 +263,7 @@ func (mr *MockLinearizableVMMockRecorder) HealthCheck(arg0 interface{}) *gomock. } // Initialize mocks base method. 
-func (m *MockLinearizableVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 manager.Manager, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { +func (m *MockLinearizableVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 database.Database, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) ret0, _ := ret[0].(error) diff --git a/snow/engine/common/appsender/appsender_client.go b/snow/engine/common/appsender/appsender_client.go index a816dd68241e..c74616d71006 100644 --- a/snow/engine/common/appsender/appsender_client.go +++ b/snow/engine/common/appsender/appsender_client.go @@ -52,8 +52,7 @@ func (c *Client) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { - nodeID := nodeID // Prevent overwrite in next iteration - nodeIDsBytes[i] = nodeID[:] + nodeIDsBytes[i] = nodeID.Bytes() i++ } _, err := c.client.SendAppRequest( @@ -71,7 +70,7 @@ func (c *Client) SendAppResponse(ctx context.Context, nodeID ids.NodeID, request _, err := c.client.SendAppResponse( ctx, &appsenderpb.SendAppResponseMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Response: response, }, @@ -93,8 +92,7 @@ func (c *Client) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids. 
nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { - nodeID := nodeID // Prevent overwrite in next iteration - nodeIDsBytes[i] = nodeID[:] + nodeIDsBytes[i] = nodeID.Bytes() i++ } _, err := c.client.SendAppGossipSpecific( diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go index aca219130478..befb3628771b 100644 --- a/snow/engine/common/bootstrapper.go +++ b/snow/engine/common/bootstrapper.go @@ -5,16 +5,13 @@ package common import ( "context" - "fmt" - "math" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/set" - safemath "github.com/ava-labs/avalanchego/utils/math" + smbootstrapper "github.com/ava-labs/avalanchego/snow/consensus/snowman/bootstrapper" ) const ( @@ -22,10 +19,6 @@ const ( // logs StatusUpdateFrequency = 5000 - // MaxOutstandingGetAncestorsRequests is the maximum number of GetAncestors - // sent but not responded to/failed - MaxOutstandingGetAncestorsRequests = 10 - // MaxOutstandingBroadcastRequests is the maximum number of requests to have // outstanding when broadcasting. 
MaxOutstandingBroadcastRequests = 50 @@ -46,30 +39,8 @@ type bootstrapper struct { Config Halter - // Holds the beacons that were sampled for the accepted frontier - sampledBeacons validators.Manager - // IDs of validators we should request an accepted frontier from - pendingSendAcceptedFrontier set.Set[ids.NodeID] - // IDs of validators we requested an accepted frontier from but haven't - // received a reply yet - pendingReceiveAcceptedFrontier set.Set[ids.NodeID] - // IDs of validators that failed to respond with their accepted frontier - failedAcceptedFrontier set.Set[ids.NodeID] - // IDs of all the returned accepted frontiers - acceptedFrontierSet set.Set[ids.ID] - - // IDs of validators we should request filtering the accepted frontier from - pendingSendAccepted set.Set[ids.NodeID] - // IDs of validators we requested filtering the accepted frontier from but - // haven't received a reply yet - pendingReceiveAccepted set.Set[ids.NodeID] - // IDs of validators that failed to respond with their filtered accepted - // frontier - failedAccepted set.Set[ids.NodeID] - // IDs of the returned accepted containers and the stake weight that has - // marked them as accepted - acceptedVotes map[ids.ID]uint64 - acceptedFrontier []ids.ID + minority smbootstrapper.Poll + majority smbootstrapper.Poll // number of times the bootstrap has been attempted bootstrapAttempts int @@ -77,12 +48,13 @@ type bootstrapper struct { func NewCommonBootstrapper(config Config) Bootstrapper { return &bootstrapper{ - Config: config, + Config: config, + minority: smbootstrapper.Noop, + majority: smbootstrapper.Noop, } } func (b *bootstrapper) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { - // ignores any late responses if requestID != b.Config.SharedCfg.RequestID { b.Ctx.Log.Debug("received out-of-sync AcceptedFrontier message", zap.Stringer("nodeID", nodeID), @@ -92,21 +64,13 @@ func (b *bootstrapper) AcceptedFrontier(ctx context.Context, 
nodeID ids.NodeID, return nil } - if !b.pendingReceiveAcceptedFrontier.Contains(nodeID) { - b.Ctx.Log.Debug("received unexpected AcceptedFrontier message", - zap.Stringer("nodeID", nodeID), - ) - return nil + if err := b.minority.RecordOpinion(ctx, nodeID, set.Of(containerID)); err != nil { + return err } - - // Union the reported accepted frontier from [nodeID] with the accepted - // frontier we got from others - b.acceptedFrontierSet.Add(containerID) - return b.markAcceptedFrontierReceived(ctx, nodeID) + return b.sendMessagesOrFinish(ctx) } func (b *bootstrapper) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - // ignores any late responses if requestID != b.Config.SharedCfg.RequestID { b.Ctx.Log.Debug("received out-of-sync GetAcceptedFrontierFailed message", zap.Stringer("nodeID", nodeID), @@ -116,76 +80,13 @@ func (b *bootstrapper) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids return nil } - if !b.pendingReceiveAcceptedFrontier.Contains(nodeID) { - b.Ctx.Log.Debug("received unexpected GetAcceptedFrontierFailed message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - - // If we can't get a response from [nodeID], act as though they said their - // accepted frontier is empty and we add the validator to the failed list - b.failedAcceptedFrontier.Add(nodeID) - return b.markAcceptedFrontierReceived(ctx, nodeID) -} - -func (b *bootstrapper) markAcceptedFrontierReceived(ctx context.Context, nodeID ids.NodeID) error { - // Mark that we received a response from [nodeID] - b.pendingReceiveAcceptedFrontier.Remove(nodeID) - - b.sendGetAcceptedFrontiers(ctx) - - // still waiting on requests - if b.pendingReceiveAcceptedFrontier.Len() != 0 { - return nil - } - - // We've received the accepted frontier from every bootstrap validator - // Ask each bootstrap validator to filter the list of containers that we were - // told are on the accepted frontier such that the list only contains containers - // they think 
are accepted. - totalSampledWeight, err := b.sampledBeacons.TotalWeight(b.Ctx.SubnetID) - if err != nil { - return fmt.Errorf("failed to get total weight of sampled beacons for subnet %s: %w", b.Ctx.SubnetID, err) - } - beaconsTotalWeight, err := b.Beacons.TotalWeight(b.Ctx.SubnetID) - if err != nil { - return fmt.Errorf("failed to get total weight of beacons for subnet %s: %w", b.Ctx.SubnetID, err) - } - newAlpha := float64(totalSampledWeight*b.Alpha) / float64(beaconsTotalWeight) - - failedBeaconWeight, err := b.Beacons.SubsetWeight(b.Ctx.SubnetID, b.failedAcceptedFrontier) - if err != nil { - return fmt.Errorf("failed to get total weight of failed beacons: %w", err) - } - - // fail the bootstrap if the weight is not enough to bootstrap - if float64(totalSampledWeight)-newAlpha < float64(failedBeaconWeight) { - if b.Config.RetryBootstrap { - b.Ctx.Log.Debug("restarting bootstrap", - zap.String("reason", "not enough frontiers received"), - zap.Int("numBeacons", b.Beacons.Count(b.Ctx.SubnetID)), - zap.Int("numFailedBootstrappers", b.failedAcceptedFrontier.Len()), - zap.Int("numBootstrapAttemps", b.bootstrapAttempts), - ) - return b.Restart(ctx, false) - } - - b.Ctx.Log.Debug("didn't receive enough frontiers", - zap.Int("numFailedValidators", b.failedAcceptedFrontier.Len()), - zap.Int("numBootstrapAttempts", b.bootstrapAttempts), - ) + if err := b.minority.RecordOpinion(ctx, nodeID, nil); err != nil { + return err } - - b.Config.SharedCfg.RequestID++ - b.acceptedFrontier = b.acceptedFrontierSet.List() - - b.sendGetAccepted(ctx) - return nil + return b.sendMessagesOrFinish(ctx) } -func (b *bootstrapper) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - // ignores any late responses +func (b *bootstrapper) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { if requestID != b.Config.SharedCfg.RequestID { b.Ctx.Log.Debug("received out-of-sync Accepted message", 
zap.Stringer("nodeID", nodeID), @@ -195,90 +96,13 @@ func (b *bootstrapper) Accepted(ctx context.Context, nodeID ids.NodeID, requestI return nil } - if !b.pendingReceiveAccepted.Contains(nodeID) { - b.Ctx.Log.Debug("received unexpected Accepted message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - // Mark that we received a response from [nodeID] - b.pendingReceiveAccepted.Remove(nodeID) - - weight := b.Beacons.GetWeight(b.Ctx.SubnetID, nodeID) - for _, containerID := range containerIDs { - previousWeight := b.acceptedVotes[containerID] - newWeight, err := safemath.Add64(weight, previousWeight) - if err != nil { - b.Ctx.Log.Error("failed calculating the Accepted votes", - zap.Uint64("weight", weight), - zap.Uint64("previousWeight", previousWeight), - zap.Error(err), - ) - newWeight = math.MaxUint64 - } - b.acceptedVotes[containerID] = newWeight - } - - b.sendGetAccepted(ctx) - - // wait on pending responses - if b.pendingReceiveAccepted.Len() != 0 { - return nil - } - - // We've received the filtered accepted frontier from every bootstrap validator - // Accept all containers that have a sufficient weight behind them - accepted := make([]ids.ID, 0, len(b.acceptedVotes)) - for containerID, weight := range b.acceptedVotes { - if weight >= b.Alpha { - accepted = append(accepted, containerID) - } - } - - // if we don't have enough weight for the bootstrap to be accepted then - // retry or fail the bootstrap - size := len(accepted) - if size == 0 && b.Beacons.Count(b.Ctx.SubnetID) > 0 { - // if we had too many timeouts when asking for validator votes, we - // should restart bootstrap hoping for the network problems to go away; - // otherwise, we received enough (>= b.Alpha) responses, but no frontier - // was supported by a majority of validators (i.e. votes are split - // between minorities supporting different frontiers). 
- beaconTotalWeight, err := b.Beacons.TotalWeight(b.Ctx.SubnetID) - if err != nil { - return fmt.Errorf("failed to get total weight of beacons for subnet %s: %w", b.Ctx.SubnetID, err) - } - failedBeaconWeight, err := b.Beacons.SubsetWeight(b.Ctx.SubnetID, b.failedAccepted) - if err != nil { - return fmt.Errorf("failed to get total weight of failed beacons for subnet %s: %w", b.Ctx.SubnetID, err) - } - votingStakes := beaconTotalWeight - failedBeaconWeight - if b.Config.RetryBootstrap && votingStakes < b.Alpha { - b.Ctx.Log.Debug("restarting bootstrap", - zap.String("reason", "not enough votes received"), - zap.Int("numBeacons", b.Beacons.Count(b.Ctx.SubnetID)), - zap.Int("numFailedBootstrappers", b.failedAccepted.Len()), - zap.Int("numBootstrapAttempts", b.bootstrapAttempts), - ) - return b.Restart(ctx, false) - } - } - - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("bootstrapping started syncing", - zap.Int("numVerticesInFrontier", size), - ) - } else { - b.Ctx.Log.Debug("bootstrapping started syncing", - zap.Int("numVerticesInFrontier", size), - ) + if err := b.majority.RecordOpinion(ctx, nodeID, containerIDs); err != nil { + return err } - - return b.Bootstrapable.ForceAccepted(ctx, accepted) + return b.sendMessagesOrFinish(ctx) } func (b *bootstrapper) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - // ignores any late responses if requestID != b.Config.SharedCfg.RequestID { b.Ctx.Log.Debug("received out-of-sync GetAcceptedFailed message", zap.Stringer("nodeID", nodeID), @@ -288,58 +112,50 @@ func (b *bootstrapper) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, return nil } - // If we can't get a response from [nodeID], act as though they said that - // they think none of the containers we sent them in GetAccepted are - // accepted - b.failedAccepted.Add(nodeID) - return b.Accepted(ctx, nodeID, requestID, nil) + if err := b.majority.RecordOpinion(ctx, nodeID, nil); err != nil { + return err + } + return 
b.sendMessagesOrFinish(ctx) } func (b *bootstrapper) Startup(ctx context.Context) error { - beaconIDs, err := b.Beacons.Sample(b.Ctx.SubnetID, b.Config.SampleK) - if err != nil { - return err + currentBeacons := b.Beacons.GetMap(b.Ctx.SubnetID) + nodeWeights := make(map[ids.NodeID]uint64, len(currentBeacons)) + for nodeID, beacon := range currentBeacons { + nodeWeights[nodeID] = beacon.Weight } - b.sampledBeacons = validators.NewManager() - b.pendingSendAcceptedFrontier.Clear() - for _, nodeID := range beaconIDs { - if _, ok := b.sampledBeacons.GetValidator(b.Ctx.SubnetID, nodeID); !ok { - // Invariant: We never use the TxID or BLS keys populated here. - err = b.sampledBeacons.AddStaker(b.Ctx.SubnetID, nodeID, nil, ids.Empty, 1) - } else { - err = b.sampledBeacons.AddWeight(b.Ctx.SubnetID, nodeID, 1) - } - if err != nil { - return err - } - b.pendingSendAcceptedFrontier.Add(nodeID) + frontierNodes, err := smbootstrapper.Sample(nodeWeights, b.SampleK) + if err != nil { + return err } - b.pendingReceiveAcceptedFrontier.Clear() - b.failedAcceptedFrontier.Clear() - b.acceptedFrontierSet.Clear() + b.Ctx.Log.Debug("sampled nodes to seed bootstrapping frontier", + zap.Reflect("sampledNodes", frontierNodes), + zap.Int("numNodes", len(nodeWeights)), + ) - b.pendingSendAccepted.Clear() - for _, nodeID := range b.Beacons.GetValidatorIDs(b.Ctx.SubnetID) { - b.pendingSendAccepted.Add(nodeID) - } - - b.pendingReceiveAccepted.Clear() - b.failedAccepted.Clear() - b.acceptedVotes = make(map[ids.ID]uint64) + b.minority = smbootstrapper.NewMinority( + b.Ctx.Log, + frontierNodes, + MaxOutstandingBroadcastRequests, + ) + b.majority = smbootstrapper.NewMajority( + b.Ctx.Log, + nodeWeights, + MaxOutstandingBroadcastRequests, + ) b.bootstrapAttempts++ - if b.pendingSendAcceptedFrontier.Len() == 0 { + if accepted, finalized := b.majority.Result(ctx); finalized { b.Ctx.Log.Info("bootstrapping skipped", zap.String("reason", "no provided bootstraps"), ) - return 
b.Bootstrapable.ForceAccepted(ctx, nil) + return b.Bootstrapable.ForceAccepted(ctx, accepted) } b.Config.SharedCfg.RequestID++ - b.sendGetAcceptedFrontiers(ctx) - return nil + return b.sendMessagesOrFinish(ctx) } func (b *bootstrapper) Restart(ctx context.Context, reset bool) error { @@ -361,40 +177,50 @@ func (b *bootstrapper) Restart(ctx context.Context, reset bool) error { return b.Startup(ctx) } -// Ask up to [MaxOutstandingBroadcastRequests] bootstrap validators to send -// their accepted frontier with the current accepted frontier -func (b *bootstrapper) sendGetAcceptedFrontiers(ctx context.Context) { - vdrs := set.NewSet[ids.NodeID](1) - for b.pendingSendAcceptedFrontier.Len() > 0 && b.pendingReceiveAcceptedFrontier.Len() < MaxOutstandingBroadcastRequests { - vdr, _ := b.pendingSendAcceptedFrontier.Pop() - // Add the validator to the set to send the messages to - vdrs.Add(vdr) - // Add the validator to send pending receipt set - b.pendingReceiveAcceptedFrontier.Add(vdr) +func (b *bootstrapper) sendMessagesOrFinish(ctx context.Context) error { + if peers := b.minority.GetPeers(ctx); peers.Len() > 0 { + b.Sender.SendGetAcceptedFrontier(ctx, peers, b.Config.SharedCfg.RequestID) + return nil } - if vdrs.Len() > 0 { - b.Sender.SendGetAcceptedFrontier(ctx, vdrs, b.Config.SharedCfg.RequestID) + potentialAccepted, finalized := b.minority.Result(ctx) + if !finalized { + // We haven't finalized the accepted frontier, so we should wait for the + // outstanding requests. + return nil + } + + if peers := b.majority.GetPeers(ctx); peers.Len() > 0 { + b.Sender.SendGetAccepted(ctx, peers, b.Config.SharedCfg.RequestID, potentialAccepted) + return nil + } + + accepted, finalized := b.majority.Result(ctx) + if !finalized { + // We haven't finalized the accepted set, so we should wait for the + // outstanding requests. 
+ return nil } -} -// Ask up to [MaxOutstandingBroadcastRequests] bootstrap validators to send -// their filtered accepted frontier -func (b *bootstrapper) sendGetAccepted(ctx context.Context) { - vdrs := set.NewSet[ids.NodeID](1) - for b.pendingSendAccepted.Len() > 0 && b.pendingReceiveAccepted.Len() < MaxOutstandingBroadcastRequests { - vdr, _ := b.pendingSendAccepted.Pop() - // Add the validator to the set to send the messages to - vdrs.Add(vdr) - // Add the validator to send pending receipt set - b.pendingReceiveAccepted.Add(vdr) + numAccepted := len(accepted) + if numAccepted == 0 { + b.Ctx.Log.Debug("restarting bootstrap", + zap.String("reason", "no blocks accepted"), + zap.Int("numBeacons", b.Beacons.Count(b.Ctx.SubnetID)), + zap.Int("numBootstrapAttempts", b.bootstrapAttempts), + ) + return b.Restart(ctx, false /*=reset*/) } - if vdrs.Len() > 0 { - b.Ctx.Log.Debug("sent GetAccepted messages", - zap.Int("numSent", vdrs.Len()), - zap.Int("numPending", b.pendingSendAccepted.Len()), + if !b.Config.SharedCfg.Restarted { + b.Ctx.Log.Info("bootstrapping started syncing", + zap.Int("numAccepted", numAccepted), + ) + } else { + b.Ctx.Log.Debug("bootstrapping started syncing", + zap.Int("numAccepted", numAccepted), ) - b.Sender.SendGetAccepted(ctx, vdrs, b.Config.SharedCfg.RequestID, b.acceptedFrontier) } + + return b.Bootstrapable.ForceAccepted(ctx, accepted) } diff --git a/snow/engine/common/config.go b/snow/engine/common/config.go index 05eb3602f876..f513c8f29655 100644 --- a/snow/engine/common/config.go +++ b/snow/engine/common/config.go @@ -4,8 +4,6 @@ package common import ( - "time" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/validators" @@ -31,13 +29,6 @@ type Config struct { // Max number of times to retry bootstrap before warning the node operator RetryBootstrapWarnFrequency int - // Max time to spend fetching a container and its ancestors 
when responding - // to a GetAncestors - MaxTimeGetAncestors time.Duration - - // Max number of containers in an ancestors message sent by this node. - AncestorsMaxContainersSent int - // This node will only consider the first [AncestorsMaxContainersReceived] // containers in an ancestors message it receives. AncestorsMaxContainersReceived int @@ -45,15 +36,6 @@ type Config struct { SharedCfg *SharedConfig } -func (c *Config) Context() *snow.ConsensusContext { - return c.Ctx -} - -// IsBootstrapped returns true iff this chain is done bootstrapping -func (c *Config) IsBootstrapped() bool { - return c.Ctx.State.Get().State == snow.NormalOp -} - // Shared among common.bootstrapper and snowman/avalanche bootstrapper type SharedConfig struct { // Tracks the last requestID that was used in a request diff --git a/snow/engine/common/engine.go b/snow/engine/common/engine.go index 4c8213a432f6..d92911ff1f7f 100644 --- a/snow/engine/common/engine.go +++ b/snow/engine/common/engine.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/set" ) // Engine describes the standard interface of a consensus engine. 
@@ -108,7 +109,7 @@ type GetAcceptedStateSummaryHandler interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, - heights []uint64, + heights set.Set[uint64], ) error } @@ -122,7 +123,7 @@ type AcceptedStateSummaryHandler interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, - summaryIDs []ids.ID, + summaryIDs set.Set[ids.ID], ) error // Notify this engine that a GetAcceptedStateSummary request it issued has @@ -182,7 +183,7 @@ type GetAcceptedHandler interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, - containerIDs []ids.ID, + containerIDs set.Set[ids.ID], ) error } @@ -196,7 +197,7 @@ type AcceptedHandler interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, - containerIDs []ids.ID, + containerIDs set.Set[ids.ID], ) error // Notify this engine that a GetAccepted request it issued has failed. diff --git a/snow/engine/common/no_ops_handlers.go b/snow/engine/common/no_ops_handlers.go index 9530600177bb..716d1d5111b3 100644 --- a/snow/engine/common/no_ops_handlers.go +++ b/snow/engine/common/no_ops_handlers.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -65,7 +66,7 @@ func NewNoOpAcceptedStateSummaryHandler(log logging.Logger) AcceptedStateSummary return &noOpAcceptedStateSummaryHandler{log: log} } -func (nop *noOpAcceptedStateSummaryHandler) AcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedStateSummaryHandler) AcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[ids.ID]) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.AcceptedStateSummaryOp), @@ -122,7 +123,7 @@ func 
NewNoOpAcceptedHandler(log logging.Logger) AcceptedHandler { return &noOpAcceptedHandler{log: log} } -func (nop *noOpAcceptedHandler) Accepted(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedHandler) Accepted(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[ids.ID]) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.AcceptedOp), diff --git a/snow/engine/common/queue/jobs.go b/snow/engine/common/queue/jobs.go index 728edcc98a81..5592ad822439 100644 --- a/snow/engine/common/queue/jobs.go +++ b/snow/engine/common/queue/jobs.go @@ -17,9 +17,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const progressUpdateFrequency = 30 * time.Second @@ -425,10 +425,8 @@ func (jm *JobsWithMissing) cleanRunnableStack(ctx context.Context) error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( runnableJobsIter.Error(), jm.Commit(), ) - return errs.Err } diff --git a/snow/engine/common/queue/state.go b/snow/engine/common/queue/state.go index 5e5ccb232271..cae43f8c2101 100644 --- a/snow/engine/common/queue/state.go +++ b/snow/engine/common/queue/state.go @@ -15,8 +15,8 @@ import ( "github.com/ava-labs/avalanchego/database/linkeddb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const ( @@ -152,14 +152,12 @@ func (s *state) Clear() error { return err } - errs := wrappers.Errs{} - 
errs.Add( + return utils.Err( runJobsIter.Error(), jobsIter.Error(), depsIter.Error(), missJobsIter.Error(), ) - return errs.Err } // AddRunnableJob adds [jobID] to the runnable queue diff --git a/snow/engine/common/requests_test.go b/snow/engine/common/requests_test.go index 00e648dfa90a..73a98e4ccb94 100644 --- a/snow/engine/common/requests_test.go +++ b/snow/engine/common/requests_test.go @@ -30,7 +30,7 @@ func TestRequests(t *testing.T) { _, removed = req.Remove(ids.EmptyNodeID, 1) require.False(removed) - _, removed = req.Remove(ids.NodeID{1}, 0) + _, removed = req.Remove(ids.BuildTestNodeID([]byte{0x01}), 0) require.False(removed) require.True(req.Contains(ids.Empty)) @@ -42,7 +42,7 @@ func TestRequests(t *testing.T) { _, removed = req.Remove(ids.EmptyNodeID, 1) require.False(removed) - _, removed = req.Remove(ids.NodeID{1}, 0) + _, removed = req.Remove(ids.BuildTestNodeID([]byte{0x01}), 0) require.False(removed) require.True(req.Contains(ids.Empty)) diff --git a/snow/engine/common/test_config.go b/snow/engine/common/test_config.go index d39e6078fd01..c8cfa7fc76fd 100644 --- a/snow/engine/common/test_config.go +++ b/snow/engine/common/test_config.go @@ -37,7 +37,6 @@ func DefaultConfigTest() Config { Bootstrapable: &BootstrapableTest{}, BootstrapTracker: bootstrapTracker, Timer: &TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, SharedCfg: &SharedConfig{}, } diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index 579f2ca94c9d..3eb376de8e32 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -119,15 +120,15 @@ type EngineTest struct { PushQueryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte, 
requestedHeight uint64) error AncestorsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) error AcceptedFrontierF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error - GetAcceptedF, AcceptedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID) error + GetAcceptedF, AcceptedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs set.Set[ids.ID]) error ChitsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDAtHeight ids.ID, acceptedID ids.ID) error GetStateSummaryFrontierF, GetStateSummaryFrontierFailedF, GetAcceptedStateSummaryFailedF, GetAcceptedFrontierF, GetFailedF, GetAncestorsFailedF, QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error StateSummaryFrontierF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) error - GetAcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, keys []uint64) error - AcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error + GetAcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, keys set.Set[uint64]) error + AcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error ConnectedF func(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error DisconnectedF func(ctx context.Context, nodeID ids.NodeID) error HealthF func(context.Context) (interface{}, error) @@ -314,7 +315,7 @@ func (e *EngineTest) GetStateSummaryFrontierFailed(ctx context.Context, validato return errGetStateSummaryFrontierFailed } -func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID 
ids.NodeID, requestID uint32, keys []uint64) error { +func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, keys set.Set[uint64]) error { if e.GetAcceptedStateSummaryF != nil { return e.GetAcceptedStateSummaryF(ctx, validatorID, requestID, keys) } @@ -327,7 +328,7 @@ func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID id return errGetAcceptedStateSummary } -func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error { if e.AcceptedStateSummaryF != nil { return e.AcceptedStateSummaryF(ctx, validatorID, requestID, summaryIDs) } @@ -392,7 +393,7 @@ func (e *EngineTest) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re return errAcceptedFrontier } -func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { if e.GetAcceptedF != nil { return e.GetAcceptedF(ctx, nodeID, requestID, containerIDs) } @@ -418,7 +419,7 @@ func (e *EngineTest) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, r return errGetAcceptedFailed } -func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { if e.AcceptedF != nil { return e.AcceptedF(ctx, nodeID, requestID, containerIDs) } diff --git a/snow/engine/common/test_sender.go b/snow/engine/common/test_sender.go index 5896f48dfa25..5b76f3b6a2f4 100644 --- a/snow/engine/common/test_sender.go +++ b/snow/engine/common/test_sender.go @@ -6,7 +6,6 @@ package common import 
( "context" "errors" - "testing" "github.com/stretchr/testify/require" @@ -27,7 +26,7 @@ var ( // SenderTest is a test sender type SenderTest struct { - T *testing.T + T require.TestingT CantAccept, CantSendGetStateSummaryFrontier, CantSendStateSummaryFrontier, diff --git a/snow/engine/common/test_vm.go b/snow/engine/common/test_vm.go index 254401388e6e..9d1a77ef2a9f 100644 --- a/snow/engine/common/test_vm.go +++ b/snow/engine/common/test_vm.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/version" @@ -49,7 +49,7 @@ type TestVM struct { CantAppRequest, CantAppResponse, CantAppGossip, CantAppRequestFailed, CantCrossChainAppRequest, CantCrossChainAppResponse, CantCrossChainAppRequestFailed bool - InitializeF func(ctx context.Context, chainCtx *snow.Context, db manager.Manager, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, msgChan chan<- Message, fxs []*Fx, appSender AppSender) error + InitializeF func(ctx context.Context, chainCtx *snow.Context, db database.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, msgChan chan<- Message, fxs []*Fx, appSender AppSender) error SetStateF func(ctx context.Context, state snow.State) error ShutdownF func(context.Context) error CreateHandlersF func(context.Context) (map[string]http.Handler, error) @@ -89,7 +89,7 @@ func (vm *TestVM) Default(cant bool) { func (vm *TestVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, diff --git a/snow/engine/common/traced_engine.go b/snow/engine/common/traced_engine.go index 387ee8289e2a..9050f29cd1e1 100644 --- a/snow/engine/common/traced_engine.go +++ 
b/snow/engine/common/traced_engine.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -62,22 +63,22 @@ func (e *tracedEngine) GetStateSummaryFrontierFailed(ctx context.Context, nodeID return e.engine.GetStateSummaryFrontierFailed(ctx, nodeID, requestID) } -func (e *tracedEngine) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights []uint64) error { +func (e *tracedEngine) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights set.Set[uint64]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAcceptedStateSummary", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numHeights", len(heights)), + attribute.Int("numHeights", heights.Len()), )) defer span.End() return e.engine.GetAcceptedStateSummary(ctx, nodeID, requestID, heights) } -func (e *tracedEngine) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (e *tracedEngine) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.AcceptedStateSummary", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numSummaryIDs", len(summaryIDs)), + attribute.Int("numSummaryIDs", summaryIDs.Len()), )) defer span.End() @@ -125,22 +126,22 @@ func (e *tracedEngine) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids return e.engine.GetAcceptedFrontierFailed(ctx, nodeID, requestID) } -func (e *tracedEngine) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { 
+func (e *tracedEngine) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAccepted", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numContainerIDs", len(containerIDs)), + attribute.Int("numContainerIDs", containerIDs.Len()), )) defer span.End() return e.engine.GetAccepted(ctx, nodeID, requestID, containerIDs) } -func (e *tracedEngine) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *tracedEngine) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.Accepted", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numContainerIDs", len(containerIDs)), + attribute.Int("numContainerIDs", containerIDs.Len()), )) defer span.End() diff --git a/snow/engine/common/tracker/peers.go b/snow/engine/common/tracker/peers.go index cde9eb8f6134..ad9592209a5a 100644 --- a/snow/engine/common/tracker/peers.go +++ b/snow/engine/common/tracker/peers.go @@ -11,9 +11,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" ) @@ -139,8 +139,7 @@ func NewMeteredPeers(namespace string, reg prometheus.Registerer) (Peers, error) Name: "num_validators", Help: "Total number of validators", }) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(percentConnected), reg.Register(totalWeight), reg.Register(numValidators), @@ -154,7 +153,7 @@ 
func NewMeteredPeers(namespace string, reg prometheus.Registerer) (Peers, error) totalWeight: totalWeight, numValidators: numValidators, }, - }, errs.Err + }, err } func (p *meteredPeers) OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { diff --git a/snow/engine/common/vm.go b/snow/engine/common/vm.go index 07cfe1272a6c..e77bdd552bbf 100644 --- a/snow/engine/common/vm.go +++ b/snow/engine/common/vm.go @@ -8,7 +8,7 @@ import ( "net/http" "github.com/ava-labs/avalanchego/api/health" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" ) @@ -47,7 +47,7 @@ type VM interface { Initialize( ctx context.Context, chainCtx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, diff --git a/snow/engine/snowman/block/mocks/chain_vm.go b/snow/engine/snowman/block/mocks/chain_vm.go index bea5f558d89b..2a2446a94ee5 100644 --- a/snow/engine/snowman/block/mocks/chain_vm.go +++ b/snow/engine/snowman/block/mocks/chain_vm.go @@ -13,7 +13,7 @@ import ( reflect "reflect" time "time" - manager "github.com/ava-labs/avalanchego/database/manager" + database "github.com/ava-labs/avalanchego/database" ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" @@ -262,7 +262,7 @@ func (mr *MockChainVMMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { } // Initialize mocks base method. 
-func (m *MockChainVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 manager.Manager, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { +func (m *MockChainVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 database.Database, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) ret0, _ := ret[0].(error) diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index f6725aa00ba5..ad1d3d901bcd 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -111,6 +111,10 @@ func New(config Config, onFinished func(ctx context.Context, lastReqID uint32) e return b, nil } +func (b *bootstrapper) Context() *snow.ConsensusContext { + return b.Ctx +} + func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { b.Ctx.Log.Info("starting bootstrapper") @@ -556,7 +560,7 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { return nil } - if b.IsBootstrapped() || b.awaitingTimeout { + if b.Ctx.State.Get().State == snow.NormalOp || b.awaitingTimeout { return nil } @@ -594,9 +598,7 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { // If there is an additional callback, notify them that this chain has been // synced. 
if b.Bootstrapped != nil { - b.bootstrappedOnce.Do(func() { - b.Bootstrapped() - }) + b.bootstrappedOnce.Do(b.Bootstrapped) } // Notify the subnet that this chain is synced diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 620d85b0ba80..9dc53342c844 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -8,6 +8,7 @@ import ( "context" "errors" "testing" + "time" "github.com/prometheus/client_golang/prometheus" @@ -83,12 +84,11 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.Tes Sender: sender, BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, SharedCfg: &common.SharedConfig{}, } - snowGetHandler, err := getter.New(vm, commonConfig) + snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) require.NoError(err) blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) @@ -130,13 +130,12 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { Sender: sender, BootstrapTracker: &common.BootstrapTrackerTest{}, Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, SharedCfg: &common.SharedConfig{}, } blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) - snowGetHandler, err := getter.New(vm, commonCfg) + snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) require.NoError(err) cfg := Config{ Config: commonCfg, @@ -405,7 +404,7 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { require.NoError(bs.Ancestors(context.Background(), peerID, *requestID+1, [][]byte{blkBytes1})) // respond with wrong request ID require.Equal(oldReqID, *requestID) - require.NoError(bs.Ancestors(context.Background(), ids.NodeID{1, 2, 3}, 
*requestID, [][]byte{blkBytes1})) // respond from wrong peer + require.NoError(bs.Ancestors(context.Background(), ids.BuildTestNodeID([]byte{1, 2, 3}), *requestID, [][]byte{blkBytes1})) // respond from wrong peer require.Equal(oldReqID, *requestID) require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes0})) // respond with wrong block @@ -1368,12 +1367,11 @@ func TestBootstrapNoParseOnNew(t *testing.T) { Sender: sender, BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, SharedCfg: &common.SharedConfig{}, } - snowGetHandler, err := getter.New(vm, commonConfig) + snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) require.NoError(err) queueDB := memdb.New() diff --git a/snow/engine/snowman/bootstrap/metrics.go b/snow/engine/snowman/bootstrap/metrics.go index 91260df3ca64..9359ecfadb19 100644 --- a/snow/engine/snowman/bootstrap/metrics.go +++ b/snow/engine/snowman/bootstrap/metrics.go @@ -6,7 +6,7 @@ package bootstrap import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type metrics struct { @@ -38,12 +38,11 @@ func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, e }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(m.numFetched), registerer.Register(m.numDropped), registerer.Register(m.numAccepted), registerer.Register(m.fetchETA), ) - return m, errs.Err + return m, err } diff --git a/snow/engine/snowman/getter/getter.go b/snow/engine/snowman/getter/getter.go index a8cb405d57ce..0f9dc40b0a19 100644 --- a/snow/engine/snowman/getter/getter.go +++ b/snow/engine/snowman/getter/getter.go @@ -5,6 +5,9 @@ package getter import ( "context" + "time" + + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" @@ -15,6 +18,7 
@@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/set" ) // Get requests are always served, regardless node state (bootstrapping or normal operations). @@ -22,15 +26,20 @@ var _ common.AllGetsServer = (*getter)(nil) func New( vm block.ChainVM, - commonCfg common.Config, + sender common.Sender, + log logging.Logger, + maxTimeGetAncestors time.Duration, + maxContainersGetAncestors int, + reg prometheus.Registerer, ) (common.AllGetsServer, error) { ssVM, _ := vm.(block.StateSyncableVM) gh := &getter{ - vm: vm, - ssVM: ssVM, - sender: commonCfg.Sender, - cfg: commonCfg, - log: commonCfg.Ctx.Log, + vm: vm, + ssVM: ssVM, + sender: sender, + log: log, + maxTimeGetAncestors: maxTimeGetAncestors, + maxContainersGetAncestors: maxContainersGetAncestors, } var err error @@ -38,18 +47,23 @@ func New( "bs", "get_ancestors_blks", "blocks fetched in a call to GetAncestors", - commonCfg.Ctx.Registerer, + reg, ) return gh, err } type getter struct { - vm block.ChainVM - ssVM block.StateSyncableVM // can be nil + vm block.ChainVM + ssVM block.StateSyncableVM // can be nil + sender common.Sender - cfg common.Config + log logging.Logger + // Max time to spend fetching a container and its ancestors when responding + // to a GetAncestors + maxTimeGetAncestors time.Duration + // Max number of containers in an ancestors message sent by this node. 
+ maxContainersGetAncestors int - log logging.Logger getAncestorsBlks metric.Averager } @@ -81,10 +95,10 @@ func (gh *getter) GetStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID return nil } -func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights []uint64) error { +func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights set.Set[uint64]) error { // If there are no requested heights, then we can return the result // immediately, regardless of if the underlying VM implements state sync. - if len(heights) == 0 { + if heights.Len() == 0 { gh.sender.SendAcceptedStateSummary(ctx, nodeID, requestID, nil) return nil } @@ -101,8 +115,8 @@ func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID return nil } - summaryIDs := make([]ids.ID, 0, len(heights)) - for _, height := range heights { + summaryIDs := make([]ids.ID, 0, heights.Len()) + for height := range heights { summary, err := gh.ssVM.GetStateSummary(ctx, height) if err == block.ErrStateSyncableVMNotImplemented { gh.log.Debug("dropping GetAcceptedStateSummary message", @@ -135,9 +149,9 @@ func (gh *getter) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re return nil } -func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - acceptedIDs := make([]ids.ID, 0, len(containerIDs)) - for _, blkID := range containerIDs { +func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { + acceptedIDs := make([]ids.ID, 0, containerIDs.Len()) + for blkID := range containerIDs { blk, err := gh.vm.GetBlock(ctx, blkID) if err == nil && blk.Status() == choices.Accepted { acceptedIDs = append(acceptedIDs, blkID) @@ -153,9 +167,9 @@ func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID gh.log, gh.vm, blkID, - 
gh.cfg.AncestorsMaxContainersSent, + gh.maxContainersGetAncestors, constants.MaxContainersLen, - gh.cfg.MaxTimeGetAncestors, + gh.maxTimeGetAncestors, ) if err != nil { gh.log.Verbo("dropping GetAncestors message", diff --git a/snow/engine/snowman/getter/getter_test.go b/snow/engine/snowman/getter/getter_test.go index 35a0e11f9ebb..12ecd1abdd80 100644 --- a/snow/engine/snowman/getter/getter_test.go +++ b/snow/engine/snowman/getter/getter_test.go @@ -7,19 +7,22 @@ import ( "context" "errors" "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" ) var errUnknownBlock = errors.New("unknown block") @@ -29,86 +32,40 @@ type StateSyncEnabledMock struct { *mocks.MockStateSyncableVM } -func testSetup( - t *testing.T, - ctrl *gomock.Controller, -) (StateSyncEnabledMock, *common.SenderTest, common.Config) { - ctx := snow.DefaultConsensusContextTest() +func newTest(t *testing.T) (common.AllGetsServer, StateSyncEnabledMock, *common.SenderTest) { + ctrl := gomock.NewController(t) - peers := validators.NewManager() - sender := &common.SenderTest{} vm := StateSyncEnabledMock{ TestVM: &block.TestVM{}, MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), } - sender.T = t - - sender.Default(true) - - isBootstrapped := false - bootstrapTracker := &common.BootstrapTrackerTest{ + sender := 
&common.SenderTest{ T: t, - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, } + sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - - peer := ids.GenerateTestNodeID() - require.NoError(t, peers.AddStaker(ctx.SubnetID, peer, nil, ids.Empty, 1)) - totalWeight, err := peers.TotalWeight(ctx.SubnetID) + bs, err := New( + vm, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry(), + ) require.NoError(t, err) - commonConfig := common.Config{ - Ctx: ctx, - Beacons: peers, - SampleK: peers.Count(ctx.SubnetID), - Alpha: totalWeight/2 + 1, - Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - return vm, sender, commonConfig + return bs, vm, sender } func TestAcceptedFrontier(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - - vm, sender, config := testSetup(t, ctrl) + bs, vm, sender := newTest(t) blkID := ids.GenerateTestID() - - dummyBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: []byte{1, 2, 3}, - } - vm.CantLastAccepted = false vm.LastAcceptedF = func(context.Context) (ids.ID, error) { return blkID, nil } - vm.GetBlockF = func(_ context.Context, bID ids.ID) (snowman.Block, error) { - require.Equal(blkID, bID) - return dummyBlk, nil - } - - bsIntf, err := New(vm, config) - require.NoError(err) - require.IsType(&getter{}, bsIntf) - bs := bsIntf.(*getter) var accepted ids.ID sender.SendAcceptedFrontierF = func(_ context.Context, _ ids.NodeID, _ uint32, containerID ids.ID) { @@ -121,9 +78,7 @@ func TestAcceptedFrontier(t *testing.T) { func TestFilterAccepted(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - - vm, sender, config := testSetup(t, ctrl) + bs, vm, sender := 
newTest(t) blkID0 := ids.GenerateTestID() blkID1 := ids.GenerateTestID() @@ -138,21 +93,6 @@ func TestFilterAccepted(t *testing.T) { StatusV: choices.Accepted, }} - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk1.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk1.ID(), blkID) - return blk1, nil - } - - bsIntf, err := New(vm, config) - require.NoError(err) - require.IsType(&getter{}, bsIntf) - bs := bsIntf.(*getter) - - blkIDs := []ids.ID{blkID0, blkID1, blkID2} vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: @@ -171,6 +111,7 @@ func TestFilterAccepted(t *testing.T) { accepted = frontier } + blkIDs := set.Of(blkID0, blkID1, blkID2) require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, blkIDs)) require.Len(accepted, 2) diff --git a/snow/engine/snowman/syncer/state_syncer.go b/snow/engine/snowman/syncer/state_syncer.go index 87e6d1786173..c912bbd12093 100644 --- a/snow/engine/snowman/syncer/state_syncer.go +++ b/snow/engine/snowman/syncer/state_syncer.go @@ -108,6 +108,10 @@ func New( } } +func (ss *stateSyncer) Context() *snow.ConsensusContext { + return ss.Ctx +} + func (ss *stateSyncer) StateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryBytes []byte) error { // ignores any late responses if requestID != ss.requestID { @@ -223,7 +227,7 @@ func (ss *stateSyncer) receivedStateSummaryFrontier(ctx context.Context) error { return nil } -func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error { // ignores any late responses if requestID != ss.requestID { ss.Ctx.Log.Debug("received out-of-sync AcceptedStateSummary message", @@ -248,10 
+252,10 @@ func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.Node ss.Ctx.Log.Debug("adding weight to summaries", zap.Stringer("nodeID", nodeID), zap.Stringer("subnetID", ss.Ctx.SubnetID), - zap.Stringers("summaryIDs", summaryIDs), + zap.Reflect("summaryIDs", summaryIDs), zap.Uint64("nodeWeight", nodeWeight), ) - for _, summaryID := range summaryIDs { + for summaryID := range summaryIDs { ws, ok := ss.weightedSummaries[summaryID] if !ok { ss.Ctx.Log.Debug("skipping summary", @@ -484,9 +488,7 @@ func (ss *stateSyncer) startup(ctx context.Context) error { } // list all beacons, to reach them for voting on frontier - for _, nodeID := range ss.StateSyncBeacons.GetValidatorIDs(ss.Ctx.SubnetID) { - ss.targetVoters.Add(nodeID) - } + ss.targetVoters.Add(ss.StateSyncBeacons.GetValidatorIDs(ss.Ctx.SubnetID)...) // check if there is an ongoing state sync; if so add its state summary // to the frontier to request votes on diff --git a/snow/engine/snowman/syncer/state_syncer_test.go b/snow/engine/snowman/syncer/state_syncer_test.go index 47a00e744471..d3f26b129907 100644 --- a/snow/engine/snowman/syncer/state_syncer_test.go +++ b/snow/engine/snowman/syncer/state_syncer_test.go @@ -9,6 +9,9 @@ import ( "errors" "math" "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -19,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -45,7 +49,14 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { nonStateSyncableVM := &block.TestVM{ TestVM: common.TestVM{T: t}, } - dummyGetter, err := getter.New(nonStateSyncableVM, *commonCfg) + 
dummyGetter, err := getter.New( + nonStateSyncableVM, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry(), + ) require.NoError(err) cfg, err := NewConfig(*commonCfg, nil, dummyGetter, nonStateSyncableVM) @@ -59,8 +70,6 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { require.False(enabled) // State syncableVM case - commonCfg.Ctx = snow.DefaultConsensusContextTest() // reset metrics - fullVM := &fullVM{ TestVM: &block.TestVM{ TestVM: common.TestVM{T: t}, @@ -69,7 +78,13 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { T: t, }, } - dummyGetter, err = getter.New(fullVM, *commonCfg) + dummyGetter, err = getter.New( + fullVM, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry()) require.NoError(err) cfg, err = NewConfig(*commonCfg, nil, dummyGetter, fullVM) @@ -790,7 +805,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, math.MaxInt32, - []ids.ID{summaryID}, + set.Of(summaryID), )) // responsiveVoter still pending @@ -803,7 +818,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { context.Background(), unsolicitedVoterID, responsiveVoterReqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) require.Zero(syncer.weightedSummaries[summaryID].weight) @@ -812,7 +827,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, responsiveVoterReqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) // responsiveBeacon not pending anymore @@ -913,7 +928,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, responsiveVoterReqID, - []ids.ID{unknownSummaryID}, + set.Of(unknownSummaryID), )) _, found = syncer.weightedSummaries[unknownSummaryID] require.False(found) @@ -924,7 +939,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, responsiveVoterReqID, - []ids.ID{summaryID}, + 
set.Of(summaryID), )) require.Zero(syncer.weightedSummaries[summaryID].weight) @@ -1058,7 +1073,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { context.Background(), voterID, reqID, - []ids.ID{summaryID, minoritySummaryID}, + set.Of(summaryID, minoritySummaryID), )) cumulatedWeight += vdrs.GetWeight(ctx.SubnetID, voterID) @@ -1067,7 +1082,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { context.Background(), voterID, reqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) cumulatedWeight += vdrs.GetWeight(ctx.SubnetID, voterID) @@ -1185,7 +1200,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { context.Background(), voterID, reqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) } } @@ -1328,7 +1343,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. context.Background(), voterID, reqID, - []ids.ID{minoritySummary1.ID(), minoritySummary2.ID()}, + set.Of(minoritySummary1.ID(), minoritySummary2.ID()), )) votingWeightStake += vdrs.GetWeight(ctx.SubnetID, voterID) @@ -1337,7 +1352,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. 
context.Background(), voterID, reqID, - []ids.ID{{'u', 'n', 'k', 'n', 'o', 'w', 'n', 'I', 'D'}}, + set.Of(ids.ID{'u', 'n', 'k', 'n', 'o', 'w', 'n', 'I', 'D'}), )) votingWeightStake += vdrs.GetWeight(ctx.SubnetID, voterID) } diff --git a/snow/engine/snowman/syncer/utils_test.go b/snow/engine/snowman/syncer/utils_test.go index f83a3006aaa1..b9a31fbc18bc 100644 --- a/snow/engine/snowman/syncer/utils_test.go +++ b/snow/engine/snowman/syncer/utils_test.go @@ -6,6 +6,7 @@ package syncer import ( "context" "testing" + "time" "github.com/stretchr/testify/require" @@ -83,7 +84,14 @@ func buildTestsObjects(t *testing.T, commonCfg *common.Config) ( T: t, }, } - dummyGetter, err := getter.New(fullVM, *commonCfg) + dummyGetter, err := getter.New( + fullVM, + commonCfg.Sender, + commonCfg.Ctx.Log, + time.Second, + 2000, + commonCfg.Ctx.Registerer, + ) require.NoError(err) cfg, err := NewConfig(*commonCfg, nil, dummyGetter, fullVM) diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 803c03237c96..b4c5e3e54b51 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -116,6 +116,16 @@ func newTransitive(config Config) (*Transitive, error) { config.Params.AlphaPreference, config.Params.AlphaConfidence, ) + polls, err := poll.NewSet( + factory, + config.Ctx.Log, + "", + config.Ctx.Registerer, + ) + if err != nil { + return nil, err + } + t := &Transitive{ Config: config, StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), @@ -129,12 +139,7 @@ func newTransitive(config Config) (*Transitive, error) { nonVerifieds: ancestor.NewTree(), nonVerifiedCache: nonVerifiedCache, acceptedFrontiers: acceptedFrontiers, - polls: poll.NewSet( - factory, - config.Ctx.Log, - "", - config.Ctx.Registerer, - ), + polls: polls, } return t, t.metrics.Initialize("", config.Ctx.Registerer) diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 
8993a4e90f9b..96cb2d72ba76 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -8,6 +8,7 @@ import ( "context" "errors" "testing" + "time" "github.com/stretchr/testify/require" @@ -50,7 +51,14 @@ func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, va vm.T = t engCfg.VM = vm - snowGetHandler, err := getter.New(vm, commonCfg) + snowGetHandler, err := getter.New( + vm, + sender, + commonCfg.Ctx.Log, + time.Second, + 2000, + commonCfg.Ctx.Registerer, + ) require.NoError(err) engCfg.AllGetsServer = snowGetHandler diff --git a/snow/networking/handler/handler.go b/snow/networking/handler/handler.go index 1a9a1d89b6ae..68ec1e5a0f36 100644 --- a/snow/networking/handler/handler.go +++ b/snow/networking/handler/handler.go @@ -31,6 +31,7 @@ import ( "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" @@ -454,7 +455,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { h.ctx.Log.Verbo("forwarding sync message to consensus", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } else { h.ctx.Log.Debug("forwarding sync message to consensus", @@ -487,7 +488,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { zap.Duration("msgHandlingTime", msgHandlingTime), zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } }() @@ -556,23 +557,11 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { return engine.GetStateSummaryFrontierFailed(ctx, nodeID, msg.RequestID) case 
*p2p.GetAcceptedStateSummary: - // TODO: Enforce that the numbers are sorted to make this verification - // more efficient. - if !utils.IsUnique(msg.Heights) { - h.ctx.Log.Debug("message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), - zap.Uint32("requestID", msg.RequestId), - zap.String("field", "Heights"), - ) - return engine.GetAcceptedStateSummaryFailed(ctx, nodeID, msg.RequestId) - } - return engine.GetAcceptedStateSummary( ctx, nodeID, msg.RequestId, - msg.Heights, + set.Of(msg.Heights...), ) case *p2p.AcceptedStateSummary: @@ -804,7 +793,7 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { h.ctx.Log.Verbo("forwarding async message to consensus", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } else { h.ctx.Log.Debug("forwarding async message to consensus", @@ -904,7 +893,7 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { if h.ctx.Log.Enabled(logging.Verbo) { h.ctx.Log.Verbo("forwarding chan message to consensus", zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } else { h.ctx.Log.Debug("forwarding chan message to consensus", @@ -933,7 +922,7 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { zap.Duration("processingTime", processingTime), zap.Duration("msgHandlingTime", msgHandlingTime), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } }() diff --git a/snow/networking/handler/handler_test.go b/snow/networking/handler/handler_test.go index c28da4bc8b71..0c87ed752f8b 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -26,6 +26,7 @@ import ( "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" 
+ "github.com/ava-labs/avalanchego/utils/set" commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) @@ -78,11 +79,11 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.GetAcceptedFrontierF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + bootstrapper.GetAcceptedFrontierF = func(context.Context, ids.NodeID, uint32) error { require.FailNow("GetAcceptedFrontier message should have timed out") return nil } - bootstrapper.GetAcceptedF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { + bootstrapper.GetAcceptedF = func(context.Context, ids.NodeID, uint32, set.Set[ids.ID]) error { called <- struct{}{} return nil } diff --git a/snow/networking/handler/parser.go b/snow/networking/handler/parser.go index 9349b073fbb6..148572484ef5 100644 --- a/snow/networking/handler/parser.go +++ b/snow/networking/handler/parser.go @@ -4,27 +4,18 @@ package handler import ( - "errors" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" ) -var errDuplicatedID = errors.New("inbound message contains duplicated ID") - -func getIDs(idsBytes [][]byte) ([]ids.ID, error) { - res := make([]ids.ID, len(idsBytes)) - idSet := set.NewSet[ids.ID](len(idsBytes)) - for i, bytes := range idsBytes { +func getIDs(idsBytes [][]byte) (set.Set[ids.ID], error) { + var res set.Set[ids.ID] + for _, bytes := range idsBytes { id, err := ids.ToID(bytes) if err != nil { return nil, err } - if idSet.Contains(id) { - return nil, errDuplicatedID - } - res[i] = id - idSet.Add(id) + res.Add(id) } return res, nil } diff --git a/snow/networking/router/chain_router_metrics.go b/snow/networking/router/chain_router_metrics.go index cfcc96134c29..58440377ba82 100644 --- a/snow/networking/router/chain_router_metrics.go +++ b/snow/networking/router/chain_router_metrics.go @@ -6,7 +6,7 @@ 
package router import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) // routerMetrics about router messages @@ -40,11 +40,10 @@ func newRouterMetrics(namespace string, registerer prometheus.Registerer) (*rout }, ) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(rMetrics.outstandingRequests), registerer.Register(rMetrics.longestRunningRequest), registerer.Register(rMetrics.droppedRequests), ) - return rMetrics, errs.Err + return rMetrics, err } diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 6355834dcf78..88c02b9a7f3f 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -312,7 +312,7 @@ func TestReliableMessages(t *testing.T) { ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewManager() - require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.NodeID{1}, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.BuildTestNodeID([]byte{1}), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -443,7 +443,7 @@ func TestReliableMessages(t *testing.T) { go func() { for i := 0; i < queriesToSend; i++ { - vdrIDs := set.Of(ids.NodeID{1}) + vdrIDs := set.Of(ids.BuildTestNodeID([]byte{1})) sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty, 0) time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) // #nosec G404 diff --git a/snow/networking/timeout/manager.go b/snow/networking/timeout/manager.go index d94c34a1f663..f1db8a1e01a0 100644 --- a/snow/networking/timeout/manager.go +++ b/snow/networking/timeout/manager.go @@ -163,7 +163,5 @@ func (m *manager) RegisterRequestToUnreachableValidator() { } func (m *manager) Stop() { - m.stopOnce.Do(func() { - m.tm.Stop() - }) + m.stopOnce.Do(m.tm.Stop) } diff --git 
a/snow/networking/timeout/manager_test.go b/snow/networking/timeout/manager_test.go index ce412150b1b6..582da3a9ea1b 100644 --- a/snow/networking/timeout/manager_test.go +++ b/snow/networking/timeout/manager_test.go @@ -39,7 +39,7 @@ func TestManagerFire(t *testing.T) { wg.Add(1) manager.RegisterRequest( - ids.NodeID{}, + ids.EmptyNodeID, ids.ID{}, true, ids.RequestID{}, diff --git a/snow/networking/tracker/resource_tracker.go b/snow/networking/tracker/resource_tracker.go index 721d531eea5b..7910c2fff475 100644 --- a/snow/networking/tracker/resource_tracker.go +++ b/snow/networking/tracker/resource_tracker.go @@ -11,10 +11,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const epsilon = 1e-9 @@ -321,13 +321,12 @@ func newCPUTrackerMetrics(namespace string, reg prometheus.Registerer) (*tracker Help: "Available space remaining (bytes) on the database volume", }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.processingTimeMetric), reg.Register(m.cpuMetric), reg.Register(m.diskReadsMetric), reg.Register(m.diskWritesMetric), reg.Register(m.diskSpaceAvailable), ) - return m, errs.Err + return m, err } diff --git a/snow/networking/tracker/resource_tracker_test.go b/snow/networking/tracker/resource_tracker_test.go index 4bc78eb4827a..64a897589f90 100644 --- a/snow/networking/tracker/resource_tracker_test.go +++ b/snow/networking/tracker/resource_tracker_test.go @@ -48,8 +48,8 @@ func TestCPUTracker(t *testing.T) { tracker, err := NewResourceTracker(prometheus.NewRegistry(), mockUser, meter.ContinuousFactory{}, time.Second) require.NoError(err) - node1 := ids.NodeID{1} - node2 := ids.NodeID{2} 
+ node1 := ids.BuildTestNodeID([]byte{1}) + node2 := ids.BuildTestNodeID([]byte{2}) // Note that all the durations between start and end are [halflife]. startTime1 := time.Now() diff --git a/snow/networking/tracker/targeter_test.go b/snow/networking/tracker/targeter_test.go index 23096adbed28..55974dbf4ac6 100644 --- a/snow/networking/tracker/targeter_test.go +++ b/snow/networking/tracker/targeter_test.go @@ -46,10 +46,10 @@ func TestNewTargeter(t *testing.T) { func TestTarget(t *testing.T) { ctrl := gomock.NewController(t) - vdr := ids.NodeID{1} + vdr := ids.BuildTestNodeID([]byte{1}) vdrWeight := uint64(1) totalVdrWeight := uint64(10) - nonVdr := ids.NodeID{2} + nonVdr := ids.BuildTestNodeID([]byte{2}) vdrs := validators.NewManager() require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, vdr, nil, ids.Empty, 1)) require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, ids.GenerateTestNodeID(), nil, ids.Empty, totalVdrWeight-vdrWeight)) diff --git a/snow/validators/gvalidators/validator_state_server.go b/snow/validators/gvalidators/validator_state_server.go index 5f0dbc7f46c4..ad9b75197947 100644 --- a/snow/validators/gvalidators/validator_state_server.go +++ b/snow/validators/gvalidators/validator_state_server.go @@ -66,7 +66,7 @@ func (s *Server) GetValidatorSet(ctx context.Context, req *pb.GetValidatorSetReq i := 0 for _, vdr := range vdrs { vdrPB := &pb.Validator{ - NodeId: vdr.NodeID[:], + NodeId: vdr.NodeID.Bytes(), Weight: vdr.Weight, } if vdr.PublicKey != nil { diff --git a/snow/validators/logger.go b/snow/validators/logger.go index 124aef423fc4..2e672a1827ba 100644 --- a/snow/validators/logger.go +++ b/snow/validators/logger.go @@ -7,7 +7,6 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -18,7 +17,6 
@@ var _ SetCallbackListener = (*logger)(nil) type logger struct { log logging.Logger - enabled *utils.Atomic[bool] subnetID ids.ID nodeIDs set.Set[ids.NodeID] } @@ -27,14 +25,12 @@ type logger struct { // the specified validators func NewLogger( log logging.Logger, - enabled *utils.Atomic[bool], subnetID ids.ID, nodeIDs ...ids.NodeID, ) SetCallbackListener { nodeIDSet := set.Of(nodeIDs...) return &logger{ log: log, - enabled: enabled, subnetID: subnetID, nodeIDs: nodeIDSet, } @@ -46,7 +42,7 @@ func (l *logger) OnValidatorAdded( txID ids.ID, weight uint64, ) { - if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + if l.nodeIDs.Contains(nodeID) { var pkBytes []byte if pk != nil { pkBytes = bls.PublicKeyToBytes(pk) @@ -65,7 +61,7 @@ func (l *logger) OnValidatorRemoved( nodeID ids.NodeID, weight uint64, ) { - if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + if l.nodeIDs.Contains(nodeID) { l.log.Info("node removed from validator set", zap.Stringer("subnetID", l.subnetID), zap.Stringer("nodeID", nodeID), @@ -79,7 +75,7 @@ func (l *logger) OnValidatorWeightChanged( oldWeight uint64, newWeight uint64, ) { - if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + if l.nodeIDs.Contains(nodeID) { l.log.Info("validator weight changed", zap.Stringer("subnetID", l.subnetID), zap.Stringer("nodeID", nodeID), diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go index 01a84201f91d..f93ab719e18a 100644 --- a/snow/validators/manager_test.go +++ b/snow/validators/manager_test.go @@ -324,9 +324,9 @@ func TestGetMap(t *testing.T) { func TestWeight(t *testing.T) { require := require.New(t) - vdr0 := ids.NodeID{1} + vdr0 := ids.BuildTestNodeID([]byte{1}) weight0 := uint64(93) - vdr1 := ids.NodeID{2} + vdr1 := ids.BuildTestNodeID([]byte{2}) weight1 := uint64(123) m := NewManager() @@ -411,7 +411,7 @@ func TestString(t *testing.T) { func TestAddCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) 
sk0, err := bls.NewSecretKey() require.NoError(err) pk0 := bls.PublicFromSecretKey(sk0) @@ -442,7 +442,7 @@ func TestAddCallback(t *testing.T) { func TestAddWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(1) weight1 := uint64(93) @@ -480,7 +480,7 @@ func TestAddWeightCallback(t *testing.T) { func TestRemoveWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) weight1 := uint64(92) @@ -518,7 +518,7 @@ func TestRemoveWeightCallback(t *testing.T) { func TestValidatorRemovedCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index 99651e7930e0..0067a520b5a9 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -273,9 +273,9 @@ func TestSetMap(t *testing.T) { func TestSetWeight(t *testing.T) { require := require.New(t) - vdr0 := ids.NodeID{1} + vdr0 := ids.BuildTestNodeID([]byte{1}) weight0 := uint64(93) - vdr1 := ids.NodeID{2} + vdr1 := ids.BuildTestNodeID([]byte{2}) weight1 := uint64(123) s := newSet() @@ -332,10 +332,10 @@ func TestSetString(t *testing.T) { require := require.New(t) nodeID0 := ids.EmptyNodeID - nodeID1 := ids.NodeID{ + nodeID1 := ids.BuildTestNodeID([]byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - } + }) s := newSet() require.NoError(s.Add(nodeID0, nil, ids.Empty, 1)) @@ -385,7 +385,7 @@ func (c *callbackListener) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight func TestSetAddCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) sk0, 
err := bls.NewSecretKey() require.NoError(err) pk0 := bls.PublicFromSecretKey(sk0) @@ -413,7 +413,7 @@ func TestSetAddCallback(t *testing.T) { func TestSetAddWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(1) weight1 := uint64(93) @@ -447,7 +447,7 @@ func TestSetAddWeightCallback(t *testing.T) { func TestSetRemoveWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) weight1 := uint64(92) @@ -481,7 +481,7 @@ func TestSetRemoveWeightCallback(t *testing.T) { func TestSetValidatorRemovedCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) diff --git a/tests/e2e/banff/suites.go b/tests/e2e/banff/suites.go index 6adeb1476cfa..37d0aa90156a 100644 --- a/tests/e2e/banff/suites.go +++ b/tests/e2e/banff/suites.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -25,7 +25,7 @@ var _ = ginkgo.Describe("[Banff]", func() { ginkgo.It("can send custom assets X->P and P->X", func() { keychain := e2e.Env.NewKeychain(1) - wallet := e2e.Env.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) + wallet := e2e.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) // Get the P-chain and the X-chain wallets pWallet := wallet.P() diff --git a/tests/e2e/c/dynamic_fees.go b/tests/e2e/c/dynamic_fees.go index f3a1daaf3d2c..8f15b6d43caf 100644 --- a/tests/e2e/c/dynamic_fees.go +++ 
b/tests/e2e/c/dynamic_fees.go @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) @@ -51,7 +51,7 @@ var _ = e2e.DescribeCChain("[Dynamic Fees]", func() { NodeID: node.GetID(), URI: node.GetProcessContext().URI, } - ethClient := e2e.Env.NewEthClient(nodeURI) + ethClient := e2e.NewEthClient(nodeURI) ginkgo.By("initializing a transaction signer") cChainID, err := ethClient.ChainID(e2e.DefaultContext()) @@ -143,8 +143,7 @@ var _ = e2e.DescribeCChain("[Dynamic Fees]", func() { ginkgo.By("sending funds at the current gas price", func() { // Create a recipient address - factory := secp256k1.Factory{} - recipientKey, err := factory.NewPrivateKey() + recipientKey, err := secp256k1.NewPrivateKey() require.NoError(err) recipientEthAddress := evm.GetEthAddress(recipientKey) diff --git a/tests/e2e/c/interchain_workflow.go b/tests/e2e/c/interchain_workflow.go index 47c430d4c12a..2c9bd198ec39 100644 --- a/tests/e2e/c/interchain_workflow.go +++ b/tests/e2e/c/interchain_workflow.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -34,13 +34,12 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { // the wallet to avoid having to verify that all nodes are at // the same height before initializing the wallet. 
nodeURI := e2e.Env.GetRandomNodeURI() - ethClient := e2e.Env.NewEthClient(nodeURI) + ethClient := e2e.NewEthClient(nodeURI) ginkgo.By("allocating a pre-funded key to send from and a recipient key to deliver to") senderKey := e2e.Env.AllocateFundedKey() senderEthAddress := evm.GetEthAddress(senderKey) - factory := secp256k1.Factory{} - recipientKey, err := factory.NewPrivateKey() + recipientKey, err := secp256k1.NewPrivateKey() require.NoError(err) recipientEthAddress := evm.GetEthAddress(recipientKey) @@ -80,7 +79,7 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { // matches on-chain state. ginkgo.By("initializing a keychain and associated wallet") keychain := secp256k1fx.NewKeychain(senderKey, recipientKey) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) xWallet := baseWallet.X() cWallet := baseWallet.C() pWallet := baseWallet.P() diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 2e9a86684df0..3245516262d2 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -4,22 +4,13 @@ package e2e_test import ( - "encoding/json" - "flag" - "fmt" - "os" "testing" ginkgo "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/tests/fixture" - "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" // ensure test packages are scanned by ginkgo _ "github.com/ava-labs/avalanchego/tests/e2e/banff" @@ -36,75 +27,18 @@ func TestE2E(t *testing.T) { ginkgo.RunSpecs(t, "e2e test suites") } -var ( - avalancheGoExecPath string - persistentNetworkDir string - usePersistentNetwork bool -) +var flagVars *e2e.FlagVars func init() { - flag.StringVar( - &avalancheGoExecPath, - "avalanchego-path", - 
os.Getenv(local.AvalancheGoPathEnvName), - fmt.Sprintf("avalanchego executable path (required if not using a persistent network). Also possible to configure via the %s env variable.", local.AvalancheGoPathEnvName), - ) - flag.StringVar( - &persistentNetworkDir, - "network-dir", - "", - fmt.Sprintf("[optional] the dir containing the configuration of a persistent network to target for testing. Useful for speeding up test development. Also possible to configure via the %s env variable.", local.NetworkDirEnvName), - ) - flag.BoolVar( - &usePersistentNetwork, - "use-persistent-network", - false, - "[optional] whether to target the persistent network identified by --network-dir.", - ) + flagVars = e2e.RegisterFlags() } var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Run only once in the first ginkgo process - - require := require.New(ginkgo.GinkgoT()) - - if usePersistentNetwork && len(persistentNetworkDir) == 0 { - persistentNetworkDir = os.Getenv(local.NetworkDirEnvName) - } - - // Load or create a test network - var network *local.LocalNetwork - if len(persistentNetworkDir) > 0 { - tests.Outf("{{yellow}}Using a persistent network configured at %s{{/}}\n", persistentNetworkDir) - - var err error - network, err = local.ReadNetwork(persistentNetworkDir) - require.NoError(err) - } else { - network = e2e.StartLocalNetwork(avalancheGoExecPath, e2e.DefaultNetworkDir) - } - - uris := network.GetURIs() - require.NotEmpty(uris, "network contains no nodes") - tests.Outf("{{green}}network URIs: {{/}} %+v\n", uris) - - testDataServerURI, err := fixture.ServeTestData(fixture.TestData{ - FundedKeys: network.FundedKeys, - }) - tests.Outf("{{green}}test data server URI: {{/}} %+v\n", testDataServerURI) - require.NoError(err) - - env := &e2e.TestEnvironment{ - NetworkDir: network.Dir, - URIs: uris, - TestDataServerURI: testDataServerURI, - } - bytes, err := json.Marshal(env) - require.NoError(err) - return bytes + return e2e.NewTestEnvironment(flagVars).Marshal() }, 
func(envBytes []byte) { // Run in every ginkgo process // Initialize the local test environment from the global state - e2e.InitTestEnvironment(envBytes) + e2e.InitSharedTestEnvironment(envBytes) }) diff --git a/tests/e2e/faultinjection/duplicate_node_id.go b/tests/e2e/faultinjection/duplicate_node_id.go index bcdd05cb35b0..1d865d840f51 100644 --- a/tests/e2e/faultinjection/duplicate_node_id.go +++ b/tests/e2e/faultinjection/duplicate_node_id.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/utils/set" ) diff --git a/tests/e2e/ignore.go b/tests/e2e/ignore.go new file mode 100644 index 000000000000..50332a1ac80e --- /dev/null +++ b/tests/e2e/ignore.go @@ -0,0 +1,13 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package e2e + +// This file is required by ginkgo to accurately report compilation errors in test packages. 
Without +// it, the following error will mask the actual errors: +// +// ``` +// Failed to compile e2e: +// +// github.com/ava-labs/avalanchego/tests/e2e: no non-test Go files in /path/to/avalanchego/tests/e2e +// ``` diff --git a/tests/e2e/p/interchain_workflow.go b/tests/e2e/p/interchain_workflow.go index 10c15fd002a7..44a6912715ef 100644 --- a/tests/e2e/p/interchain_workflow.go +++ b/tests/e2e/p/interchain_workflow.go @@ -18,7 +18,7 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -48,13 +48,12 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL }) ginkgo.By("creating wallet with a funded key to send from and recipient key to deliver to") - factory := secp256k1.Factory{} - recipientKey, err := factory.NewPrivateKey() + recipientKey, err := secp256k1.NewPrivateKey() require.NoError(err) keychain := e2e.Env.NewKeychain(1) keychain.Add(recipientKey) nodeURI := e2e.Env.GetRandomNodeURI() - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) xWallet := baseWallet.X() cWallet := baseWallet.C() pWallet := baseWallet.P() @@ -103,7 +102,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL // doesn't break interchain transfer. endTime := startTime.Add(30 * time.Second) - rewardKey, err := factory.NewPrivateKey() + rewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) const ( @@ -144,7 +143,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL // doesn't break interchain transfer. 
endTime := startTime.Add(15 * time.Second) - rewardKey, err := factory.NewPrivateKey() + rewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) _, err = pWallet.IssueAddPermissionlessDelegatorTx( @@ -203,7 +202,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL }) ginkgo.By("initializing a new eth client") - ethClient := e2e.Env.NewEthClient(nodeURI) + ethClient := e2e.NewEthClient(nodeURI) ginkgo.By("importing AVAX from the P-Chain to the C-Chain", func() { _, err := cWallet.IssueImportTx( diff --git a/tests/e2e/p/permissionless_subnets.go b/tests/e2e/p/permissionless_subnets.go index 1369685bf077..0521306b9b40 100644 --- a/tests/e2e/p/permissionless_subnets.go +++ b/tests/e2e/p/permissionless_subnets.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -32,7 +32,7 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { nodeURI := e2e.Env.GetRandomNodeURI() keychain := e2e.Env.NewKeychain(1) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() xWallet := baseWallet.X() diff --git a/tests/e2e/p/staking_rewards.go b/tests/e2e/p/staking_rewards.go index 009980c71afb..41a77985729b 100644 --- a/tests/e2e/p/staking_rewards.go +++ b/tests/e2e/p/staking_rewards.go @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" 
"github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -59,22 +59,21 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { e2e.WaitForHealthy(betaNode) ginkgo.By("generating reward keys") - factory := secp256k1.Factory{} - alphaValidationRewardKey, err := factory.NewPrivateKey() + alphaValidationRewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) - alphaDelegationRewardKey, err := factory.NewPrivateKey() + alphaDelegationRewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) - betaValidationRewardKey, err := factory.NewPrivateKey() + betaValidationRewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) - betaDelegationRewardKey, err := factory.NewPrivateKey() + betaDelegationRewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) - gammaDelegationRewardKey, err := factory.NewPrivateKey() + gammaDelegationRewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) - deltaDelegationRewardKey, err := factory.NewPrivateKey() + deltaDelegationRewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) rewardKeys := []*secp256k1.PrivateKey{ @@ -91,7 +90,7 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { fundedKey := e2e.Env.AllocateFundedKey() keychain.Add(fundedKey) nodeURI := e2e.Env.GetRandomNodeURI() - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() ginkgo.By("retrieving alpha node id and pop") @@ -262,7 +261,7 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { rewardBalances := make(map[ids.ShortID]uint64, len(rewardKeys)) for _, rewardKey := range rewardKeys { keychain := secp256k1fx.NewKeychain(rewardKey) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() balances, err := pWallet.Builder().GetBalance() 
require.NoError(err) diff --git a/tests/e2e/p/workflow.go b/tests/e2e/p/workflow.go index 96bf8bafc02c..3f0440ac49b6 100644 --- a/tests/e2e/p/workflow.go +++ b/tests/e2e/p/workflow.go @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" @@ -36,7 +36,7 @@ var _ = e2e.DescribePChain("[Workflow]", func() { func() { nodeURI := e2e.Env.GetRandomNodeURI() keychain := e2e.Env.NewKeychain(2) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() avaxAssetID := baseWallet.P().AVAXAssetID() diff --git a/tests/e2e/static-handlers/suites.go b/tests/e2e/static-handlers/suites.go index 2791f8085a80..67b12a0cbb04 100644 --- a/tests/e2e/static-handlers/suites.go +++ b/tests/e2e/static-handlers/suites.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -115,7 +115,6 @@ var _ = ginkgo.Describe("[StaticHandlers]", func() { ginkgo.It("can make calls to platformvm static api", func() { keys := []*secp256k1.PrivateKey{} - factory := secp256k1.Factory{} for _, key := range []string{ "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", @@ -125,7 +124,7 @@ var _ = ginkgo.Describe("[StaticHandlers]", func() { } { 
privKeyBytes, err := cb58.Decode(key) require.NoError(err) - pk, err := factory.ToPrivateKey(privKeyBytes) + pk, err := secp256k1.ToPrivateKey(privKeyBytes) require.NoError(err) keys = append(keys, pk) } @@ -142,16 +141,16 @@ var _ = ginkgo.Describe("[StaticHandlers]", func() { } } - genesisValidators := make([]api.PermissionlessValidator, len(keys)) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(keys)) for i, key := range keys { id := key.PublicKey().Address() addr, err := address.FormatBech32(hrp, id.Bytes()) require.NoError(err) - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC).Unix()), EndTime: json.Uint64(time.Date(1997, 1, 30, 0, 0, 0, 0, time.UTC).Unix()), - NodeID: ids.NodeID(id), + NodeID: ids.BuildTestNodeID(id[:]), }, RewardOwner: &api.Owner{ Threshold: 1, diff --git a/tests/e2e/x/interchain_workflow.go b/tests/e2e/x/interchain_workflow.go index 550689ff60c9..f0c2951feb84 100644 --- a/tests/e2e/x/interchain_workflow.go +++ b/tests/e2e/x/interchain_workflow.go @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -32,12 +32,11 @@ var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL nodeURI := e2e.Env.GetRandomNodeURI() ginkgo.By("creating wallet with a funded key to send from and recipient key to deliver to") - factory := secp256k1.Factory{} - recipientKey, err := factory.NewPrivateKey() + recipientKey, err := secp256k1.NewPrivateKey() require.NoError(err) 
keychain := e2e.Env.NewKeychain(1) keychain.Add(recipientKey) - baseWallet := e2e.Env.NewWallet(keychain, nodeURI) + baseWallet := e2e.NewWallet(keychain, nodeURI) xWallet := baseWallet.X() cWallet := baseWallet.C() pWallet := baseWallet.P() @@ -104,7 +103,7 @@ var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL }) ginkgo.By("initializing a new eth client") - ethClient := e2e.Env.NewEthClient(nodeURI) + ethClient := e2e.NewEthClient(nodeURI) ginkgo.By("importing AVAX from the X-Chain to the C-Chain", func() { _, err := cWallet.IssueImportTx( diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index d0ee950a53d6..7a1eb1bb6b91 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -83,7 +83,7 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { } keychain := secp256k1fx.NewKeychain(testKeys...) - baseWallet := e2e.Env.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) + baseWallet := e2e.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) avaxAssetID := baseWallet.X().AVAXAssetID() wallets := make([]primary.Wallet, len(testKeys)) diff --git a/tests/e2e/describe.go b/tests/fixture/e2e/describe.go similarity index 100% rename from tests/e2e/describe.go rename to tests/fixture/e2e/describe.go diff --git a/tests/fixture/e2e/env.go b/tests/fixture/e2e/env.go new file mode 100644 index 000000000000..54a4676482e1 --- /dev/null +++ b/tests/fixture/e2e/env.go @@ -0,0 +1,138 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package e2e + +import ( + "encoding/json" + "math/rand" + "os" + "path/filepath" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture" + "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +// Env is used to access shared test fixture. Intended to be +// initialized from SynchronizedBeforeSuite. +var Env *TestEnvironment + +func InitSharedTestEnvironment(envBytes []byte) { + require := require.New(ginkgo.GinkgoT()) + require.Nil(Env, "env already initialized") + Env = &TestEnvironment{ + require: require, + } + require.NoError(json.Unmarshal(envBytes, Env)) +} + +type TestEnvironment struct { + // The directory where the test network configuration is stored + NetworkDir string + // URIs used to access the API endpoints of nodes of the network + URIs []testnet.NodeURI + // The URI used to access the http server that allocates test data + TestDataServerURI string + + require *require.Assertions +} + +func (te *TestEnvironment) Marshal() []byte { + bytes, err := json.Marshal(te) + require.NoError(ginkgo.GinkgoT(), err) + return bytes +} + +// Initialize a new test environment with a shared network (either pre-existing or newly created). 
+func NewTestEnvironment(flagVars *FlagVars) *TestEnvironment { + require := require.New(ginkgo.GinkgoT()) + + persistentNetworkDir := flagVars.PersistentNetworkDir() + + // Load or create a test network + var network *local.LocalNetwork + if len(persistentNetworkDir) > 0 { + tests.Outf("{{yellow}}Using a persistent network configured at %s{{/}}\n", persistentNetworkDir) + + var err error + network, err = local.ReadNetwork(persistentNetworkDir) + require.NoError(err) + } else { + network = StartLocalNetwork(flagVars.AvalancheGoExecPath(), DefaultNetworkDir) + } + + uris := network.GetURIs() + require.NotEmpty(uris, "network contains no nodes") + tests.Outf("{{green}}network URIs: {{/}} %+v\n", uris) + + testDataServerURI, err := fixture.ServeTestData(fixture.TestData{ + FundedKeys: network.FundedKeys, + }) + tests.Outf("{{green}}test data server URI: {{/}} %+v\n", testDataServerURI) + require.NoError(err) + + return &TestEnvironment{ + NetworkDir: network.Dir, + URIs: uris, + TestDataServerURI: testDataServerURI, + } +} + +// Retrieve a random URI to naively attempt to spread API load across +// nodes. +func (te *TestEnvironment) GetRandomNodeURI() testnet.NodeURI { + r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404 + nodeURI := te.URIs[r.Intn(len(te.URIs))] + tests.Outf("{{blue}} targeting node %s with URI: %s{{/}}\n", nodeURI.NodeID, nodeURI.URI) + return nodeURI +} + +// Retrieve the network to target for testing. +func (te *TestEnvironment) GetNetwork() testnet.Network { + network, err := local.ReadNetwork(te.NetworkDir) + te.require.NoError(err) + return network +} + +// Retrieve the specified number of funded keys allocated for the caller's exclusive use. 
+func (te *TestEnvironment) AllocateFundedKeys(count int) []*secp256k1.PrivateKey { + keys, err := fixture.AllocateFundedKeys(te.TestDataServerURI, count) + te.require.NoError(err) + tests.Outf("{{blue}} allocated funded key(s): %+v{{/}}\n", keys) + return keys +} + +// Retrieve a funded key allocated for the caller's exclusive use. +func (te *TestEnvironment) AllocateFundedKey() *secp256k1.PrivateKey { + return te.AllocateFundedKeys(1)[0] +} + +// Create a new keychain with the specified number of test keys. +func (te *TestEnvironment) NewKeychain(count int) *secp256k1fx.Keychain { + keys := te.AllocateFundedKeys(count) + return secp256k1fx.NewKeychain(keys...) +} + +// Create a new private network that is not shared with other tests. +func (te *TestEnvironment) NewPrivateNetwork() testnet.Network { + // Load the shared network to retrieve its path and exec path + sharedNetwork, err := local.ReadNetwork(te.NetworkDir) + te.require.NoError(err) + + // The private networks dir is under the shared network dir to ensure it + // will be included in the artifact uploaded in CI. + privateNetworksDir := filepath.Join(sharedNetwork.Dir, PrivateNetworksDirName) + te.require.NoError(os.MkdirAll(privateNetworksDir, perms.ReadWriteExecute)) + + return StartLocalNetwork(sharedNetwork.ExecPath, privateNetworksDir) +} diff --git a/tests/fixture/e2e/flags.go b/tests/fixture/e2e/flags.go new file mode 100644 index 000000000000..c7838cb7c761 --- /dev/null +++ b/tests/fixture/e2e/flags.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package e2e + +import ( + "flag" + "fmt" + "os" + + "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" +) + +type FlagVars struct { + avalancheGoExecPath string + persistentNetworkDir string + usePersistentNetwork bool +} + +func (v *FlagVars) PersistentNetworkDir() string { + if v.usePersistentNetwork && len(v.persistentNetworkDir) == 0 { + return os.Getenv(local.NetworkDirEnvName) + } + return v.persistentNetworkDir +} + +func (v *FlagVars) AvalancheGoExecPath() string { + return v.avalancheGoExecPath +} + +func (v *FlagVars) UsePersistentNetwork() bool { + return v.usePersistentNetwork +} + +func RegisterFlags() *FlagVars { + vars := FlagVars{} + flag.StringVar( + &vars.avalancheGoExecPath, + "avalanchego-path", + os.Getenv(local.AvalancheGoPathEnvName), + fmt.Sprintf("avalanchego executable path (required if not using a persistent network). Also possible to configure via the %s env variable.", local.AvalancheGoPathEnvName), + ) + flag.StringVar( + &vars.persistentNetworkDir, + "network-dir", + "", + fmt.Sprintf("[optional] the dir containing the configuration of a persistent network to target for testing. Useful for speeding up test development. Also possible to configure via the %s env variable.", local.NetworkDirEnvName), + ) + flag.BoolVar( + &vars.usePersistentNetwork, + "use-persistent-network", + false, + "[optional] whether to target the persistent network identified by --network-dir.", + ) + + return &vars +} diff --git a/tests/e2e/e2e.go b/tests/fixture/e2e/helpers.go similarity index 69% rename from tests/e2e/e2e.go rename to tests/fixture/e2e/helpers.go index 130f33f1197c..15da611324e0 100644 --- a/tests/e2e/e2e.go +++ b/tests/fixture/e2e/helpers.go @@ -1,18 +1,14 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -// e2e implements the e2e tests. 
package e2e import ( "context" - "encoding/json" "errors" "fmt" "math/big" - "math/rand" "os" - "path/filepath" "strings" "time" @@ -26,11 +22,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/fixture" "github.com/ava-labs/avalanchego/tests/fixture/testnet" "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" @@ -68,75 +61,15 @@ const ( PrivateNetworksDirName = "private_networks" ) -// Env is used to access shared test fixture. Intended to be -// initialized by SynchronizedBeforeSuite. -var Env *TestEnvironment - -type TestEnvironment struct { - // The directory where the test network configuration is stored - NetworkDir string - // URIs used to access the API endpoints of nodes of the network - URIs []testnet.NodeURI - // The URI used to access the http server that allocates test data - TestDataServerURI string - - require *require.Assertions -} - -func InitTestEnvironment(envBytes []byte) { - require := require.New(ginkgo.GinkgoT()) - require.Nil(Env, "env already initialized") - Env = &TestEnvironment{ - require: require, - } - require.NoError(json.Unmarshal(envBytes, Env)) -} - -// Retrieve a random URI to naively attempt to spread API load across -// nodes. -func (te *TestEnvironment) GetRandomNodeURI() testnet.NodeURI { - r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404 - nodeURI := te.URIs[r.Intn(len(te.URIs))] - tests.Outf("{{blue}} targeting node %s with URI: %s{{/}}\n", nodeURI.NodeID, nodeURI.URI) - return nodeURI -} - -// Retrieve the network to target for testing. 
-func (te *TestEnvironment) GetNetwork() testnet.Network { - network, err := local.ReadNetwork(te.NetworkDir) - te.require.NoError(err) - return network -} - -// Retrieve the specified number of funded keys allocated for the caller's exclusive use. -func (te *TestEnvironment) AllocateFundedKeys(count int) []*secp256k1.PrivateKey { - keys, err := fixture.AllocateFundedKeys(te.TestDataServerURI, count) - te.require.NoError(err) - tests.Outf("{{blue}} allocated funded key(s): %+v{{/}}\n", keys) - return keys -} - -// Retrieve a funded key allocated for the caller's exclusive use. -func (te *TestEnvironment) AllocateFundedKey() *secp256k1.PrivateKey { - return te.AllocateFundedKeys(1)[0] -} - -// Create a new keychain with the specified number of test keys. -func (te *TestEnvironment) NewKeychain(count int) *secp256k1fx.Keychain { - keys := te.AllocateFundedKeys(count) - return secp256k1fx.NewKeychain(keys...) -} - // Create a new wallet for the provided keychain against the specified node URI. -// TODO(marun) Make this a regular function. -func (te *TestEnvironment) NewWallet(keychain *secp256k1fx.Keychain, nodeURI testnet.NodeURI) primary.Wallet { +func NewWallet(keychain *secp256k1fx.Keychain, nodeURI testnet.NodeURI) primary.Wallet { tests.Outf("{{blue}} initializing a new wallet for node %s with URI: %s {{/}}\n", nodeURI.NodeID, nodeURI.URI) baseWallet, err := primary.MakeWallet(DefaultContext(), &primary.WalletConfig{ URI: nodeURI.URI, AVAXKeychain: keychain, EthKeychain: keychain, }) - te.require.NoError(err) + require.NoError(ginkgo.GinkgoT(), err) return primary.NewWalletWithOptions( baseWallet, common.WithPostIssuanceFunc( @@ -148,30 +81,15 @@ func (te *TestEnvironment) NewWallet(keychain *secp256k1fx.Keychain, nodeURI tes } // Create a new eth client targeting the specified node URI. -// TODO(marun) Make this a regular function. 
-func (te *TestEnvironment) NewEthClient(nodeURI testnet.NodeURI) ethclient.Client { +func NewEthClient(nodeURI testnet.NodeURI) ethclient.Client { tests.Outf("{{blue}} initializing a new eth client for node %s with URI: %s {{/}}\n", nodeURI.NodeID, nodeURI.URI) nodeAddress := strings.Split(nodeURI.URI, "//")[1] uri := fmt.Sprintf("ws://%s/ext/bc/C/ws", nodeAddress) client, err := ethclient.Dial(uri) - te.require.NoError(err) + require.NoError(ginkgo.GinkgoT(), err) return client } -// Create a new private network that is not shared with other tests. -func (te *TestEnvironment) NewPrivateNetwork() testnet.Network { - // Load the shared network to retrieve its path and exec path - sharedNetwork, err := local.ReadNetwork(te.NetworkDir) - te.require.NoError(err) - - // The private networks dir is under the shared network dir to ensure it - // will be included in the artifact uploaded in CI. - privateNetworksDir := filepath.Join(sharedNetwork.Dir, PrivateNetworksDirName) - te.require.NoError(os.MkdirAll(privateNetworksDir, perms.ReadWriteExecute)) - - return StartLocalNetwork(sharedNetwork.ExecPath, privateNetworksDir) -} - // Helper simplifying use of a timed context by canceling the context on ginkgo teardown. func ContextWithTimeout(duration time.Duration) context.Context { ctx, cancel := context.WithTimeout(context.Background(), duration) @@ -228,7 +146,10 @@ func AddEphemeralNode(network testnet.Network, flags testnet.FlagsMap) testnet.N // Wait for the given node to report healthy. 
func WaitForHealthy(node testnet.Node) { - require.NoError(ginkgo.GinkgoT(), testnet.WaitForHealthy(DefaultContext(), node)) + // Need to use explicit context (vs DefaultContext()) to support use with DeferCleanup + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + require.NoError(ginkgo.GinkgoT(), testnet.WaitForHealthy(ctx, node)) } // Sends an eth transaction, waits for the transaction receipt to be issued @@ -277,12 +198,26 @@ func WithSuggestedGasPrice(ethClient ethclient.Client) common.Option { // Verify that a new node can bootstrap into the network. func CheckBootstrapIsPossible(network testnet.Network) { + require := require.New(ginkgo.GinkgoT()) + if len(os.Getenv(SkipBootstrapChecksEnvName)) > 0 { tests.Outf("{{yellow}}Skipping bootstrap check due to the %s env var being set", SkipBootstrapChecksEnvName) return } ginkgo.By("checking if bootstrap is possible with the current network state") - node := AddEphemeralNode(network, testnet.FlagsMap{}) + + // Call network.AddEphemeralNode instead of AddEphemeralNode to support + // checking for bootstrap implicitly on teardown via a function registered + // with ginkgo.DeferCleanup. It's not possible to call DeferCleanup from + // within a function called by DeferCleanup. 
+ node, err := network.AddEphemeralNode(ginkgo.GinkgoWriter, testnet.FlagsMap{}) + require.NoError(err) + + defer func() { + tests.Outf("Shutting down ephemeral node %s\n", node.GetID()) + require.NoError(node.Stop()) + }() + WaitForHealthy(node) } diff --git a/tests/fixture/test_data_server_test.go b/tests/fixture/test_data_server_test.go index daef840528b5..979c927fea7f 100644 --- a/tests/fixture/test_data_server_test.go +++ b/tests/fixture/test_data_server_test.go @@ -17,10 +17,9 @@ import ( func TestAllocateFundedKeys(t *testing.T) { require := require.New(t) - factory := secp256k1.Factory{} keys := make([]*secp256k1.PrivateKey, 5) for i := range keys { - key, err := factory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) keys[i] = key } diff --git a/tests/fixture/testnet/local/network.go b/tests/fixture/testnet/local/network.go index fa3b3d22d742..836a1489c2dd 100644 --- a/tests/fixture/testnet/local/network.go +++ b/tests/fixture/testnet/local/network.go @@ -12,6 +12,7 @@ import ( "io/fs" "os" "path/filepath" + "strconv" "time" "github.com/ava-labs/avalanchego/config" @@ -65,7 +66,7 @@ func FindNextNetworkID(rootDir string) (uint32, string, error) { continue } - dirPath = filepath.Join(rootDir, fmt.Sprint(networkID)) + dirPath = filepath.Join(rootDir, strconv.FormatUint(uint64(networkID), 10)) err := os.Mkdir(dirPath, perms.ReadWriteExecute) if err == nil { return networkID, dirPath, nil @@ -253,10 +254,9 @@ func (ln *LocalNetwork) PopulateLocalNetworkConfig(networkID uint32, nodeCount i if keyCount > 0 { // Ensure there are keys for genesis generation to fund - factory := secp256k1.Factory{} keys := make([]*secp256k1.PrivateKey, 0, keyCount) for i := 0; i < keyCount; i++ { - key, err := factory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() if err != nil { return fmt.Errorf("failed to generate private key: %w", err) } @@ -304,7 +304,7 @@ func (ln *LocalNetwork) PopulateNodeConfig(node *LocalNode, nodeParentDir 
string }) // Convert the network id to a string to ensure consistency in JSON round-tripping. - flags[config.NetworkNameKey] = fmt.Sprintf("%d", ln.Genesis.NetworkID) + flags[config.NetworkNameKey] = strconv.FormatUint(uint64(ln.Genesis.NetworkID), 10) // Ensure keys are added if necessary if err := node.EnsureKeys(); err != nil { @@ -673,7 +673,19 @@ func (ln *LocalNetwork) AddLocalNode(w io.Writer, node *LocalNode, isEphemeral b if err := node.WriteConfig(); err != nil { return nil, err } - return node, node.Start(w, ln.ExecPath) + + err = node.Start(w, ln.ExecPath) + if err != nil { + // Attempt to stop an unhealthy node to provide some assurance to the caller + // that an error condition will not result in a lingering process. + stopErr := node.Stop() + if stopErr != nil { + err = errors.Join(err, stopErr) + } + return nil, err + } + + return node, nil } func (ln *LocalNetwork) GetBootstrapIPsAndIDs() ([]string, []string, error) { diff --git a/tests/upgrade/upgrade_test.go b/tests/upgrade/upgrade_test.go index 8a633fd6244a..b8f8147c831a 100644 --- a/tests/upgrade/upgrade_test.go +++ b/tests/upgrade/upgrade_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/config" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" ) func TestUpgrade(t *testing.T) { diff --git a/utils/beacon/set_test.go b/utils/beacon/set_test.go index 2dc240404988..3f4d6cbc4053 100644 --- a/utils/beacon/set_test.go +++ b/utils/beacon/set_test.go @@ -16,9 +16,9 @@ import ( func TestSet(t *testing.T) { require := require.New(t) - id0 := ids.NodeID{0} - id1 := ids.NodeID{1} - id2 := ids.NodeID{2} + id0 := ids.BuildTestNodeID([]byte{0}) + id1 := ids.BuildTestNodeID([]byte{1}) + id2 := ids.BuildTestNodeID([]byte{2}) ip0 := ips.IPPort{ IP: net.IPv4zero, diff --git a/utils/compare/compare.go b/utils/compare/compare.go deleted file mode 100644 index 
13ec52f386cb..000000000000 --- a/utils/compare/compare.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package compare - -// Returns true iff the slices have the same elements, regardless of order. -func UnsortedEquals[T comparable](a, b []T) bool { - if len(a) != len(b) { - return false - } - m := make(map[T]int, len(a)) - for _, v := range a { - m[v]++ - } - for _, v := range b { - switch count := m[v]; count { - case 0: - // There were more instances of [v] in [b] than [a]. - return false - case 1: - delete(m, v) - default: - m[v] = count - 1 - } - } - return len(m) == 0 -} diff --git a/utils/compare/compare_test.go b/utils/compare/compare_test.go deleted file mode 100644 index e46bc838f72b..000000000000 --- a/utils/compare/compare_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package compare - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestUnsortedEquals(t *testing.T) { - require := require.New(t) - - require.True(UnsortedEquals([]int{}, []int{})) - require.True(UnsortedEquals(nil, []int{})) - require.True(UnsortedEquals([]int{}, nil)) - require.False(UnsortedEquals([]int{1}, nil)) - require.False(UnsortedEquals(nil, []int{1})) - require.True(UnsortedEquals([]int{1}, []int{1})) - require.False(UnsortedEquals([]int{1, 2}, []int{})) - require.False(UnsortedEquals([]int{1, 2}, []int{1})) - require.False(UnsortedEquals([]int{1}, []int{1, 2})) - require.True(UnsortedEquals([]int{2, 1}, []int{1, 2})) - require.True(UnsortedEquals([]int{1, 2}, []int{2, 1})) -} diff --git a/utils/crypto/bls/bls_benchmark_test.go b/utils/crypto/bls/bls_benchmark_test.go index a84cdadd80a5..cd3568005764 100644 --- a/utils/crypto/bls/bls_benchmark_test.go +++ b/utils/crypto/bls/bls_benchmark_test.go @@ -4,7 +4,7 @@ package bls import ( - "fmt" + 
"strconv" "testing" "github.com/stretchr/testify/require" @@ -31,7 +31,7 @@ func BenchmarkSign(b *testing.B) { privateKey, err := NewSecretKey() require.NoError(b, err) for _, messageSize := range sizes { - b.Run(fmt.Sprintf("%d", messageSize), func(b *testing.B) { + b.Run(strconv.Itoa(messageSize), func(b *testing.B) { message := utils.RandomBytes(messageSize) b.ResetTimer() @@ -49,7 +49,7 @@ func BenchmarkVerify(b *testing.B) { publicKey := PublicFromSecretKey(privateKey) for _, messageSize := range sizes { - b.Run(fmt.Sprintf("%d", messageSize), func(b *testing.B) { + b.Run(strconv.Itoa(messageSize), func(b *testing.B) { message := utils.RandomBytes(messageSize) signature := Sign(privateKey, message) @@ -72,7 +72,7 @@ func BenchmarkAggregatePublicKeys(b *testing.B) { } for _, size := range sizes { - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { + b.Run(strconv.Itoa(size), func(b *testing.B) { for n := 0; n < b.N; n++ { _, err := AggregatePublicKeys(keys[:size]) require.NoError(b, err) diff --git a/utils/crypto/ledger/ledger.go b/utils/crypto/ledger/ledger.go index 37de44fec4ea..5f8c34fe5715 100644 --- a/utils/crypto/ledger/ledger.go +++ b/utils/crypto/ledger/ledger.go @@ -8,6 +8,8 @@ import ( ledger "github.com/ava-labs/ledger-avalanche/go" + bip32 "github.com/tyler-smith/go-bip32" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/keychain" "github.com/ava-labs/avalanchego/utils/hashing" @@ -15,7 +17,7 @@ import ( ) const ( - rootPath = "m/44'/9000'/0'" + rootPath = "m/44'/9000'/0'" // BIP44: m / purpose' / coin_type' / account' ledgerBufferLimit = 8192 ledgerPathSize = 9 ) @@ -26,6 +28,7 @@ var _ keychain.Ledger = (*Ledger)(nil) // provides Avalanche-specific access. 
type Ledger struct { device *ledger.LedgerAvalanche + epk *bip32.Key } func New() (keychain.Ledger, error) { @@ -40,21 +43,37 @@ func addressPath(index uint32) string { } func (l *Ledger) Address(hrp string, addressIndex uint32) (ids.ShortID, error) { - _, hash, err := l.device.GetPubKey(addressPath(addressIndex), true, hrp, "") + resp, err := l.device.GetPubKey(addressPath(addressIndex), true, hrp, "") if err != nil { return ids.ShortEmpty, err } - return ids.ToShortID(hash) + return ids.ToShortID(resp.Hash) } func (l *Ledger) Addresses(addressIndices []uint32) ([]ids.ShortID, error) { + if l.epk == nil { + pk, chainCode, err := l.device.GetExtPubKey(rootPath, false, "", "") + if err != nil { + return nil, err + } + l.epk = &bip32.Key{ + Key: pk, + ChainCode: chainCode, + } + } + // derivation path rootPath/0 (BIP44 change level, when set to 0, known as external chain) + externalChain, err := l.epk.NewChildKey(0) + if err != nil { + return nil, err + } addresses := make([]ids.ShortID, len(addressIndices)) - for i, v := range addressIndices { - _, hash, err := l.device.GetPubKey(addressPath(v), false, "", "") + for i, addressIndex := range addressIndices { + // derivation path rootPath/0/v (BIP44 address index level) + address, err := externalChain.NewChildKey(addressIndex) if err != nil { return nil, err } - copy(addresses[i][:], hash) + copy(addresses[i][:], hashing.PubkeyBytesToAddress(address.Key)) } return addresses, nil } diff --git a/utils/crypto/ledger/ledger_test.go b/utils/crypto/ledger/ledger_test.go index 1ab163c95aa4..118dc8758d1b 100644 --- a/utils/crypto/ledger/ledger_test.go +++ b/utils/crypto/ledger/ledger_test.go @@ -18,8 +18,6 @@ const ( hrp = "fuji" ) -var factory secp256k1.Factory - // TestLedger will be skipped if a ledger is not connected. 
func TestLedger(t *testing.T) { require := require.New(t) @@ -66,7 +64,7 @@ func TestLedger(t *testing.T) { for i, addrIndex := range indices { sig := sigs[i] - pk, err := factory.RecoverHashPublicKey(rawHash, sig) + pk, err := secp256k1.RecoverPublicKeyFromHash(rawHash, sig) require.NoError(err) require.Equal(addresses[addrIndex], pk.Address()) } diff --git a/utils/crypto/secp256k1/rfc6979_test.go b/utils/crypto/secp256k1/rfc6979_test.go index d4c0a9c45191..5d9ee8b4f033 100644 --- a/utils/crypto/secp256k1/rfc6979_test.go +++ b/utils/crypto/secp256k1/rfc6979_test.go @@ -58,7 +58,6 @@ type test struct { } func TestRFC6979Compliance(t *testing.T) { - f := Factory{} for i, tt := range rfc6979Tests { t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { require := require.New(t) @@ -66,7 +65,7 @@ func TestRFC6979Compliance(t *testing.T) { skBytes, err := hex.DecodeString(tt.skHex) require.NoError(err) - sk, err := f.ToPrivateKey(skBytes) + sk, err := ToPrivateKey(skBytes) require.NoError(err) msgBytes := []byte(tt.msg) diff --git a/utils/crypto/secp256k1/secp256k1.go b/utils/crypto/secp256k1/secp256k1.go index 857921bf2ea5..022cce2861a0 100644 --- a/utils/crypto/secp256k1/secp256k1.go +++ b/utils/crypto/secp256k1/secp256k1.go @@ -54,16 +54,12 @@ var ( errMutatedSig = errors.New("signature was mutated from its original format") ) -type Factory struct { - Cache cache.LRU[ids.ID, *PublicKey] -} - -func (*Factory) NewPrivateKey() (*PrivateKey, error) { +func NewPrivateKey() (*PrivateKey, error) { k, err := secp256k1.GeneratePrivateKey() return &PrivateKey{sk: k}, err } -func (*Factory) ToPublicKey(b []byte) (*PublicKey, error) { +func ToPublicKey(b []byte) (*PublicKey, error) { if len(b) != PublicKeyLen { return nil, errInvalidPublicKeyLength } @@ -75,7 +71,7 @@ func (*Factory) ToPublicKey(b []byte) (*PublicKey, error) { }, err } -func (*Factory) ToPrivateKey(b []byte) (*PrivateKey, error) { +func ToPrivateKey(b []byte) (*PrivateKey, error) { if len(b) != PrivateKeyLen { 
return nil, errInvalidPrivateKeyLength } @@ -85,19 +81,11 @@ func (*Factory) ToPrivateKey(b []byte) (*PrivateKey, error) { }, nil } -func (f *Factory) RecoverPublicKey(msg, sig []byte) (*PublicKey, error) { - return f.RecoverHashPublicKey(hashing.ComputeHash256(msg), sig) +func RecoverPublicKey(msg, sig []byte) (*PublicKey, error) { + return RecoverPublicKeyFromHash(hashing.ComputeHash256(msg), sig) } -func (f *Factory) RecoverHashPublicKey(hash, sig []byte) (*PublicKey, error) { - cacheBytes := make([]byte, len(hash)+len(sig)) - copy(cacheBytes, hash) - copy(cacheBytes[len(hash):], sig) - id := hashing.ComputeHash256Array(cacheBytes) - if cachedPublicKey, ok := f.Cache.Get(id); ok { - return cachedPublicKey, nil - } - +func RecoverPublicKeyFromHash(hash, sig []byte) (*PublicKey, error) { if err := verifySECP256K1RSignatureFormat(sig); err != nil { return nil, err } @@ -116,9 +104,33 @@ func (f *Factory) RecoverHashPublicKey(hash, sig []byte) (*PublicKey, error) { return nil, errCompressed } - pubkey := &PublicKey{pk: rawPubkey} - f.Cache.Put(id, pubkey) - return pubkey, nil + return &PublicKey{pk: rawPubkey}, nil +} + +type RecoverCache struct { + cache.LRU[ids.ID, *PublicKey] +} + +func (r *RecoverCache) RecoverPublicKey(msg, sig []byte) (*PublicKey, error) { + return r.RecoverPublicKeyFromHash(hashing.ComputeHash256(msg), sig) +} + +func (r *RecoverCache) RecoverPublicKeyFromHash(hash, sig []byte) (*PublicKey, error) { + cacheBytes := make([]byte, len(hash)+len(sig)) + copy(cacheBytes, hash) + copy(cacheBytes[len(hash):], sig) + id := hashing.ComputeHash256Array(cacheBytes) + if cachedPublicKey, ok := r.Get(id); ok { + return cachedPublicKey, nil + } + + pubKey, err := RecoverPublicKeyFromHash(hash, sig) + if err != nil { + return nil, err + } + + r.Put(id, pubKey) + return pubKey, nil } type PublicKey struct { @@ -132,8 +144,7 @@ func (k *PublicKey) Verify(msg, sig []byte) bool { } func (k *PublicKey) VerifyHash(hash, sig []byte) bool { - factory := Factory{} - 
pk, err := factory.RecoverHashPublicKey(hash, sig) + pk, err := RecoverPublicKeyFromHash(hash, sig) if err != nil { return false } diff --git a/utils/crypto/secp256k1/secp256k1_benchmark_test.go b/utils/crypto/secp256k1/secp256k1_benchmark_test.go index b7f105b0dfe1..1d55f38f7d86 100644 --- a/utils/crypto/secp256k1/secp256k1_benchmark_test.go +++ b/utils/crypto/secp256k1/secp256k1_benchmark_test.go @@ -15,9 +15,7 @@ import ( func BenchmarkVerify(b *testing.B) { require := require.New(b) - f := &Factory{} - - privateKey, err := f.NewPrivateKey() + privateKey, err := NewPrivateKey() require.NoError(err) message := utils.RandomBytes(512) diff --git a/utils/crypto/secp256k1/secp256k1_test.go b/utils/crypto/secp256k1/secp256k1_test.go index 9a8dbdb89077..a2074dff5229 100644 --- a/utils/crypto/secp256k1/secp256k1_test.go +++ b/utils/crypto/secp256k1/secp256k1_test.go @@ -19,8 +19,7 @@ import ( func TestRecover(t *testing.T) { require := require.New(t) - f := Factory{} - key, err := f.NewPrivateKey() + key, err := NewPrivateKey() require.NoError(err) msg := []byte{1, 2, 3} @@ -28,38 +27,40 @@ func TestRecover(t *testing.T) { require.NoError(err) pub := key.PublicKey() - pubRec, err := f.RecoverPublicKey(msg, sig) + pubRec, err := RecoverPublicKey(msg, sig) require.NoError(err) require.Equal(pub, pubRec) + + require.True(pub.Verify(msg, sig)) } func TestCachedRecover(t *testing.T) { require := require.New(t) - f := Factory{Cache: cache.LRU[ids.ID, *PublicKey]{Size: 1}} - key, err := f.NewPrivateKey() + key, err := NewPrivateKey() require.NoError(err) msg := []byte{1, 2, 3} sig, err := key.Sign(msg) require.NoError(err) - pub1, err := f.RecoverPublicKey(msg, sig) + r := RecoverCache{LRU: cache.LRU[ids.ID, *PublicKey]{Size: 1}} + pub1, err := r.RecoverPublicKey(msg, sig) require.NoError(err) - pub2, err := f.RecoverPublicKey(msg, sig) + pub2, err := r.RecoverPublicKey(msg, sig) require.NoError(err) - require.Equal(pub1, pub2) + require.Equal(key.PublicKey(), pub1) + 
require.Equal(key.PublicKey(), pub2) } func TestExtensive(t *testing.T) { require := require.New(t) - f := Factory{} hash := hashing.ComputeHash256([]byte{1, 2, 3}) for i := 0; i < 1000; i++ { - key, err := f.NewPrivateKey() + key, err := NewPrivateKey() require.NoError(err) _, err = key.SignHash(hash) @@ -70,13 +71,12 @@ func TestExtensive(t *testing.T) { func TestGenRecreate(t *testing.T) { require := require.New(t) - f := Factory{} for i := 0; i < 1000; i++ { - sk, err := f.NewPrivateKey() + sk, err := NewPrivateKey() require.NoError(err) skBytes := sk.Bytes() - recoveredSk, err := f.ToPrivateKey(skBytes) + recoveredSk, err := ToPrivateKey(skBytes) require.NoError(err) require.Equal(sk.PublicKey(), recoveredSk.PublicKey()) @@ -86,8 +86,7 @@ func TestGenRecreate(t *testing.T) { func TestVerifyMutatedSignature(t *testing.T) { require := require.New(t) - f := Factory{} - sk, err := f.NewPrivateKey() + sk, err := NewPrivateKey() require.NoError(err) msg := []byte{'h', 'e', 'l', 'l', 'o'} @@ -100,15 +99,14 @@ func TestVerifyMutatedSignature(t *testing.T) { newSBytes := s.Bytes() copy(sig[32:], newSBytes[:]) - _, err = f.RecoverPublicKey(msg, sig) + _, err = RecoverPublicKey(msg, sig) require.ErrorIs(err, errMutatedSig) } func TestPrivateKeySECP256K1RUnmarshalJSON(t *testing.T) { require := require.New(t) - f := Factory{} - key, err := f.NewPrivateKey() + key, err := NewPrivateKey() require.NoError(err) keyJSON, err := key.MarshalJSON() @@ -239,13 +237,60 @@ func TestSigning(t *testing.T) { } } -func FuzzVerifySignature(f *testing.F) { - factory := Factory{} +func TestExportedMethods(t *testing.T) { + require := require.New(t) + + key := TestKeys()[0] + + pubKey := key.PublicKey() + require.Equal("111111111111111111116DBWJs", pubKey.addr.String()) + require.Equal("Q4MzFZZDPHRPAHFeDs3NiyyaZDvxHKivf", pubKey.Address().String()) + require.Equal("Q4MzFZZDPHRPAHFeDs3NiyyaZDvxHKivf", pubKey.addr.String()) + require.Equal("Q4MzFZZDPHRPAHFeDs3NiyyaZDvxHKivf", 
key.Address().String()) + + expectedPubKeyBytes := []byte{ + 0x03, 0x73, 0x93, 0x53, 0x47, 0x88, 0x44, 0x78, + 0xe4, 0x94, 0x5c, 0xd0, 0xfd, 0x94, 0x8e, 0xcf, + 0x08, 0x8b, 0x94, 0xdf, 0xc9, 0x20, 0x74, 0xf0, + 0xfb, 0x03, 0xda, 0x6f, 0x4d, 0xbc, 0x94, 0x35, + 0x7d, + } + require.Equal(expectedPubKeyBytes, pubKey.bytes) + + expectedPubKey, err := ToPublicKey(expectedPubKeyBytes) + require.NoError(err) + require.Equal(expectedPubKey.Address(), pubKey.Address()) + require.Equal(expectedPubKeyBytes, expectedPubKey.Bytes()) + expectedECDSAParams := struct { + X []byte + Y []byte + }{ + []byte{ + 0x73, 0x93, 0x53, 0x47, 0x88, 0x44, 0x78, 0xe4, + 0x94, 0x5c, 0xd0, 0xfd, 0x94, 0x8e, 0xcf, 0x08, + 0x8b, 0x94, 0xdf, 0xc9, 0x20, 0x74, 0xf0, 0xfb, + 0x03, 0xda, 0x6f, 0x4d, 0xbc, 0x94, 0x35, 0x7d, + }, + []byte{ + 0x78, 0xe7, 0x39, 0x45, 0x6c, 0x3b, 0xdb, 0x9e, + 0xe9, 0xb2, 0xa9, 0xf2, 0x84, 0xfa, 0x64, 0x32, + 0xd8, 0x4e, 0xf0, 0xfa, 0x3f, 0x82, 0xf5, 0x56, + 0x10, 0x40, 0x71, 0x7f, 0x1f, 0x5e, 0x8e, 0x27, + }, + } + require.Equal(expectedECDSAParams.X, pubKey.ToECDSA().X.Bytes()) + require.Equal(expectedECDSAParams.Y, pubKey.ToECDSA().Y.Bytes()) + + require.Equal(expectedECDSAParams.X, key.ToECDSA().X.Bytes()) + require.Equal(expectedECDSAParams.Y, key.ToECDSA().Y.Bytes()) +} + +func FuzzVerifySignature(f *testing.F) { f.Fuzz(func(t *testing.T, data []byte) { require := require.New(t) - privateKey, err := factory.NewPrivateKey() + privateKey, err := NewPrivateKey() require.NoError(err) publicKey := privateKey.PublicKey() @@ -253,9 +298,9 @@ func FuzzVerifySignature(f *testing.F) { sig, err := privateKey.Sign(data) require.NoError(err) - recoveredPublicKey, err := factory.RecoverPublicKey(data, sig) + recoveredPublicKey, err := RecoverPublicKey(data, sig) require.NoError(err) - require.Equal(publicKey.Bytes(), recoveredPublicKey.Bytes()) + require.Equal(publicKey, recoveredPublicKey) }) } diff --git a/utils/crypto/secp256k1/test_keys.go b/utils/crypto/secp256k1/test_keys.go 
index 09e484973519..3122f2617ddf 100644 --- a/utils/crypto/secp256k1/test_keys.go +++ b/utils/crypto/secp256k1/test_keys.go @@ -14,8 +14,7 @@ func TestKeys() []*PrivateKey { "ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN", "2RWLv6YVEXDiWLpaCbXhhqxtLbnFaKQsWPSSMSPhpWo47uJAeV", } - keys = make([]*PrivateKey, len(keyStrings)) - factory = Factory{} + keys = make([]*PrivateKey, len(keyStrings)) ) for i, key := range keyStrings { @@ -24,7 +23,7 @@ func TestKeys() []*PrivateKey { panic(err) } - keys[i], err = factory.ToPrivateKey(privKeyBytes) + keys[i], err = ToPrivateKey(privKeyBytes) if err != nil { panic(err) } diff --git a/utils/error.go b/utils/error.go new file mode 100644 index 000000000000..b58c60cd001a --- /dev/null +++ b/utils/error.go @@ -0,0 +1,13 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +func Err(errors ...error) error { + for _, err := range errors { + if err != nil { + return err + } + } + return nil +} diff --git a/utils/ips/ip_port.go b/utils/ips/ip_port.go index 472b5c372a2d..3ca5bfe176d4 100644 --- a/utils/ips/ip_port.go +++ b/utils/ips/ip_port.go @@ -62,7 +62,7 @@ func (ipPort IPPort) Equal(other IPPort) bool { } func (ipPort IPPort) String() string { - return net.JoinHostPort(ipPort.IP.String(), fmt.Sprintf("%d", ipPort.Port)) + return net.JoinHostPort(ipPort.IP.String(), strconv.FormatUint(uint64(ipPort.Port), 10)) } // IsZero returns if the IP or port is zeroed out diff --git a/utils/ips/ip_test.go b/utils/ips/ip_test.go index c3c569a8ae0a..30a72017e6da 100644 --- a/utils/ips/ip_test.go +++ b/utils/ips/ip_test.go @@ -5,7 +5,6 @@ package ips import ( "encoding/json" - "fmt" "net" "strconv" "testing" @@ -61,7 +60,7 @@ func TestIPPortEqual(t *testing.T) { }, } for i, tt := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { require := require.New(t) ipPort := IPDesc{} diff --git 
a/utils/linkedhashmap/linkedhashmap.go b/utils/linkedhashmap/linkedhashmap.go index 12e9569c3391..fa5a123a0942 100644 --- a/utils/linkedhashmap/linkedhashmap.go +++ b/utils/linkedhashmap/linkedhashmap.go @@ -17,7 +17,7 @@ var _ LinkedHashmap[int, struct{}] = (*linkedHashmap[int, struct{}])(nil) type Hashmap[K, V any] interface { Put(key K, val V) Get(key K) (val V, exists bool) - Delete(key K) + Delete(key K) (deleted bool) Len() int } @@ -63,11 +63,11 @@ func (lh *linkedHashmap[K, V]) Get(key K) (V, bool) { return lh.get(key) } -func (lh *linkedHashmap[K, V]) Delete(key K) { +func (lh *linkedHashmap[K, V]) Delete(key K) bool { lh.lock.Lock() defer lh.lock.Unlock() - lh.delete(key) + return lh.delete(key) } func (lh *linkedHashmap[K, V]) Len() int { @@ -114,11 +114,13 @@ func (lh *linkedHashmap[K, V]) get(key K) (V, bool) { return utils.Zero[V](), false } -func (lh *linkedHashmap[K, V]) delete(key K) { - if e, ok := lh.entryMap[key]; ok { +func (lh *linkedHashmap[K, V]) delete(key K) bool { + e, ok := lh.entryMap[key] + if ok { lh.entryList.Remove(e) delete(lh.entryMap, key) } + return ok } func (lh *linkedHashmap[K, V]) len() int { diff --git a/utils/linkedhashmap/linkedhashmap_test.go b/utils/linkedhashmap/linkedhashmap_test.go index 8bd7239ed5d9..0c95c30b24a8 100644 --- a/utils/linkedhashmap/linkedhashmap_test.go +++ b/utils/linkedhashmap/linkedhashmap_test.go @@ -62,7 +62,7 @@ func TestLinkedHashmap(t *testing.T) { require.Equal(key1, rkey1, "wrong key") require.Equal(1, val1, "wrong value") - lh.Delete(key0) + require.True(lh.Delete(key0)) require.Equal(1, lh.Len(), "wrong hashmap length") _, exists = lh.Get(key0) @@ -132,7 +132,7 @@ func TestIterator(t *testing.T) { // Should be empty require.False(iter.Next()) // Delete id1 - lh.Delete(id1) + require.True(lh.Delete(id1)) iter = lh.NewIterator() require.NotNil(iter) // Should immediately be exhausted @@ -169,8 +169,8 @@ func TestIterator(t *testing.T) { iter := lh.NewIterator() require.True(iter.Next()) 
require.True(iter.Next()) - lh.Delete(id1) - lh.Delete(id2) + require.True(lh.Delete(id1)) + require.True(lh.Delete(id2)) require.True(iter.Next()) require.Equal(id3, iter.Key()) require.Equal(3, iter.Value()) diff --git a/utils/metric/api_interceptor.go b/utils/metric/api_interceptor.go index 57810fce63b5..ab8e4fd8d70c 100644 --- a/utils/metric/api_interceptor.go +++ b/utils/metric/api_interceptor.go @@ -12,7 +12,7 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type APIInterceptor interface { @@ -55,8 +55,7 @@ func NewAPIInterceptor(namespace string, registerer prometheus.Registerer) (APII []string{"method"}, ) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(requestDurationCount), registerer.Register(requestDurationSum), registerer.Register(requestErrors), @@ -65,7 +64,7 @@ func NewAPIInterceptor(namespace string, registerer prometheus.Registerer) (APII requestDurationCount: requestDurationCount, requestDurationSum: requestDurationSum, requestErrors: requestErrors, - }, errs.Err + }, err } func (*apiInterceptor) InterceptRequest(i *rpc.RequestInfo) *http.Request { diff --git a/utils/resource/metrics.go b/utils/resource/metrics.go index 96c3c21ad204..e20458c42fb1 100644 --- a/utils/resource/metrics.go +++ b/utils/resource/metrics.go @@ -6,7 +6,7 @@ package resource import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type metrics struct { @@ -60,13 +60,12 @@ func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, e []string{"processID"}, ), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(m.numCPUCycles), registerer.Register(m.numDiskReads), registerer.Register(m.numDiskReadBytes), registerer.Register(m.numDiskWrites), 
registerer.Register(m.numDiskWritesBytes), ) - return m, errs.Err + return m, err } diff --git a/utils/sampler/rand_test.go b/utils/sampler/rand_test.go index b2ef3dfb0f60..362093a695ac 100644 --- a/utils/sampler/rand_test.go +++ b/utils/sampler/rand_test.go @@ -4,9 +4,9 @@ package sampler import ( - "fmt" "math" "math/rand" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -149,7 +149,7 @@ func TestRNG(t *testing.T) { }, } for i, test := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { require := require.New(t) source := &testSource{ diff --git a/utils/set/bits.go b/utils/set/bits.go index bf7f5f7b0e1f..344c8dff6781 100644 --- a/utils/set/bits.go +++ b/utils/set/bits.go @@ -4,7 +4,7 @@ package set import ( - "fmt" + "encoding/hex" "math/big" "math/bits" ) @@ -98,5 +98,5 @@ func BitsFromBytes(bytes []byte) Bits { // String returns the hex representation of this bitset func (b Bits) String() string { - return fmt.Sprintf("%x", b.bits.Bytes()) + return hex.EncodeToString(b.bits.Bytes()) } diff --git a/utils/sorting.go b/utils/sorting.go index a448b8f5e7ee..74f24abeb69f 100644 --- a/utils/sorting.go +++ b/utils/sorting.go @@ -87,16 +87,3 @@ func IsSortedAndUniqueByHash[T ~[]byte](s []T) bool { } return true } - -// Returns true iff the elements in [s] are unique. -func IsUnique[T comparable](s []T) bool { - // Can't use set.Set because it'd be a circular import. 
- asMap := make(map[T]struct{}, len(s)) - for _, elt := range s { - if _, ok := asMap[elt]; ok { - return false - } - asMap[elt] = struct{}{} - } - return true -} diff --git a/utils/sorting_test.go b/utils/sorting_test.go index 714fd7d87ec6..464959dd9588 100644 --- a/utils/sorting_test.go +++ b/utils/sorting_test.go @@ -104,31 +104,6 @@ func TestIsSortedAndUniqueSortable(t *testing.T) { require.False(IsSortedAndUnique(s)) } -func TestIsUnique(t *testing.T) { - require := require.New(t) - - var s []int - require.True(IsUnique(s)) - - s = []int{} - require.True(IsUnique(s)) - - s = []int{1} - require.True(IsUnique(s)) - - s = []int{1, 2} - require.True(IsUnique(s)) - - s = []int{1, 1} - require.False(IsUnique(s)) - - s = []int{2, 1} - require.True(IsUnique(s)) - - s = []int{1, 2, 1} - require.False(IsUnique(s)) -} - func TestSortByHash(t *testing.T) { require := require.New(t) diff --git a/utils/timer/adaptive_timeout_manager.go b/utils/timer/adaptive_timeout_manager.go index 95b284a48c5f..a6d00654c064 100644 --- a/utils/timer/adaptive_timeout_manager.go +++ b/utils/timer/adaptive_timeout_manager.go @@ -12,10 +12,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( @@ -138,14 +138,13 @@ func NewAdaptiveTimeoutManager( tm.timer = NewTimer(tm.timeout) tm.averager = math.NewAverager(float64(config.InitialTimeout), config.TimeoutHalflife, tm.clock.Time()) - errs := &wrappers.Errs{} - errs.Add( + err := utils.Err( metricsRegister.Register(tm.networkTimeoutMetric), metricsRegister.Register(tm.avgLatency), metricsRegister.Register(tm.numTimeouts), metricsRegister.Register(tm.numPendingTimeouts), ) - return tm, errs.Err + return 
tm, err } func (tm *adaptiveTimeoutManager) TimeoutDuration() time.Duration { diff --git a/version/compatibility.json b/version/compatibility.json index 0d8382e21678..9a63fba3eafc 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -1,4 +1,7 @@ { + "30": [ + "v1.10.15" + ], "29": [ "v1.10.13", "v1.10.14" diff --git a/version/constants.go b/version/constants.go index 356e159d37ff..a0d9b32b3993 100644 --- a/version/constants.go +++ b/version/constants.go @@ -9,6 +9,7 @@ import ( _ "embed" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" ) @@ -16,7 +17,7 @@ const ( Client = "avalanchego" // RPCChainVMProtocol should be bumped anytime changes are made which require // the plugin vm to upgrade to latest avalanchego release to be compatible. - RPCChainVMProtocol uint = 29 + RPCChainVMProtocol uint = 30 ) // These are globals that describe network upgrades and node versions @@ -24,7 +25,7 @@ var ( Current = &Semantic{ Major: 1, Minor: 10, - Patch: 14, + Patch: 15, } CurrentApp = &Application{ Name: Client, @@ -81,7 +82,6 @@ var ( constants.MainnetID: 793005, constants.FujiID: 47437, } - ApricotPhase4DefaultMinPChainHeight uint64 ApricotPhase5Times = map[uint32]time.Time{ constants.MainnetID: time.Date(2021, time.December, 2, 18, 0, 0, 0, time.UTC), @@ -102,6 +102,7 @@ var ( constants.MainnetID: time.Date(2023, time.April, 25, 15, 0, 0, 0, time.UTC), constants.FujiID: time.Date(2023, time.April, 6, 15, 0, 0, 0, time.UTC), } + CortinaXChainStopVertexID map[uint32]ids.ID // TODO: update this before release DTimes = map[uint32]time.Time{ @@ -129,6 +130,29 @@ func init() { } RPCChainVMProtocolCompatibility[rpcChainVMProtocol] = versions } + + // The mainnet stop vertex is well known. It can be verified on any fully + // synced node by looking at the parentID of the genesis block. 
+ // + // Ref: https://subnets.avax.network/x-chain/block/0 + mainnetXChainStopVertexID, err := ids.FromString("jrGWDh5Po9FMj54depyunNixpia5PN4aAYxfmNzU8n752Rjga") + if err != nil { + panic(err) + } + + // The fuji stop vertex is well known. It can be verified on any fully + // synced node by looking at the parentID of the genesis block. + // + // Ref: https://subnets-test.avax.network/x-chain/block/0 + fujiXChainStopVertexID, err := ids.FromString("2D1cmbiG36BqQMRyHt4kFhWarmatA1ighSpND3FeFgz3vFVtCZ") + if err != nil { + panic(err) + } + + CortinaXChainStopVertexID = map[uint32]ids.ID{ + constants.MainnetID: mainnetXChainStopVertexID, + constants.FujiID: fujiXChainStopVertexID, + } } func GetApricotPhase3Time(networkID uint32) time.Time { @@ -145,13 +169,6 @@ func GetApricotPhase4Time(networkID uint32) time.Time { return DefaultUpgradeTime } -func GetApricotPhase4MinPChainHeight(networkID uint32) uint64 { - if minHeight, exists := ApricotPhase4MinPChainHeight[networkID]; exists { - return minHeight - } - return ApricotPhase4DefaultMinPChainHeight -} - func GetApricotPhase5Time(networkID uint32) time.Time { if upgradeTime, exists := ApricotPhase5Times[networkID]; exists { return upgradeTime diff --git a/vms/avm/block/builder/builder_test.go b/vms/avm/block/builder/builder_test.go index 244b5db71561..fdab9d6cf064 100644 --- a/vms/avm/block/builder/builder_test.go +++ b/vms/avm/block/builder/builder_test.go @@ -16,7 +16,7 @@ import ( "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -26,7 +26,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" 
"github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/metrics" @@ -525,8 +524,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { Codec: parser.Codec(), } - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - baseDB := versiondb.New(baseDBManager.Current().Database) + baseDB := versiondb.New(memdb.New()) state, err := states.New(baseDB, parser, registerer, trackChecksums) require.NoError(err) diff --git a/vms/avm/block/executor/manager.go b/vms/avm/block/executor/manager.go index aa99ede6392f..dd9b8bfab400 100644 --- a/vms/avm/block/executor/manager.go +++ b/vms/avm/block/executor/manager.go @@ -39,8 +39,8 @@ type Manager interface { GetStatelessBlock(blkID ids.ID) (block.Block, error) NewBlock(block.Block) snowman.Block - // VerifyTx verifies that the transaction can be issued based on the - // currently preferred state. + // VerifyTx verifies that the transaction can be issued based on the currently + // preferred state. This should *not* be used to verify transactions in a block. 
VerifyTx(tx *txs.Tx) error // VerifyUniqueInputs verifies that the inputs are not duplicated in the diff --git a/vms/avm/block/parser.go b/vms/avm/block/parser.go index 4cdab44f25e2..230568149b6d 100644 --- a/vms/avm/block/parser.go +++ b/vms/avm/block/parser.go @@ -8,9 +8,9 @@ import ( "reflect" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" ) @@ -42,14 +42,13 @@ func NewParser(fxs []fxs.Fx) (Parser, error) { c := p.CodecRegistry() gc := p.GenesisCodecRegistry() - errs := wrappers.Errs{} - errs.Add( + err = utils.Err( c.RegisterType(&StandardBlock{}), gc.RegisterType(&StandardBlock{}), ) return &parser{ Parser: p, - }, errs.Err + }, err } func NewCustomParser( @@ -65,14 +64,13 @@ func NewCustomParser( c := p.CodecRegistry() gc := p.GenesisCodecRegistry() - errs := wrappers.Errs{} - errs.Add( + err = utils.Err( c.RegisterType(&StandardBlock{}), gc.RegisterType(&StandardBlock{}), ) return &parser{ Parser: p, - }, errs.Err + }, err } func (p *parser) ParseBlock(bytes []byte) (Block, error) { diff --git a/vms/avm/environment_test.go b/vms/avm/environment_test.go index a75fb315ccc9..e1e9e29f630e 100644 --- a/vms/avm/environment_test.go +++ b/vms/avm/environment_test.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -28,8 +28,8 @@ import ( 
"github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/sampler" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/block/executor" "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" @@ -77,15 +77,13 @@ var ( ) func init() { - factory := secp256k1.Factory{} - for _, key := range []string{ "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", } { keyBytes, _ := cb58.Decode(key) - pk, _ := factory.ToPrivateKey(keyBytes) + pk, _ := secp256k1.ToPrivateKey(keyBytes) keys = append(keys, pk) addrs = append(addrs, pk.PublicKey().Address()) } @@ -135,17 +133,15 @@ func setup(tb testing.TB, c *envConfig) *environment { genesisBytes := buildGenesisTestWithArgs(tb, genesisArgs) ctx := newContext(tb) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + baseDB := memdb.New() + m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) // NB: this lock is intentionally left locked when this function returns. // The caller of this function is responsible for unlocking. 
ctx.Lock.Lock() - userKeystore, err := keystore.CreateTestKeystore() - require.NoError(err) + userKeystore := keystore.New(logging.NoLog{}, memdb.New()) ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) for _, user := range c.keystoreUsers { @@ -183,7 +179,7 @@ func setup(tb testing.TB, c *envConfig) *environment { require.NoError(vm.Initialize( context.Background(), ctx, - baseDBManager.NewPrefixDBManager([]byte{1}), + prefixdb.New([]byte{1}, baseDB), genesisBytes, nil, configBytes, diff --git a/vms/avm/network/network.go b/vms/avm/network/network.go index 1c3e7e5558ae..a1d3337ddb23 100644 --- a/vms/avm/network/network.go +++ b/vms/avm/network/network.go @@ -180,8 +180,6 @@ func (n *network) issueTx(tx *txs.Tx) error { } func (n *network) gossipTx(ctx context.Context, txID ids.ID, msgBytes []byte) { - // This lock is just to ensure there isn't racy behavior between checking if - // the tx was gossiped and marking the tx as gossiped. n.recentTxsLock.Lock() _, has := n.recentTxs.Get(txID) n.recentTxs.Put(txID, struct{}{}) diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 15ebeba2c954..67a92a663879 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -1776,8 +1776,7 @@ func TestImportExportKey(t *testing.T) { env.vm.ctx.Lock.Unlock() }() - factory := secp256k1.Factory{} - sk, err := factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() require.NoError(err) importArgs := &ImportKeyArgs{ @@ -1821,8 +1820,7 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { env.vm.ctx.Lock.Unlock() }() - factory := secp256k1.Factory{} - sk, err := factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() require.NoError(err) args := ImportKeyArgs{ UserPass: api.UserPass{ diff --git a/vms/avm/states/state.go b/vms/avm/states/state.go index bf1ac3cde471..1167cdb37dce 100644 --- a/vms/avm/states/state.go +++ b/vms/avm/states/state.go @@ -22,9 +22,9 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" 
"github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -522,8 +522,7 @@ func (s *state) CommitBatch() (database.Batch, error) { } func (s *state) Close() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( s.utxoDB.Close(), s.statusDB.Close(), s.txDB.Close(), @@ -532,19 +531,16 @@ func (s *state) Close() error { s.singletonDB.Close(), s.db.Close(), ) - return errs.Err } func (s *state) write() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( s.writeUTXOs(), s.writeTxs(), s.writeBlockIDs(), s.writeBlocks(), s.writeMetadata(), ) - return errs.Err } func (s *state) writeUTXOs() error { @@ -691,15 +687,14 @@ func (s *state) Prune(lock sync.Locker, log logging.Logger) error { // attempt to commit to disk while a block is concurrently being // accepted. lock.Lock() - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( s.Commit(), statusIter.Error(), txIter.Error(), ) lock.Unlock() - if errs.Errored() { - return errs.Err + if err != nil { + return err } // We release the iterators here to allow the underlying database to @@ -751,8 +746,7 @@ func (s *state) Prune(lock sync.Locker, log logging.Logger) error { lock.Lock() defer lock.Unlock() - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( s.Commit(), statusIter.Error(), txIter.Error(), @@ -769,7 +763,7 @@ func (s *state) Prune(lock sync.Locker, log logging.Logger) error { zap.Duration("duration", time.Since(startTime)), ) - return errs.Err + return err } // Assumes [lock] is unlocked. 
@@ -891,12 +885,10 @@ func (s *state) initTxChecksum() error { return errStatusWithoutTx } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( txIt.Error(), statusIt.Error(), ) - return errs.Err } func (s *state) updateTxChecksum(modifiedID ids.ID) { diff --git a/vms/avm/txs/executor/semantic_verifier_test.go b/vms/avm/txs/executor/semantic_verifier_test.go index f73fb47a9a9b..72638762c39b 100644 --- a/vms/avm/txs/executor/semantic_verifier_test.go +++ b/vms/avm/txs/executor/semantic_verifier_test.go @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" @@ -21,7 +21,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/states" "github.com/ava-labs/avalanchego/vms/avm/txs" @@ -882,8 +881,7 @@ func TestSemanticVerifierImportTx(t *testing.T) { validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ctx.SubnetID, nil) ctx.ValidatorState = validatorState - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + m := atomic.NewMemory(prefixdb.New([]byte{0}, memdb.New())) ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) typeToFxIndex := make(map[reflect.Type]int) diff --git a/vms/avm/txs/mempool/mempool.go b/vms/avm/txs/mempool/mempool.go index b64002e8f39d..84a3583781ef 100644 --- 
a/vms/avm/txs/mempool/mempool.go +++ b/vms/avm/txs/mempool/mempool.go @@ -48,8 +48,7 @@ type Mempool interface { Get(txID ids.ID) *txs.Tx Remove(txs []*txs.Tx) - // Peek returns the next first tx that was added to the mempool whose size - // is less than or equal to maxTxSize. + // Peek returns the first tx in the mempool whose size is <= [maxTxSize]. Peek(maxTxSize int) *txs.Tx // RequestBuildBlock notifies the consensus engine that a block should be @@ -162,23 +161,20 @@ func (m *mempool) Has(txID ids.ID) bool { } func (m *mempool) Get(txID ids.ID) *txs.Tx { - unissuedTxs, _ := m.unissuedTxs.Get(txID) - return unissuedTxs + tx, _ := m.unissuedTxs.Get(txID) + return tx } func (m *mempool) Remove(txsToRemove []*txs.Tx) { for _, tx := range txsToRemove { txID := tx.ID() - if _, ok := m.unissuedTxs.Get(txID); !ok { - // If tx isn't in the mempool, there is nothing to do. + if !m.unissuedTxs.Delete(txID) { continue } - txBytes := tx.Bytes() - m.bytesAvailable += len(txBytes) + m.bytesAvailable += len(tx.Bytes()) m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) - m.unissuedTxs.Delete(txID) m.numTxs.Dec() inputs := tx.Unsigned.InputIDs() diff --git a/vms/avm/txs/parser.go b/vms/avm/txs/parser.go index f5f16d59432e..def42dfed501 100644 --- a/vms/avm/txs/parser.go +++ b/vms/avm/txs/parser.go @@ -11,9 +11,9 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/codec/reflectcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/avm/fxs" ) @@ -64,8 +64,7 @@ func NewCustomParser( gcm := codec.NewManager(math.MaxInt32) cm := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( c.RegisterType(&BaseTx{}), 
c.RegisterType(&CreateAssetTx{}), c.RegisterType(&OperationTx{}), @@ -80,8 +79,8 @@ func NewCustomParser( gc.RegisterType(&ExportTx{}), gcm.RegisterCodec(CodecVersion, gc), ) - if errs.Errored() { - return nil, errs.Err + if err != nil { + return nil, err } vm := &fxVM{ diff --git a/vms/avm/vm.go b/vms/avm/vm.go index 50bc646580a0..cae4514ff3a6 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/pubsub" @@ -29,11 +28,11 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/config" @@ -144,7 +143,7 @@ type Config struct { func (vm *VM) Initialize( _ context.Context, ctx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, _ []byte, configBytes []byte, @@ -181,7 +180,6 @@ func (vm *VM) Initialize( vm.AddressManager = avax.NewAddressManager(ctx) vm.Aliaser = ids.NewAliaser() - db := dbManager.Current().Database vm.ctx = ctx vm.appSender = appSender vm.baseDB = db @@ -308,12 +306,10 @@ func (vm *VM) Shutdown(context.Context) error { return nil } - errs := wrappers.Errs{} 
- errs.Add( + return utils.Err( vm.state.Close(), vm.baseDB.Close(), ) - return errs.Err } func (*VM) Version(context.Context) (string, error) { diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index 10a8ed50d051..3b4dc9558807 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -13,12 +13,11 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" @@ -42,14 +41,14 @@ func TestInvalidGenesis(t *testing.T) { err := vm.Initialize( context.Background(), - ctx, // context - manager.NewMemDB(version.Semantic1_0_0), // dbManager - nil, // genesisState - nil, // upgradeBytes - nil, // configBytes - make(chan common.Message, 1), // engineMessenger - nil, // fxs - nil, // AppSender + ctx, // context + memdb.New(), // database + nil, // genesisState + nil, // upgradeBytes + nil, // configBytes + make(chan common.Message, 1), // engineMessenger + nil, // fxs + nil, // AppSender ) require.ErrorIs(err, codec.ErrCantUnpackVersion) } @@ -68,12 +67,12 @@ func TestInvalidFx(t *testing.T) { genesisBytes := buildGenesisTest(t) err := vm.Initialize( context.Background(), - ctx, // context - manager.NewMemDB(version.Semantic1_0_0), // dbManager - genesisBytes, // genesisState - nil, // upgradeBytes - nil, // configBytes - make(chan common.Message, 1), // engineMessenger + ctx, // context + memdb.New(), // 
database + genesisBytes, // genesisState + nil, // upgradeBytes + nil, // configBytes + make(chan common.Message, 1), // engineMessenger []*common.Fx{ // fxs nil, }, @@ -96,12 +95,12 @@ func TestFxInitializationFailure(t *testing.T) { genesisBytes := buildGenesisTest(t) err := vm.Initialize( context.Background(), - ctx, // context - manager.NewMemDB(version.Semantic1_0_0), // dbManager - genesisBytes, // genesisState - nil, // upgradeBytes - nil, // configBytes - make(chan common.Message, 1), // engineMessenger + ctx, // context + memdb.New(), // database + genesisBytes, // genesisState + nil, // upgradeBytes + nil, // configBytes + make(chan common.Message, 1), // engineMessenger []*common.Fx{{ // fxs ID: ids.Empty, Fx: &FxTest{ diff --git a/vms/components/keystore/codec.go b/vms/components/keystore/codec.go index 6e547c9e4f86..5acb1725aa6e 100644 --- a/vms/components/keystore/codec.go +++ b/vms/components/keystore/codec.go @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) const ( @@ -28,12 +28,11 @@ func init() { lc := linearcodec.NewCustomMaxLength(math.MaxUint32) LegacyCodec = codec.NewManager(math.MaxInt32) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( Codec.RegisterCodec(CodecVersion, c), LegacyCodec.RegisterCodec(CodecVersion, lc), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/vms/components/keystore/user.go b/vms/components/keystore/user.go index 17c95c943555..561e2f52b819 100644 --- a/vms/components/keystore/user.go +++ b/vms/components/keystore/user.go @@ -43,8 +43,7 @@ type User interface { } type user struct { - factory secp256k1.Factory - db *encdb.Database + db *encdb.Database } // NewUserFromKeystore tracks a keystore user from the provided keystore @@ -125,7 +124,7 @@ func (u *user) GetKey(address 
ids.ShortID) (*secp256k1.PrivateKey, error) { if err != nil { return nil, err } - return u.factory.ToPrivateKey(bytes) + return secp256k1.ToPrivateKey(bytes) } func (u *user) Close() error { @@ -143,11 +142,9 @@ func NewKey(u User) (*secp256k1.PrivateKey, error) { // Create and store [numKeys] new keys that will be controlled by this user. func NewKeys(u User, numKeys int) ([]*secp256k1.PrivateKey, error) { - factory := secp256k1.Factory{} - keys := make([]*secp256k1.PrivateKey, numKeys) for i := range keys { - sk, err := factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() if err != nil { return nil, err } diff --git a/vms/components/keystore/user_test.go b/vms/components/keystore/user_test.go index a06c13a340a1..9f94cf03b7c6 100644 --- a/vms/components/keystore/user_test.go +++ b/vms/components/keystore/user_test.go @@ -37,8 +37,7 @@ func TestUserClosedDB(t *testing.T) { _, err = GetKeychain(u, nil) require.ErrorIs(err, database.ErrClosed) - factory := secp256k1.Factory{} - sk, err := factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() require.NoError(err) err = u.PutKeys(sk) @@ -57,8 +56,7 @@ func TestUser(t *testing.T) { require.NoError(err) require.Empty(addresses, "new user shouldn't have address") - factory := secp256k1.Factory{} - sk, err := factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() require.NoError(err) require.NoError(u.PutKeys(sk)) diff --git a/vms/components/message/codec.go b/vms/components/message/codec.go index d41de9b2dd71..3a5eee5416ca 100644 --- a/vms/components/message/codec.go +++ b/vms/components/message/codec.go @@ -6,8 +6,8 @@ package message import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const ( @@ -23,12 +23,11 @@ func init() { c = codec.NewManager(maxMessageSize) lc := 
linearcodec.NewCustomMaxLength(maxSliceLen) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( lc.RegisterType(&Tx{}), c.RegisterCodec(codecVersion, lc), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/vms/example/xsvm/execute/tx.go b/vms/example/xsvm/execute/tx.go index 6c8276af8e79..01bfc1fb7d6d 100644 --- a/vms/example/xsvm/execute/tx.go +++ b/vms/example/xsvm/execute/tx.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/example/xsvm/state" @@ -55,14 +56,12 @@ func (t *Tx) Transfer(tf *tx.Transfer) error { return errWrongChainID } - var errs wrappers.Errs - errs.Add( + return utils.Err( state.IncrementNonce(t.Database, t.Sender, tf.Nonce), state.DecreaseBalance(t.Database, t.Sender, tf.ChainID, t.TransferFee), state.DecreaseBalance(t.Database, t.Sender, tf.AssetID, tf.Amount), state.IncreaseBalance(t.Database, tf.To, tf.AssetID, tf.Amount), ) - return errs.Err } func (t *Tx) Export(e *tx.Export) error { diff --git a/vms/example/xsvm/tx/codec.go b/vms/example/xsvm/tx/codec.go index aa8ce5738415..c91a2165f1f6 100644 --- a/vms/example/xsvm/tx/codec.go +++ b/vms/example/xsvm/tx/codec.go @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) // Version is the current default codec version @@ -20,14 +20,13 @@ func init() { c := linearcodec.NewCustomMaxLength(math.MaxInt32) Codec = codec.NewManager(math.MaxInt32) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( 
c.RegisterType(&Transfer{}), c.RegisterType(&Export{}), c.RegisterType(&Import{}), Codec.RegisterCodec(Version, c), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/vms/example/xsvm/tx/tx.go b/vms/example/xsvm/tx/tx.go index 5ada3bf79129..fae58bae0806 100644 --- a/vms/example/xsvm/tx/tx.go +++ b/vms/example/xsvm/tx/tx.go @@ -10,8 +10,8 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" ) -var secp256k1r = secp256k1.Factory{ - Cache: cache.LRU[ids.ID, *secp256k1.PublicKey]{ +var secpCache = secp256k1.RecoverCache{ + LRU: cache.LRU[ids.ID, *secp256k1.PublicKey]{ Size: 2048, }, } @@ -56,7 +56,7 @@ func (tx *Tx) SenderID() (ids.ShortID, error) { return ids.ShortEmpty, err } - pk, err := secp256k1r.RecoverPublicKey(unsignedBytes, tx.Signature[:]) + pk, err := secpCache.RecoverPublicKey(unsignedBytes, tx.Signature[:]) if err != nil { return ids.ShortEmpty, err } diff --git a/vms/example/xsvm/vm.go b/vms/example/xsvm/vm.go index f164787e9972..3150fc59b481 100644 --- a/vms/example/xsvm/vm.go +++ b/vms/example/xsvm/vm.go @@ -13,7 +13,6 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -52,7 +51,7 @@ type VM struct { func (vm *VM) Initialize( _ context.Context, chainContext *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, _ []byte, _ []byte, @@ -67,8 +66,7 @@ func (vm *VM) Initialize( ) vm.chainContext = chainContext - vm.db = dbManager.Current().Database - + vm.db = db g, err := genesis.Parse(genesisBytes) if err != nil { return fmt.Errorf("failed to parse genesis bytes: %w", err) diff --git a/vms/metervm/block_vm.go b/vms/metervm/block_vm.go index 89200c3561cf..8f9fee1e247d 100644 --- a/vms/metervm/block_vm.go +++ 
b/vms/metervm/block_vm.go @@ -9,7 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/api/metrics" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" @@ -50,7 +50,7 @@ func NewBlockVM(vm block.ChainVM) block.ChainVM { func (vm *blockVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, diff --git a/vms/metervm/vertex_vm.go b/vms/metervm/vertex_vm.go index aa8a0e71d3d5..827bb535fcbd 100644 --- a/vms/metervm/vertex_vm.go +++ b/vms/metervm/vertex_vm.go @@ -9,7 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/api/metrics" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" @@ -37,7 +37,7 @@ type vertexVM struct { func (vm *vertexVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, diff --git a/vms/nftfx/fx.go b/vms/nftfx/fx.go index d11e47e42be1..f56ffcc10e5d 100644 --- a/vms/nftfx/fx.go +++ b/vms/nftfx/fx.go @@ -7,7 +7,7 @@ import ( "bytes" "errors" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -34,15 +34,13 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { log.Debug("initializing nft fx") c 
:= fx.VM.CodecRegistry() - errs := wrappers.Errs{} - errs.Add( + return utils.Err( c.RegisterType(&MintOutput{}), c.RegisterType(&TransferOutput{}), c.RegisterType(&MintOperation{}), c.RegisterType(&TransferOperation{}), c.RegisterType(&Credential{}), ) - return errs.Err } func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { diff --git a/vms/platformvm/api/static_service.go b/vms/platformvm/api/static_service.go index d06c2e757962..1cf2fbe29096 100644 --- a/vms/platformvm/api/static_service.go +++ b/vms/platformvm/api/static_service.go @@ -98,6 +98,9 @@ type Staker struct { StakeAmount *json.Uint64 `json:"stakeAmount,omitempty"` } +// GenesisValidator should to be used for genesis validators only. +type GenesisValidator Staker + // Owner is the repr. of a reward owner sent over APIs. type Owner struct { Locktime json.Uint64 `json:"locktime"` @@ -132,6 +135,15 @@ type PermissionlessValidator struct { Delegators *[]PrimaryDelegator `json:"delegators,omitempty"` } +// GenesisPermissionlessValidator should to be used for genesis validators only. +type GenesisPermissionlessValidator struct { + GenesisValidator + RewardOwner *Owner `json:"rewardOwner,omitempty"` + DelegationFee json.Float32 `json:"delegationFee"` + ExactDelegationFee *json.Uint32 `json:"exactDelegationFee,omitempty"` + Staked []UTXO `json:"staked,omitempty"` +} + // PermissionedValidator is the repr. of a permissioned validator sent over APIs. type PermissionedValidator struct { Staker @@ -170,15 +182,15 @@ type Chain struct { // [Chains] are the chains that exist at genesis. // [Time] is the Platform Chain's time at network genesis. 
type BuildGenesisArgs struct { - AvaxAssetID ids.ID `json:"avaxAssetID"` - NetworkID json.Uint32 `json:"networkID"` - UTXOs []UTXO `json:"utxos"` - Validators []PermissionlessValidator `json:"validators"` - Chains []Chain `json:"chains"` - Time json.Uint64 `json:"time"` - InitialSupply json.Uint64 `json:"initialSupply"` - Message string `json:"message"` - Encoding formatting.Encoding `json:"encoding"` + AvaxAssetID ids.ID `json:"avaxAssetID"` + NetworkID json.Uint32 `json:"networkID"` + UTXOs []UTXO `json:"utxos"` + Validators []GenesisPermissionlessValidator `json:"validators"` + Chains []Chain `json:"chains"` + Time json.Uint64 `json:"time"` + InitialSupply json.Uint64 `json:"initialSupply"` + Message string `json:"message"` + Encoding formatting.Encoding `json:"encoding"` } // BuildGenesisReply is the reply from BuildGenesis diff --git a/vms/platformvm/api/static_service_test.go b/vms/platformvm/api/static_service_test.go index 49822d9679d4..8bcbf4e766db 100644 --- a/vms/platformvm/api/static_service_test.go +++ b/vms/platformvm/api/static_service_test.go @@ -18,7 +18,7 @@ import ( func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -27,8 +27,8 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { Amount: 0, } weight := json.Uint64(987654321) - validator := PermissionlessValidator{ - Staker: Staker{ + validator := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ EndTime: 15, Weight: weight, NodeID: nodeID, @@ -47,7 +47,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator, }, Time: 5, @@ -62,7 +62,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { func TestBuildGenesisInvalidStakeWeight(t 
*testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -71,8 +71,8 @@ func TestBuildGenesisInvalidStakeWeight(t *testing.T) { Amount: 123456789, } weight := json.Uint64(0) - validator := PermissionlessValidator{ - Staker: Staker{ + validator := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 15, NodeID: nodeID, @@ -91,7 +91,7 @@ func TestBuildGenesisInvalidStakeWeight(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator, }, Time: 5, @@ -106,7 +106,7 @@ func TestBuildGenesisInvalidStakeWeight(t *testing.T) { func TestBuildGenesisInvalidEndtime(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -116,8 +116,8 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { } weight := json.Uint64(987654321) - validator := PermissionlessValidator{ - Staker: Staker{ + validator := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 5, NodeID: nodeID, @@ -136,7 +136,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator, }, Time: 5, @@ -151,7 +151,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { func TestBuildGenesisReturnsSortedValidators(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1} + nodeID := ids.BuildTestNodeID([]byte{1}) addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) @@ -161,8 +161,8 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { } weight := json.Uint64(987654321) - 
validator1 := PermissionlessValidator{ - Staker: Staker{ + validator1 := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 20, NodeID: nodeID, @@ -177,8 +177,8 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { }}, } - validator2 := PermissionlessValidator{ - Staker: Staker{ + validator2 := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 3, EndTime: 15, NodeID: nodeID, @@ -193,8 +193,8 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { }}, } - validator3 := PermissionlessValidator{ - Staker: Staker{ + validator3 := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 1, EndTime: 10, NodeID: nodeID, @@ -214,7 +214,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator1, validator2, validator3, diff --git a/vms/platformvm/block/builder/builder.go b/vms/platformvm/block/builder/builder.go index 66a90f99e7f5..13a1c7902b6d 100644 --- a/vms/platformvm/block/builder/builder.go +++ b/vms/platformvm/block/builder/builder.go @@ -36,22 +36,11 @@ var ( ErrEndOfTime = errors.New("program time is suspiciously far in the future") ErrNoPendingBlocks = errors.New("no pending blocks") - ErrChainNotSynced = errors.New("chain not synced") ) type Builder interface { mempool.Mempool mempool.BlockTimer - Network - - // set preferred block on top of which we'll build next - SetPreference(blockID ids.ID) - - // get preferred block on top of which we'll build next - Preferred() (snowman.Block, error) - - // AddUnverifiedTx verifier the tx before adding it to mempool - AddUnverifiedTx(tx *txs.Tx) error // BuildBlock is called on timer clock to attempt to create // next block @@ -64,15 +53,11 @@ type Builder interface { // builder implements a simple builder to convert txs into valid blocks type builder struct { mempool.Mempool - 
Network txBuilder txbuilder.Builder txExecutorBackend *txexecutor.Backend blkManager blockexecutor.Manager - // ID of the preferred block to build on top of - preferredBlockID ids.ID - // channel to send messages to the consensus engine toEngine chan<- common.Message @@ -88,7 +73,6 @@ func New( txExecutorBackend *txexecutor.Backend, blkManager blockexecutor.Manager, toEngine chan<- common.Message, - appSender common.AppSender, ) Builder { builder := &builder{ Mempool: mempool, @@ -100,63 +84,10 @@ func New( builder.timer = timer.NewTimer(builder.setNextBuildBlockTime) - builder.Network = NewNetwork( - txExecutorBackend.Ctx, - builder, - appSender, - ) - go txExecutorBackend.Ctx.Log.RecoverAndPanic(builder.timer.Dispatch) return builder } -func (b *builder) SetPreference(blockID ids.ID) { - if blockID == b.preferredBlockID { - // If the preference didn't change, then this is a noop - return - } - b.preferredBlockID = blockID - b.ResetBlockTimer() -} - -func (b *builder) Preferred() (snowman.Block, error) { - return b.blkManager.GetBlock(b.preferredBlockID) -} - -// AddUnverifiedTx verifies a transaction and attempts to add it to the mempool -func (b *builder) AddUnverifiedTx(tx *txs.Tx) error { - if !b.txExecutorBackend.Bootstrapped.Get() { - return ErrChainNotSynced - } - - txID := tx.ID() - if b.Mempool.Has(txID) { - // If the transaction is already in the mempool - then it looks the same - // as if it was successfully added - return nil - } - - verifier := txexecutor.MempoolTxVerifier{ - Backend: b.txExecutorBackend, - ParentID: b.preferredBlockID, // We want to build off of the preferred block - StateVersions: b.blkManager, - Tx: tx, - } - if err := tx.Unsigned.Visit(&verifier); err != nil { - b.MarkDropped(txID, err) - return err - } - - // If we are partially syncing the Primary Network, we should not be - // maintaining the transaction mempool locally. 
- if !b.txExecutorBackend.Config.PartialSyncPrimaryNetwork { - if err := b.Mempool.Add(tx); err != nil { - return err - } - } - return b.GossipTx(tx) -} - // BuildBlock builds a block to be added to consensus. // This method removes the transactions from the returned // blocks from the mempool. @@ -186,11 +117,11 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { // Only modifies state to remove expired proposal txs. func (b *builder) buildBlock() (block.Block, error) { // Get the block to build on top of and retrieve the new block's context. - preferred, err := b.Preferred() + preferredID := b.blkManager.Preferred() + preferred, err := b.blkManager.GetBlock(preferredID) if err != nil { return nil, err } - preferredID := preferred.ID() nextHeight := preferred.Height() + 1 preferredState, ok := b.blkManager.GetState(preferredID) if !ok { @@ -244,36 +175,6 @@ func (b *builder) ResetBlockTimer() { b.timer.SetTimeoutIn(0) } -// dropExpiredStakerTxs drops add validator/delegator transactions in the -// mempool whose start time is not sufficiently far in the future -// (i.e. within local time plus [MaxFutureStartFrom]). -func (b *builder) dropExpiredStakerTxs(timestamp time.Time) { - minStartTime := timestamp.Add(txexecutor.SyncBound) - for b.Mempool.HasStakerTx() { - tx := b.Mempool.PeekStakerTx() - startTime := tx.Unsigned.(txs.Staker).StartTime() - if !startTime.Before(minStartTime) { - // The next proposal tx in the mempool starts sufficiently far in - // the future. 
- return - } - - txID := tx.ID() - err := fmt.Errorf( - "synchrony bound (%s) is later than staker start time (%s)", - minStartTime, - startTime, - ) - - b.Mempool.Remove([]*txs.Tx{tx}) - b.Mempool.MarkDropped(txID, err) // cache tx as dropped - b.txExecutorBackend.Ctx.Log.Debug("dropping tx", - zap.Stringer("txID", txID), - zap.Error(err), - ) - } -} - func (b *builder) setNextBuildBlockTime() { ctx := b.txExecutorBackend.Ctx @@ -296,11 +197,12 @@ func (b *builder) setNextBuildBlockTime() { } // Wake up when it's time to add/remove the next validator/delegator - preferredState, ok := b.blkManager.GetState(b.preferredBlockID) + preferredID := b.blkManager.Preferred() + preferredState, ok := b.blkManager.GetState(preferredID) if !ok { // The preferred block should always be a decision block ctx.Log.Error("couldn't get preferred block state", - zap.Stringer("preferredID", b.preferredBlockID), + zap.Stringer("preferredID", preferredID), zap.Stringer("lastAcceptedID", b.blkManager.LastAccepted()), ) return @@ -309,7 +211,7 @@ func (b *builder) setNextBuildBlockTime() { nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) if err != nil { ctx.Log.Error("couldn't get next staker change time", - zap.Stringer("preferredID", b.preferredBlockID), + zap.Stringer("preferredID", preferredID), zap.Stringer("lastAcceptedID", b.blkManager.LastAccepted()), zap.Error(err), ) @@ -368,7 +270,13 @@ func buildBlock( } // Clean out the mempool's transactions with invalid timestamps. - builder.dropExpiredStakerTxs(timestamp) + droppedStakerTxIDs := mempool.DropExpiredStakerTxs(builder.Mempool, timestamp.Add(txexecutor.SyncBound)) + for _, txID := range droppedStakerTxIDs { + builder.txExecutorBackend.Ctx.Log.Debug("dropping tx", + zap.Stringer("txID", txID), + zap.Error(err), + ) + } // If there is no reason to build a block, don't. 
if !builder.Mempool.HasTxs() && !forceAdvanceTime { diff --git a/vms/platformvm/block/builder/builder_test.go b/vms/platformvm/block/builder/builder_test.go index d945c3555c12..2bf97a8c8ef2 100644 --- a/vms/platformvm/block/builder/builder_test.go +++ b/vms/platformvm/block/builder/builder_test.go @@ -51,7 +51,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { env.sender.SendAppGossipF = func(context.Context, []byte) error { return nil } - require.NoError(env.Builder.AddUnverifiedTx(tx)) + require.NoError(env.network.IssueTx(context.Background(), tx)) require.True(env.mempool.Has(txID)) // show that build block include that tx and removes it from mempool @@ -110,7 +110,7 @@ func TestNoErrorOnUnexpectedSetPreferenceDuringBootstrapping(t *testing.T) { require.NoError(t, shutdownEnvironment(env)) }() - env.Builder.SetPreference(ids.GenerateTestID()) // should not panic + require.False(t, env.blkManager.SetPreference(ids.GenerateTestID())) // should not panic } func TestGetNextStakerToReward(t *testing.T) { diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index 58c71418b6e2..f419c38c4ac0 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -18,7 +18,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" @@ -35,13 +35,12 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - 
"github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -78,9 +77,19 @@ var ( testSubnet1 *txs.Tx testSubnet1ControlKeys = preFundedKeys[0:3] + // Node IDs of genesis validators. Initialized in init function + genesisNodeIDs []ids.NodeID + errMissing = errors.New("missing") ) +func init() { + genesisNodeIDs = make([]ids.NodeID, len(preFundedKeys)) + for i := range preFundedKeys { + genesisNodeIDs[i] = ids.GenerateTestNodeID() + } +} + type mutableSharedMemory struct { atomic.SharedMemory } @@ -89,6 +98,7 @@ type environment struct { Builder blkManager blockexecutor.Manager mempool mempool.Mempool + network network.Network sender *common.SenderTest isBootstrapped *utils.Atomic[bool] @@ -116,8 +126,7 @@ func newEnvironment(t *testing.T) *environment { } res.isBootstrapped.Set(true) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - res.baseDB = versiondb.New(baseDBManager.Current().Database) + res.baseDB = versiondb.New(memdb.New()) res.ctx, res.msm = defaultCtx(res.baseDB) res.ctx.Lock.Lock() @@ -160,7 +169,7 @@ func newEnvironment(t *testing.T) *environment { metrics, err := metrics.New("", registerer) require.NoError(err) - res.mempool, err = mempool.NewMempool("mempool", registerer, res) + res.mempool, err = mempool.New("mempool", registerer, res) require.NoError(err) res.blkManager = blockexecutor.NewManager( @@ -171,16 +180,23 @@ func 
newEnvironment(t *testing.T) *environment { pvalidators.TestManager, ) + res.network = network.New( + res.backend.Ctx, + res.blkManager, + res.mempool, + res.backend.Config.PartialSyncPrimaryNetwork, + res.sender, + ) + res.Builder = New( res.mempool, res.txBuilder, &res.backend, res.blkManager, nil, // toEngine, - res.sender, ) - res.Builder.SetPreference(genesisID) + res.blkManager.SetPreference(genesisID) addSubnet(t, res) return res @@ -239,7 +255,6 @@ func defaultState( ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) require.NoError(err) @@ -361,13 +376,12 @@ func buildGenesisTest(t *testing.T, ctx *snow.Context) []byte { } } - genesisValidators := make([]api.PermissionlessValidator, len(preFundedKeys)) - for i, key := range preFundedKeys { - nodeID := ids.NodeID(key.PublicKey().Address()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -419,10 +433,22 @@ func shutdownEnvironment(env *environment) error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( env.state.Close(), env.baseDB.Close(), ) - return errs.Err +} + +func getValidTx(txBuilder txbuilder.Builder, t *testing.T) *txs.Tx { + tx, err := txBuilder.NewCreateChainTx( + testSubnet1.ID(), + nil, + constants.AVMID, + nil, + "chain name", + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + ) + require.NoError(t, err) + return tx } diff --git a/vms/platformvm/block/builder/network.go b/vms/platformvm/block/builder/network.go deleted file mode 100644 index 
3e1576d958fb..000000000000 --- a/vms/platformvm/block/builder/network.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// TODO: consider moving the network implementation to a separate package - -package builder - -import ( - "context" - "fmt" - "time" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/vms/components/message" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -const ( - // We allow [recentCacheSize] to be fairly large because we only store hashes - // in the cache, not entire transactions. - recentCacheSize = 512 -) - -var _ Network = (*network)(nil) - -type Network interface { - common.AppHandler - - // GossipTx gossips the transaction to some of the connected peers - GossipTx(tx *txs.Tx) error -} - -type network struct { - ctx *snow.Context - blkBuilder *builder - - // gossip related attributes - appSender common.AppSender - recentTxs *cache.LRU[ids.ID, struct{}] -} - -func NewNetwork( - ctx *snow.Context, - blkBuilder *builder, - appSender common.AppSender, -) Network { - return &network{ - ctx: ctx, - blkBuilder: blkBuilder, - appSender: appSender, - recentTxs: &cache.LRU[ids.ID, struct{}]{Size: recentCacheSize}, - } -} - -func (*network) CrossChainAppRequestFailed(context.Context, ids.ID, uint32) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) CrossChainAppRequest(context.Context, ids.ID, uint32, time.Time, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. 
- return nil -} - -func (*network) CrossChainAppResponse(context.Context, ids.ID, uint32, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) AppRequestFailed(context.Context, ids.NodeID, uint32) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) AppRequest(context.Context, ids.NodeID, uint32, time.Time, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) AppResponse(context.Context, ids.NodeID, uint32, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (n *network) AppGossip(_ context.Context, nodeID ids.NodeID, msgBytes []byte) error { - n.ctx.Log.Debug("called AppGossip message handler", - zap.Stringer("nodeID", nodeID), - zap.Int("messageLen", len(msgBytes)), - ) - - if n.blkBuilder.txExecutorBackend.Config.PartialSyncPrimaryNetwork { - n.ctx.Log.Debug("dropping AppGossip message", - zap.String("reason", "primary network is not being fully synced"), - ) - return nil - } - - msgIntf, err := message.Parse(msgBytes) - if err != nil { - n.ctx.Log.Debug("dropping AppGossip message", - zap.String("reason", "failed to parse message"), - ) - return nil - } - - msg, ok := msgIntf.(*message.Tx) - if !ok { - n.ctx.Log.Debug("dropping unexpected message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - - tx, err := txs.Parse(txs.Codec, msg.Tx) - if err != nil { - n.ctx.Log.Verbo("received invalid tx", - zap.Stringer("nodeID", nodeID), - zap.Binary("tx", msg.Tx), - zap.Error(err), - ) - return nil - } - - txID := tx.ID() - - // We need to grab the context lock here to avoid racy behavior with - // transaction verification + mempool modifications. 
- n.ctx.Lock.Lock() - defer n.ctx.Lock.Unlock() - - if reason := n.blkBuilder.GetDropReason(txID); reason != nil { - // If the tx is being dropped - just ignore it - return nil - } - - // add to mempool - if err := n.blkBuilder.AddUnverifiedTx(tx); err != nil { - n.ctx.Log.Debug("tx failed verification", - zap.Stringer("nodeID", nodeID), - zap.Error(err), - ) - } - return nil -} - -func (n *network) GossipTx(tx *txs.Tx) error { - txID := tx.ID() - // Don't gossip a transaction if it has been recently gossiped. - if _, has := n.recentTxs.Get(txID); has { - return nil - } - n.recentTxs.Put(txID, struct{}{}) - - n.ctx.Log.Debug("gossiping tx", - zap.Stringer("txID", txID), - ) - - msg := &message.Tx{Tx: tx.Bytes()} - msgBytes, err := message.Build(msg) - if err != nil { - return fmt.Errorf("GossipTx: failed to build Tx message: %w", err) - } - return n.appSender.SendAppGossip(context.TODO(), msgBytes) -} diff --git a/vms/platformvm/block/builder/network_test.go b/vms/platformvm/block/builder/network_test.go deleted file mode 100644 index 365fc130553a..000000000000 --- a/vms/platformvm/block/builder/network_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package builder - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/components/message" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" -) - -func getValidTx(txBuilder txbuilder.Builder, t *testing.T) *txs.Tx { - tx, err := txBuilder.NewCreateChainTx( - testSubnet1.ID(), - nil, - constants.AVMID, - nil, - "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - ) - require.NoError(t, err) - return tx -} - -// show that a tx learned from gossip is validated and added to mempool -func TestMempoolValidGossipedTxIsAddedToMempool(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - var gossipedBytes []byte - env.sender.SendAppGossipF = func(_ context.Context, b []byte) error { - gossipedBytes = b - return nil - } - - nodeID := ids.GenerateTestNodeID() - - // create a tx - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - - msg := message.Tx{Tx: tx.Bytes()} - msgBytes, err := message.Build(&msg) - require.NoError(err) - // Free lock because [AppGossip] waits for the context lock - env.ctx.Lock.Unlock() - // show that unknown tx is added to mempool - require.NoError(env.AppGossip(context.Background(), nodeID, msgBytes)) - require.True(env.Builder.Has(txID)) - // Grab lock back - env.ctx.Lock.Lock() - - // and gossiped if it has just been discovered - require.NotNil(gossipedBytes) - - // show gossiped bytes can be decoded to the original tx - replyIntf, err := message.Parse(gossipedBytes) - require.NoError(err) - - reply := replyIntf.(*message.Tx) 
- retrivedTx, err := txs.Parse(txs.Codec, reply.Tx) - require.NoError(err) - - require.Equal(txID, retrivedTx.ID()) -} - -// show that txs already marked as invalid are not re-requested on gossiping -func TestMempoolInvalidGossipedTxIsNotAddedToMempool(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - // create a tx and mark as invalid - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - env.Builder.MarkDropped(txID, errTestingDropped) - - // show that the invalid tx is not requested - nodeID := ids.GenerateTestNodeID() - msg := message.Tx{Tx: tx.Bytes()} - msgBytes, err := message.Build(&msg) - require.NoError(err) - env.ctx.Lock.Unlock() - require.NoError(env.AppGossip(context.Background(), nodeID, msgBytes)) - env.ctx.Lock.Lock() - require.False(env.Builder.Has(txID)) -} - -// show that locally generated txs are gossiped -func TestMempoolNewLocaTxIsGossiped(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - var gossipedBytes []byte - env.sender.SendAppGossipF = func(_ context.Context, b []byte) error { - gossipedBytes = b - return nil - } - - // add a tx to the mempool and show it gets gossiped - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - - require.NoError(env.Builder.AddUnverifiedTx(tx)) - require.NotNil(gossipedBytes) - - // show gossiped bytes can be decoded to the original tx - replyIntf, err := message.Parse(gossipedBytes) - require.NoError(err) - - reply := replyIntf.(*message.Tx) - retrivedTx, err := txs.Parse(txs.Codec, reply.Tx) - require.NoError(err) - - require.Equal(txID, retrivedTx.ID()) - - // show that transaction is not re-gossiped is recently added to mempool - gossipedBytes = nil - env.Builder.Remove([]*txs.Tx{tx}) - require.NoError(env.Builder.Add(tx)) - - require.Nil(gossipedBytes) -} diff --git 
a/vms/platformvm/block/codec.go b/vms/platformvm/block/codec.go index efffedcb551b..1034ee9f759a 100644 --- a/vms/platformvm/block/codec.go +++ b/vms/platformvm/block/codec.go @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -53,24 +54,20 @@ func init() { // subpackage-level codecs were introduced, each handling serialization of // specific types. func RegisterApricotBlockTypes(targetCodec codec.Registry) error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( targetCodec.RegisterType(&ApricotProposalBlock{}), targetCodec.RegisterType(&ApricotAbortBlock{}), targetCodec.RegisterType(&ApricotCommitBlock{}), targetCodec.RegisterType(&ApricotStandardBlock{}), targetCodec.RegisterType(&ApricotAtomicBlock{}), ) - return errs.Err } func RegisterBanffBlockTypes(targetCodec codec.Registry) error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( targetCodec.RegisterType(&BanffProposalBlock{}), targetCodec.RegisterType(&BanffAbortBlock{}), targetCodec.RegisterType(&BanffCommitBlock{}), targetCodec.RegisterType(&BanffStandardBlock{}), ) - return errs.Err } diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index 499d82669ad9..9c9135fe6f9f 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ b/vms/platformvm/block/executor/helpers_test.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" 
"github.com/ava-labs/avalanchego/ids" @@ -35,8 +36,6 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" @@ -51,7 +50,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - db_manager "github.com/ava-labs/avalanchego/database/manager" p_tx_builder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" ) @@ -83,9 +81,19 @@ var ( genesisBlkID ids.ID testSubnet1 *txs.Tx + // Node IDs of genesis validators. 
Initialized in init function + genesisNodeIDs []ids.NodeID + errMissing = errors.New("missing") ) +func init() { + genesisNodeIDs = make([]ids.NodeID, len(preFundedKeys)) + for i := range preFundedKeys { + genesisNodeIDs[i] = ids.GenerateTestNodeID() + } +} + type stakerStatus uint type staker struct { @@ -135,8 +143,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { } res.isBootstrapped.Set(true) - baseDBManager := db_manager.NewMemDB(version.Semantic1_0_0) - res.baseDB = versiondb.New(baseDBManager.Current().Database) + res.baseDB = versiondb.New(memdb.New()) res.ctx = defaultCtx(res.baseDB) res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.Get()) @@ -192,7 +199,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { metrics := metrics.Noop var err error - res.mempool, err = mempool.NewMempool("mempool", registerer, res) + res.mempool, err = mempool.New("mempool", registerer, res) if err != nil { panic(fmt.Errorf("failed to create mempool: %w", err)) } @@ -278,7 +285,6 @@ func defaultState( ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) if err != nil { panic(err) @@ -403,15 +409,14 @@ func buildGenesisTest(ctx *snow.Context) []byte { } } - genesisValidators := make([]api.PermissionlessValidator, len(preFundedKeys)) - for i, key := range preFundedKeys { - nodeID := ids.NodeID(key.PublicKey().Address()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) if err != nil { panic(err) } - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -470,12 +475,14 @@ func shutdownEnvironment(t *environment) error 
{ } } - errs := wrappers.Errs{} + var err error if t.state != nil { - errs.Add(t.state.Close()) + err = t.state.Close() } - errs.Add(t.baseDB.Close()) - return errs.Err + return utils.Err( + err, + t.baseDB.Close(), + ) } func addPendingValidator( diff --git a/vms/platformvm/block/executor/manager.go b/vms/platformvm/block/executor/manager.go index ea3609349b74..9af9cbce2c4a 100644 --- a/vms/platformvm/block/executor/manager.go +++ b/vms/platformvm/block/executor/manager.go @@ -4,26 +4,41 @@ package executor import ( + "errors" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" "github.com/ava-labs/avalanchego/vms/platformvm/validators" ) -var _ Manager = (*manager)(nil) +var ( + _ Manager = (*manager)(nil) + + ErrChainNotSynced = errors.New("chain not synced") +) type Manager interface { state.Versions // Returns the ID of the most recently accepted block. LastAccepted() ids.ID + + SetPreference(blkID ids.ID) (updated bool) + Preferred() ids.ID + GetBlock(blkID ids.ID) (snowman.Block, error) GetStatelessBlock(blkID ids.ID) (block.Block, error) NewBlock(block.Block) snowman.Block + + // VerifyTx verifies that the transaction can be issued based on the currently + // preferred state. This should *not* be used to verify transactions in a block. 
+ VerifyTx(tx *txs.Tx) error } func NewManager( @@ -33,9 +48,10 @@ func NewManager( txExecutorBackend *executor.Backend, validatorManager validators.Manager, ) Manager { + lastAccepted := s.GetLastAccepted() backend := &backend{ Mempool: mempool, - lastAccepted: s.GetLastAccepted(), + lastAccepted: lastAccepted, state: s, ctx: txExecutorBackend.Ctx, blkIDToState: map[ids.ID]*blockState{}, @@ -57,6 +73,8 @@ func NewManager( backend: backend, addTxsToMempool: !txExecutorBackend.Config.PartialSyncPrimaryNetwork, }, + preferred: lastAccepted, + txExecutorBackend: txExecutorBackend, } } @@ -65,6 +83,9 @@ type manager struct { verifier block.Visitor acceptor block.Visitor rejector block.Visitor + + preferred ids.ID + txExecutorBackend *executor.Backend } func (m *manager) GetBlock(blkID ids.ID) (snowman.Block, error) { @@ -85,3 +106,26 @@ func (m *manager) NewBlock(blk block.Block) snowman.Block { Block: blk, } } + +func (m *manager) SetPreference(blockID ids.ID) (updated bool) { + updated = m.preferred == blockID + m.preferred = blockID + return updated +} + +func (m *manager) Preferred() ids.ID { + return m.preferred +} + +func (m *manager) VerifyTx(tx *txs.Tx) error { + if !m.txExecutorBackend.Bootstrapped.Get() { + return ErrChainNotSynced + } + + return tx.Unsigned.Visit(&executor.MempoolTxVerifier{ + Backend: m.txExecutorBackend, + ParentID: m.preferred, + StateVersions: m, + Tx: tx, + }) +} diff --git a/vms/platformvm/block/executor/mock_manager.go b/vms/platformvm/block/executor/mock_manager.go index 07f163fad635..f5b2bcb3608c 100644 --- a/vms/platformvm/block/executor/mock_manager.go +++ b/vms/platformvm/block/executor/mock_manager.go @@ -14,6 +14,7 @@ import ( snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" block "github.com/ava-labs/avalanchego/vms/platformvm/block" state "github.com/ava-labs/avalanchego/vms/platformvm/state" + txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" gomock 
"go.uber.org/mock/gomock" ) @@ -112,3 +113,45 @@ func (mr *MockManagerMockRecorder) NewBlock(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlock", reflect.TypeOf((*MockManager)(nil).NewBlock), arg0) } + +// Preferred mocks base method. +func (m *MockManager) Preferred() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Preferred") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// Preferred indicates an expected call of Preferred. +func (mr *MockManagerMockRecorder) Preferred() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Preferred", reflect.TypeOf((*MockManager)(nil).Preferred)) +} + +// SetPreference mocks base method. +func (m *MockManager) SetPreference(arg0 ids.ID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPreference", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// SetPreference indicates an expected call of SetPreference. +func (mr *MockManagerMockRecorder) SetPreference(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockManager)(nil).SetPreference), arg0) +} + +// VerifyTx mocks base method. +func (m *MockManager) VerifyTx(arg0 *txs.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyTx", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyTx indicates an expected call of VerifyTx. 
+func (mr *MockManagerMockRecorder) VerifyTx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTx", reflect.TypeOf((*MockManager)(nil).VerifyTx), arg0) +} diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 9a9dc3037287..b69708f8ca9f 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -392,57 +392,52 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { // Staker5: |--------------------| // Staker0 it's here just to allow to issue a proposal block with the chosen endTime. - staker0RewardAddress := ids.ShortID{2} + + // In this test multiple stakers may join and leave the staker set at the same time. + // The order in which they do it is asserted; the order may depend on the staker.TxID, + // which in turns depend on every feature of the transaction creating the staker. + // So in this test we avoid ids.GenerateTestNodeID, in favour of ids.BuildTestNodeID + // so that TxID does not depend on the order we run tests. 
staker0 := staker{ - nodeID: ids.NodeID(staker0RewardAddress), - rewardAddress: staker0RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf0}), + rewardAddress: ids.ShortID{0xf0}, startTime: defaultGenesisTime, endTime: time.Time{}, // actual endTime depends on specific test } - staker1RewardAddress := ids.GenerateTestShortID() staker1 := staker{ - nodeID: ids.NodeID(staker1RewardAddress), - rewardAddress: staker1RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf1}), + rewardAddress: ids.ShortID{0xf1}, startTime: defaultGenesisTime.Add(1 * time.Minute), endTime: defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), } - - staker2RewardAddress := ids.ShortID{1} staker2 := staker{ - nodeID: ids.NodeID(staker2RewardAddress), - rewardAddress: staker2RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf2}), + rewardAddress: ids.ShortID{0xf2}, startTime: staker1.startTime.Add(1 * time.Minute), endTime: staker1.startTime.Add(1 * time.Minute).Add(defaultMinStakingDuration), } - - staker3RewardAddress := ids.GenerateTestShortID() staker3 := staker{ - nodeID: ids.NodeID(staker3RewardAddress), - rewardAddress: staker3RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xf3}, startTime: staker2.startTime.Add(1 * time.Minute), endTime: staker2.endTime.Add(1 * time.Minute), } - staker3Sub := staker{ - nodeID: staker3.nodeID, - rewardAddress: staker3.rewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xff}, startTime: staker3.startTime.Add(1 * time.Minute), endTime: staker3.endTime.Add(-1 * time.Minute), } - - staker4RewardAddress := ids.GenerateTestShortID() staker4 := staker{ - nodeID: ids.NodeID(staker4RewardAddress), - rewardAddress: staker4RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf4}), + rewardAddress: ids.ShortID{0xf4}, startTime: staker3.startTime, endTime: staker3.endTime, } - - staker5RewardAddress := ids.GenerateTestShortID() staker5 := staker{ - 
nodeID: ids.NodeID(staker5RewardAddress), - rewardAddress: staker5RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf5}), + rewardAddress: ids.ShortID{0xf5}, startTime: staker2.endTime, endTime: staker2.endTime.Add(defaultMinStakingDuration), } @@ -541,15 +536,19 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { }, }, { - description: "advance time to staker5 end", + description: "advance time to staker5 start", stakers: []staker{staker1, staker2, staker3, staker4, staker5}, advanceTimeTo: []time.Time{staker1.startTime, staker2.startTime, staker3.startTime, staker5.startTime}, expectedStakers: map[ids.NodeID]stakerStatus{ staker1.nodeID: current, - // given its txID, staker2 will be - // rewarded and moved out of current stakers set - // staker2.nodeID: current, + // Staker2's end time matches staker5's start time, so typically + // the block builder would produce a ProposalBlock to remove + // staker2 when advancing the time. However, this test injects + // staker0 into the staker set artificially to advance the time. + // This means that staker2 is not removed by the ProposalBlock + // when advancing the time. 
+ staker2.nodeID: current, staker3.nodeID: current, staker4.nodeID: current, staker5.nodeID: current, @@ -564,7 +563,6 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { defer func() { require.NoError(shutdownEnvironment(env)) }() - env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -721,8 +719,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { env.config.TrackedSubnets.Add(subnetID) // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - // Starts after the corre + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -750,7 +747,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { // The above validator is now part of the staking set // Queue a staker that joins the staker set after the above validator leaves - subnetVdr2NodeID := ids.NodeID(preFundedKeys[1].PublicKey().Address()) + subnetVdr2NodeID := genesisNodeIDs[1] tx, err = env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time @@ -862,8 +859,7 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { } // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -947,7 +943,7 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { require.NoError(propBlk.Accept(context.Background())) require.NoError(commitBlk.Accept(context.Background())) _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) - 
require.Equal(tracked, ok) + require.True(ok) }) } } @@ -1143,10 +1139,9 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // Add a pending validator pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) - factory := secp256k1.Factory{} - nodeIDKey, _ := factory.NewPrivateKey() + nodeIDKey, _ := secp256k1.NewPrivateKey() rewardAddress := nodeIDKey.PublicKey().Address() - nodeID := ids.NodeID(rewardAddress) + nodeID := ids.BuildTestNodeID(rewardAddress[:]) _, err := addPendingValidator( env, diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index 76ae7ca55de6..af1a7562cdd0 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -363,39 +363,45 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { // Staker3sub: |----------------| // Staker4: |------------------------| // Staker5: |--------------------| + + // In this test multiple stakers may join and leave the staker set at the same time. + // The order in which they do it is asserted; the order may depend on the staker.TxID, + // which in turns depend on every feature of the transaction creating the staker. + // So in this test we avoid ids.GenerateTestNodeID, in favour of ids.BuildTestNodeID + // so that TxID does not depend on the order we run tests. 
staker1 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf1}), + rewardAddress: ids.ShortID{0xf1}, startTime: defaultGenesisTime.Add(1 * time.Minute), endTime: defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), } staker2 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf2}), + rewardAddress: ids.ShortID{0xf2}, startTime: staker1.startTime.Add(1 * time.Minute), endTime: staker1.startTime.Add(1 * time.Minute).Add(defaultMinStakingDuration), } staker3 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xf3}, startTime: staker2.startTime.Add(1 * time.Minute), endTime: staker2.endTime.Add(1 * time.Minute), } staker3Sub := staker{ - nodeID: staker3.nodeID, - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xff}, startTime: staker3.startTime.Add(1 * time.Minute), endTime: staker3.endTime.Add(-1 * time.Minute), } staker4 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf4}), + rewardAddress: ids.ShortID{0xf4}, startTime: staker3.startTime, endTime: staker3.endTime, } staker5 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf5}), + rewardAddress: ids.ShortID{0xf5}, startTime: staker2.endTime, endTime: staker2.endTime.Add(defaultMinStakingDuration), } @@ -474,11 +480,17 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { }, }, { - description: "advance time to staker5 end", + description: "advance time to staker5 start", stakers: []staker{staker1, staker2, staker3, staker4, staker5}, advanceTimeTo: []time.Time{staker1.startTime, 
staker2.startTime, staker3.startTime, staker5.startTime}, expectedStakers: map[ids.NodeID]stakerStatus{ staker1.nodeID: current, + + // Staker2's end time matches staker5's start time, so typically + // the block builder would produce a ProposalBlock to remove + // staker2 when advancing the time. However, it is valid to only + // advance the time with a StandardBlock and not remove staker2, + // which is what this test does. staker2.nodeID: current, staker3.nodeID: current, staker4.nodeID: current, @@ -602,8 +614,7 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { env.config.TrackedSubnets.Add(subnetID) // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - // Starts after the corre + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -631,7 +642,7 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { // The above validator is now part of the staking set // Queue a staker that joins the staker set after the above validator leaves - subnetVdr2NodeID := ids.NodeID(preFundedKeys[1].PublicKey().Address()) + subnetVdr2NodeID := genesisNodeIDs[1] tx, err = env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time @@ -702,8 +713,7 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { } // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -747,7 +757,7 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { 
require.NoError(block.Verify(context.Background())) require.NoError(block.Accept(context.Background())) _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) - require.Equal(tracked, ok) + require.True(ok) }) } } diff --git a/vms/platformvm/metrics/metrics.go b/vms/platformvm/metrics/metrics.go index a73c8d168793..7c0e616dd9b2 100644 --- a/vms/platformvm/metrics/metrics.go +++ b/vms/platformvm/metrics/metrics.go @@ -110,9 +110,9 @@ func New( errs := wrappers.Errs{Err: err} apiRequestMetrics, err := metric.NewAPIInterceptor(namespace, registerer) + errs.Add(err) m.APIInterceptor = apiRequestMetrics errs.Add( - err, registerer.Register(m.timeUntilUnstake), registerer.Register(m.timeUntilSubnetUnstake), registerer.Register(m.localStake), diff --git a/vms/platformvm/metrics/tx_metrics.go b/vms/platformvm/metrics/tx_metrics.go index 17d6a090957b..9ed07bce7ec9 100644 --- a/vms/platformvm/metrics/tx_metrics.go +++ b/vms/platformvm/metrics/tx_metrics.go @@ -28,7 +28,8 @@ type txMetrics struct { numTransformSubnetTxs, numAddPermissionlessValidatorTxs, numAddPermissionlessDelegatorTxs, - numTransferSubnetOwnershipTxs prometheus.Counter + numTransferSubnetOwnershipTxs, + numBaseTxs prometheus.Counter } func newTxMetrics( @@ -51,6 +52,7 @@ func newTxMetrics( numAddPermissionlessValidatorTxs: newTxMetric(namespace, "add_permissionless_validator", registerer, &errs), numAddPermissionlessDelegatorTxs: newTxMetric(namespace, "add_permissionless_delegator", registerer, &errs), numTransferSubnetOwnershipTxs: newTxMetric(namespace, "transfer_subnet_ownership", registerer, &errs), + numBaseTxs: newTxMetric(namespace, "base", registerer, &errs), } return m, errs.Err } @@ -139,3 +141,8 @@ func (m *txMetrics) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) er m.numTransferSubnetOwnershipTxs.Inc() return nil } + +func (m *txMetrics) BaseTx(*txs.BaseTx) error { + m.numBaseTxs.Inc() + return nil +} diff --git a/vms/platformvm/network/main_test.go 
b/vms/platformvm/network/main_test.go new file mode 100644 index 000000000000..be0fab18f587 --- /dev/null +++ b/vms/platformvm/network/main_test.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/vms/platformvm/network/network.go b/vms/platformvm/network/network.go new file mode 100644 index 000000000000..0bbfc4f86eaf --- /dev/null +++ b/vms/platformvm/network/network.go @@ -0,0 +1,208 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "sync" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/vms/components/message" + "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +// We allow [recentCacheSize] to be fairly large because we only store hashes +// in the cache, not entire transactions. +const recentCacheSize = 512 + +var _ Network = (*network)(nil) + +type Network interface { + common.AppHandler + + // IssueTx verifies the transaction at the currently preferred state, adds + // it to the mempool, and gossips it to the network. + // + // Invariant: Assumes the context lock is held. 
+ IssueTx(context.Context, *txs.Tx) error +} + +type network struct { + // We embed a noop handler for all unhandled messages + common.AppHandler + + ctx *snow.Context + manager executor.Manager + mempool mempool.Mempool + partialSyncPrimaryNetwork bool + appSender common.AppSender + + // gossip related attributes + recentTxsLock sync.Mutex + recentTxs *cache.LRU[ids.ID, struct{}] +} + +func New( + ctx *snow.Context, + manager executor.Manager, + mempool mempool.Mempool, + partialSyncPrimaryNetwork bool, + appSender common.AppSender, +) Network { + return &network{ + AppHandler: common.NewNoOpAppHandler(ctx.Log), + + ctx: ctx, + manager: manager, + mempool: mempool, + partialSyncPrimaryNetwork: partialSyncPrimaryNetwork, + appSender: appSender, + recentTxs: &cache.LRU[ids.ID, struct{}]{Size: recentCacheSize}, + } +} + +func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { + n.ctx.Log.Debug("called AppGossip message handler", + zap.Stringer("nodeID", nodeID), + zap.Int("messageLen", len(msgBytes)), + ) + + if n.partialSyncPrimaryNetwork { + n.ctx.Log.Debug("dropping AppGossip message", + zap.String("reason", "primary network is not being fully synced"), + ) + return nil + } + + msgIntf, err := message.Parse(msgBytes) + if err != nil { + n.ctx.Log.Debug("dropping AppGossip message", + zap.String("reason", "failed to parse message"), + ) + return nil + } + + msg, ok := msgIntf.(*message.Tx) + if !ok { + n.ctx.Log.Debug("dropping unexpected message", + zap.Stringer("nodeID", nodeID), + ) + return nil + } + + tx, err := txs.Parse(txs.Codec, msg.Tx) + if err != nil { + n.ctx.Log.Verbo("received invalid tx", + zap.Stringer("nodeID", nodeID), + zap.Binary("tx", msg.Tx), + zap.Error(err), + ) + return nil + } + txID := tx.ID() + + // We need to grab the context lock here to avoid racy behavior with + // transaction verification + mempool modifications. 
+ // + // Invariant: tx should not be referenced again without the context lock + // held to avoid any data races. + n.ctx.Lock.Lock() + defer n.ctx.Lock.Unlock() + + if reason := n.mempool.GetDropReason(txID); reason != nil { + // If the tx is being dropped - just ignore it + return nil + } + if err := n.issueTx(tx); err == nil { + n.gossipTx(ctx, txID, msgBytes) + } + return nil +} + +func (n *network) IssueTx(ctx context.Context, tx *txs.Tx) error { + if err := n.issueTx(tx); err != nil { + return err + } + + txBytes := tx.Bytes() + msg := &message.Tx{ + Tx: txBytes, + } + msgBytes, err := message.Build(msg) + if err != nil { + return err + } + + txID := tx.ID() + n.gossipTx(ctx, txID, msgBytes) + return nil +} + +// returns nil if the tx is in the mempool +func (n *network) issueTx(tx *txs.Tx) error { + txID := tx.ID() + if n.mempool.Has(txID) { + // The tx is already in the mempool + return nil + } + + // Verify the tx at the currently preferred state + if err := n.manager.VerifyTx(tx); err != nil { + n.ctx.Log.Debug("tx failed verification", + zap.Stringer("txID", txID), + zap.Error(err), + ) + + n.mempool.MarkDropped(txID, err) + return err + } + + // If we are partially syncing the Primary Network, we should not be + // maintaining the transaction mempool locally. + if n.partialSyncPrimaryNetwork { + return nil + } + + if err := n.mempool.Add(tx); err != nil { + n.ctx.Log.Debug("tx failed to be added to the mempool", + zap.Stringer("txID", txID), + zap.Error(err), + ) + + n.mempool.MarkDropped(txID, err) + return err + } + + return nil +} + +func (n *network) gossipTx(ctx context.Context, txID ids.ID, msgBytes []byte) { + n.recentTxsLock.Lock() + _, has := n.recentTxs.Get(txID) + n.recentTxs.Put(txID, struct{}{}) + n.recentTxsLock.Unlock() + + // Don't gossip a transaction if it has been recently gossiped. 
+ if has { + return + } + + n.ctx.Log.Debug("gossiping tx", + zap.Stringer("txID", txID), + ) + + if err := n.appSender.SendAppGossip(ctx, msgBytes); err != nil { + n.ctx.Log.Error("failed to gossip tx", + zap.Stringer("txID", txID), + zap.Error(err), + ) + } +} diff --git a/vms/platformvm/network/network_test.go b/vms/platformvm/network/network_test.go new file mode 100644 index 000000000000..8c17bb0491b5 --- /dev/null +++ b/vms/platformvm/network/network_test.go @@ -0,0 +1,353 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/message" + "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +var errTest = errors.New("test error") + +func TestNetworkAppGossip(t *testing.T) { + testTx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: 1, + BlockchainID: ids.GenerateTestID(), + Ins: []*avax.TransferableInput{}, + Outs: []*avax.TransferableOutput{}, + }, + }, + } + require.NoError(t, testTx.Initialize(txs.Codec)) + + type test struct { + name string + msgBytesFunc func() []byte + mempoolFunc func(*gomock.Controller) mempool.Mempool + partialSyncPrimaryNetwork bool + appSenderFunc func(*gomock.Controller) common.AppSender + } + + tests := []test{ + { + // Shouldn't attempt to issue or gossip the tx + name: "invalid message bytes", + msgBytesFunc: 
func() []byte { + return []byte{0x00} + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + // Unused in this test + return nil + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Unused in this test + return nil + }, + }, + { + // Shouldn't attempt to issue or gossip the tx + name: "invalid tx bytes", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: []byte{0x00}, + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + // Unused in this test + return mempool.NewMockMempool(ctrl) + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Unused in this test + return common.NewMockSender(ctrl) + }, + }, + { + // Issue returns nil because mempool has tx. We should gossip the tx. + name: "issuance succeeds", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(true) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()) + return appSender + }, + }, + { + // Issue returns error because tx was dropped. We shouldn't gossip the tx. 
+ name: "issuance fails", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Unused in this test + return common.NewMockSender(ctrl) + }, + }, + { + name: "should AppGossip if primary network is not being fully synced", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + // mempool.EXPECT().Has(gomock.Any()).Return(true) + return mempool + }, + partialSyncPrimaryNetwork: true, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + appSender := common.NewMockSender(ctrl) + // appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()) + return appSender + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + n := New( + &snow.Context{ + Log: logging.NoLog{}, + }, + executor.NewMockManager(ctrl), // Manager is unused in this test + tt.mempoolFunc(ctrl), + tt.partialSyncPrimaryNetwork, + tt.appSenderFunc(ctrl), + ) + require.NoError(n.AppGossip(context.Background(), ids.GenerateTestNodeID(), tt.msgBytesFunc())) + }) + } +} + +func TestNetworkIssueTx(t *testing.T) { + type test struct { + name string + mempoolFunc func(*gomock.Controller) mempool.Mempool + managerFunc func(*gomock.Controller) executor.Manager + partialSyncPrimaryNetwork bool + appSenderFunc func(*gomock.Controller) common.AppSender + expectedErr error + } + + tests := []test{ + { + name: "mempool has 
transaction", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(true) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + // Unused in this test + return executor.NewMockManager(ctrl) + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Should gossip the tx + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil) + return appSender + }, + expectedErr: nil, + }, + { + name: "transaction marked as dropped in mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + manager := executor.NewMockManager(ctrl) + manager.EXPECT().VerifyTx(gomock.Any()).Return(errTest) + return manager + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "transaction invalid", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + manager := executor.NewMockManager(ctrl) + manager.EXPECT().VerifyTx(gomock.Any()).Return(errTest) + return manager + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "can't add transaction to mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := 
mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().Add(gomock.Any()).Return(errTest) + mempool.EXPECT().MarkDropped(gomock.Any(), errTest) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + manager := executor.NewMockManager(ctrl) + manager.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return manager + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "AppGossip tx but do not add to mempool if primary network is not being fully synced", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + manager := executor.NewMockManager(ctrl) + manager.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return manager + }, + partialSyncPrimaryNetwork: true, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Should gossip the tx + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil) + return appSender + }, + expectedErr: nil, + }, + { + name: "happy path", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + manager := executor.NewMockManager(ctrl) + manager.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return manager + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Should gossip the tx + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil) + return appSender + }, + expectedErr: nil, 
+ }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + n := New( + &snow.Context{ + Log: logging.NoLog{}, + }, + tt.managerFunc(ctrl), + tt.mempoolFunc(ctrl), + tt.partialSyncPrimaryNetwork, + tt.appSenderFunc(ctrl), + ) + err := n.IssueTx(context.Background(), &txs.Tx{}) + require.ErrorIs(err, tt.expectedErr) + }) + } +} + +func TestNetworkGossipTx(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + appSender := common.NewMockSender(ctrl) + + nIntf := New( + &snow.Context{ + Log: logging.NoLog{}, + }, + executor.NewMockManager(ctrl), + mempool.NewMockMempool(ctrl), + false, + appSender, + ) + require.IsType(&network{}, nIntf) + n := nIntf.(*network) + + // Case: Tx was recently gossiped + txID := ids.GenerateTestID() + n.recentTxs.Put(txID, struct{}{}) + n.gossipTx(context.Background(), txID, []byte{}) + // Didn't make a call to SendAppGossip + + // Case: Tx was not recently gossiped + msgBytes := []byte{1, 2, 3} + appSender.EXPECT().SendAppGossip(gomock.Any(), msgBytes).Return(nil) + n.gossipTx(context.Background(), ids.GenerateTestID(), msgBytes) + // Did make a call to SendAppGossip +} diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index a2871faf0682..fe9fa66f7b4c 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -30,7 +30,6 @@ import ( "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/keystore" "github.com/ava-labs/avalanchego/vms/platformvm/fx" @@ -1186,7 +1185,7 @@ type AddValidatorArgs struct { // AddValidator creates and signs and issues a transaction to add a validator to // the primary network -func (s 
*Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *api.JSONTxIDChangeAddr) error { +func (s *Service) AddValidator(req *http.Request, args *AddValidatorArgs, reply *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "addValidator"), @@ -1283,13 +1282,11 @@ func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *a reply.TxID = tx.ID() reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( err, - s.vm.Builder.AddUnverifiedTx(tx), + s.vm.Network.IssueTx(req.Context(), tx), user.Close(), ) - return errs.Err } // AddDelegatorArgs are the arguments to AddDelegator @@ -1302,7 +1299,7 @@ type AddDelegatorArgs struct { // AddDelegator creates and signs and issues a transaction to add a delegator to // the primary network -func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *api.JSONTxIDChangeAddr) error { +func (s *Service) AddDelegator(req *http.Request, args *AddDelegatorArgs, reply *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "addDelegator"), @@ -1395,13 +1392,11 @@ func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *a reply.TxID = tx.ID() reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( err, - s.vm.Builder.AddUnverifiedTx(tx), + s.vm.Network.IssueTx(req.Context(), tx), user.Close(), ) - return errs.Err } // AddSubnetValidatorArgs are the arguments to AddSubnetValidator @@ -1415,7 +1410,7 @@ type AddSubnetValidatorArgs struct { // AddSubnetValidator creates and signs and issues a transaction to add a // validator to a subnet other than the primary network -func (s *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorArgs, response 
*api.JSONTxIDChangeAddr) error { +func (s *Service) AddSubnetValidator(req *http.Request, args *AddSubnetValidatorArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "addSubnetValidator"), @@ -1503,13 +1498,11 @@ func (s *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorAr response.TxID = tx.ID() response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( err, - s.vm.Builder.AddUnverifiedTx(tx), + s.vm.Network.IssueTx(req.Context(), tx), user.Close(), ) - return errs.Err } // CreateSubnetArgs are the arguments to CreateSubnet @@ -1522,7 +1515,7 @@ type CreateSubnetArgs struct { // CreateSubnet creates and signs and issues a transaction to create a new // subnet -func (s *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) CreateSubnet(req *http.Request, args *CreateSubnetArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "createSubnet"), @@ -1581,13 +1574,11 @@ func (s *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response response.TxID = tx.ID() response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( err, - s.vm.Builder.AddUnverifiedTx(tx), + s.vm.Network.IssueTx(req.Context(), tx), user.Close(), ) - return errs.Err } // ExportAVAXArgs are the arguments to ExportAVAX @@ -1608,7 +1599,7 @@ type ExportAVAXArgs struct { // ExportAVAX exports AVAX from the P-Chain to the X-Chain // It must be imported on the X-Chain to complete the transfer -func (s *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) ExportAVAX(req *http.Request, args *ExportAVAXArgs, response 
*api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "exportAVAX"), @@ -1679,13 +1670,11 @@ func (s *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *ap response.TxID = tx.ID() response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( err, - s.vm.Builder.AddUnverifiedTx(tx), + s.vm.Network.IssueTx(req.Context(), tx), user.Close(), ) - return errs.Err } // ImportAVAXArgs are the arguments to ImportAVAX @@ -1702,7 +1691,7 @@ type ImportAVAXArgs struct { // ImportAVAX issues a transaction to import AVAX from the X-chain. The AVAX // must have already been exported from the X-Chain. -func (s *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) ImportAVAX(req *http.Request, args *ImportAVAXArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "importAVAX"), @@ -1766,13 +1755,11 @@ func (s *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, response *ap response.TxID = tx.ID() response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( err, - s.vm.Builder.AddUnverifiedTx(tx), + s.vm.Network.IssueTx(req.Context(), tx), user.Close(), ) - return errs.Err } /* @@ -1800,7 +1787,7 @@ type CreateBlockchainArgs struct { } // CreateBlockchain issues a transaction to create a new blockchain -func (s *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) CreateBlockchain(req *http.Request, args *CreateBlockchainArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "createBlockchain"), @@ -1892,13 +1879,11 @@ 
func (s *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response.TxID = tx.ID() response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( err, - s.vm.Builder.AddUnverifiedTx(tx), + s.vm.Network.IssueTx(req.Context(), tx), user.Close(), ) - return errs.Err } // GetBlockchainStatusArgs is the arguments for calling GetBlockchainStatus @@ -1958,11 +1943,8 @@ func (s *Service) GetBlockchainStatus(r *http.Request, args *GetBlockchainStatus return nil } - preferredBlk, err := s.vm.Preferred() - if err != nil { - return fmt.Errorf("could not retrieve preferred block, err %w", err) - } - preferred, err := s.chainExists(ctx, preferredBlk.ID(), blockchainID) + preferredBlkID := s.vm.manager.Preferred() + preferred, err := s.chainExists(ctx, preferredBlkID, blockchainID) if err != nil { return fmt.Errorf("problem looking up blockchain: %w", err) } @@ -2173,7 +2155,7 @@ func (s *Service) GetBlockchains(_ *http.Request, _ *struct{}, response *GetBloc return nil } -func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { +func (s *Service) IssueTx(req *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { s.vm.ctx.Log.Debug("API called", zap.String("service", "platform"), zap.String("method", "issueTx"), @@ -2191,7 +2173,7 @@ func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api. s.vm.ctx.Lock.Lock() defer s.vm.ctx.Lock.Unlock() - if err := s.vm.Builder.AddUnverifiedTx(tx); err != nil { + if err := s.vm.Network.IssueTx(req.Context(), tx); err != nil { return fmt.Errorf("couldn't issue tx: %w", err) } @@ -2261,12 +2243,7 @@ func (s *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, response * // The status of this transaction is not in the database - check if the tx // is in the preferred block's db. If so, return that it's processing. 
- prefBlk, err := s.vm.Preferred() - if err != nil { - return err - } - - preferredID := prefBlk.ID() + preferredID := s.vm.manager.Preferred() onAccept, ok := s.vm.manager.GetState(preferredID) if !ok { return fmt.Errorf("could not retrieve state for block %s", preferredID) diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 9836ec0a9b0a..8e2cc3790fc3 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -23,7 +23,7 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -35,7 +35,6 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" @@ -77,7 +76,7 @@ func defaultService(t *testing.T) (*Service, *mutableSharedMemory) { vm, _, mutableSharedMemory := defaultVM(t) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - ks := keystore.New(logging.NoLog{}, manager.NewMemDB(version.Semantic1_0_0)) + ks := keystore.New(logging.NoLog{}, memdb.New()) require.NoError(t, ks.CreateUser(testUsername, testPassword)) vm.ctx.Keystore = ks.NewBlockchainKeyStore(vm.ctx.ChainID) @@ -99,7 +98,7 @@ func defaultAddress(t *testing.T, service *Service) { user, err := vmkeystore.NewUserFromKeystore(service.vm.ctx.Keystore, testUsername, testPassword) require.NoError(err) - pk, err := 
testKeyFactory.ToPrivateKey(testPrivateKey) + pk, err := secp256k1.ToPrivateKey(testPrivateKey) require.NoError(err) require.NoError(user.PutKeys(pk, keys[0])) @@ -176,11 +175,10 @@ func TestGetTxStatus(t *testing.T) { service.vm.ctx.Lock.Unlock() }() - factory := secp256k1.Factory{} - recipientKey, err := factory.NewPrivateKey() + recipientKey, err := secp256k1.NewPrivateKey() require.NoError(err) - m := atomic.NewMemory(prefixdb.New([]byte{}, service.vm.dbManager.Current().Database)) + m := atomic.NewMemory(prefixdb.New([]byte{}, service.vm.db)) sm := m.NewSharedMemory(service.vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(xChainID) @@ -240,12 +238,12 @@ func TestGetTxStatus(t *testing.T) { service.vm.ctx.Lock.Lock() // put the chain in existing chain list - err = service.vm.Builder.AddUnverifiedTx(tx) + err = service.vm.Network.IssueTx(context.Background(), tx) require.ErrorIs(err, database.ErrNotFound) // Missing shared memory UTXO mutableSharedMemory.SharedMemory = sm - require.NoError(service.vm.Builder.AddUnverifiedTx(tx)) + require.NoError(service.vm.Network.IssueTx(context.Background(), tx)) block, err := service.vm.BuildBlock(context.Background()) require.NoError(err) @@ -341,7 +339,7 @@ func TestGetTx(t *testing.T) { service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Builder.AddUnverifiedTx(tx)) + require.NoError(service.vm.Network.IssueTx(context.Background(), tx)) blk, err := service.vm.BuildBlock(context.Background()) require.NoError(err) @@ -496,7 +494,7 @@ func TestGetStake(t *testing.T) { // Add a delegator stakeAmount := service.vm.MinDelegatorStake + 12345 - delegatorNodeID := ids.NodeID(keys[0].PublicKey().Address()) + delegatorNodeID := genesisNodeIDs[0] delegatorEndTime := uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()) tx, err := service.vm.txBuilder.NewAddDelegatorTx( stakeAmount, @@ -628,7 +626,7 @@ func TestGetCurrentValidators(t *testing.T) { // Add a delegator stakeAmount := service.vm.MinDelegatorStake + 
12345 - validatorNodeID := ids.NodeID(keys[1].PublicKey().Address()) + validatorNodeID := genesisNodeIDs[1] delegatorStartTime := uint64(defaultValidateStartTime.Unix()) delegatorEndTime := uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix()) @@ -786,7 +784,8 @@ func TestGetBlock(t *testing.T) { ) require.NoError(err) - preferred, err := service.vm.Builder.Preferred() + preferredID := service.vm.manager.Preferred() + preferred, err := service.vm.manager.GetBlock(preferredID) require.NoError(err) statelessBlock, err := block.NewBanffStandardBlock( diff --git a/vms/platformvm/state/disk_staker_diff_iterator.go b/vms/platformvm/state/disk_staker_diff_iterator.go index 44ee1ed87180..efac5ec7b6d7 100644 --- a/vms/platformvm/state/disk_staker_diff_iterator.go +++ b/vms/platformvm/state/disk_staker_diff_iterator.go @@ -43,7 +43,7 @@ func marshalDiffKey(subnetID ids.ID, height uint64, nodeID ids.NodeID) []byte { key := make([]byte, diffKeyLength) copy(key, subnetID[:]) packIterableHeight(key[ids.IDLen:], height) - copy(key[diffKeyNodeIDOffset:], nodeID[:]) + copy(key[diffKeyNodeIDOffset:], nodeID.Bytes()) return key } diff --git a/vms/platformvm/state/disk_staker_diff_iterator_test.go b/vms/platformvm/state/disk_staker_diff_iterator_test.go index 9439428937b5..543f42a4b9c3 100644 --- a/vms/platformvm/state/disk_staker_diff_iterator_test.go +++ b/vms/platformvm/state/disk_staker_diff_iterator_test.go @@ -58,8 +58,8 @@ func TestDiffIteration(t *testing.T) { subnetID0 := ids.GenerateTestID() subnetID1 := ids.GenerateTestID() - nodeID0 := ids.NodeID{0x00} - nodeID1 := ids.NodeID{0x01} + nodeID0 := ids.BuildTestNodeID([]byte{0x00}) + nodeID1 := ids.BuildTestNodeID([]byte{0x01}) subnetID0Height0NodeID0 := marshalDiffKey(subnetID0, 0, nodeID0) subnetID0Height1NodeID0 := marshalDiffKey(subnetID0, 1, nodeID0) diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 1e0265798d33..41ce946a12e0 100644 --- 
a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -144,20 +144,6 @@ func (mr *MockStateMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockState)(nil).AddUTXO), arg0) } -// ApplyCurrentValidators mocks base method. -func (m *MockState) ApplyCurrentValidators(arg0 ids.ID, arg1 validators.Manager) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyCurrentValidators", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ApplyCurrentValidators indicates an expected call of ApplyCurrentValidators. -func (mr *MockStateMockRecorder) ApplyCurrentValidators(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyCurrentValidators", reflect.TypeOf((*MockState)(nil).ApplyCurrentValidators), arg0, arg1) -} - // ApplyValidatorPublicKeyDiffs mocks base method. func (m *MockState) ApplyValidatorPublicKeyDiffs(arg0 context.Context, arg1 map[ids.NodeID]*validators.GetValidatorOutput, arg2, arg3 uint64) error { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 23a9412f89d8..fd842f684eae 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -140,10 +140,6 @@ type State interface { GetBlockIDAtHeight(height uint64) (ids.ID, error) - // ApplyCurrentValidators adds all the current validators and delegators of - // [subnetID] into [vdrs]. - ApplyCurrentValidators(subnetID ids.ID, vdrs validators.Manager) error - // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis // block until it has applied all of the diffs up to and including // [endHeight]. Applying the diffs modifies [validators]. 
@@ -290,11 +286,10 @@ type stateBlk struct { type state struct { validatorState - cfg *config.Config - ctx *snow.Context - metrics metrics.Metrics - rewards reward.Calculator - bootstrapped *utils.Atomic[bool] + cfg *config.Config + ctx *snow.Context + metrics metrics.Metrics + rewards reward.Calculator baseDB *versiondb.Database @@ -461,7 +456,6 @@ func New( ctx *snow.Context, metrics metrics.Metrics, rewards reward.Calculator, - bootstrapped *utils.Atomic[bool], ) (State, error) { s, err := newState( db, @@ -471,7 +465,6 @@ func New( ctx, metricsReg, rewards, - bootstrapped, ) if err != nil { return nil, err @@ -516,7 +509,6 @@ func newState( ctx *snow.Context, metricsReg prometheus.Registerer, rewards reward.Calculator, - bootstrapped *utils.Atomic[bool], ) (*state, error) { blockIDCache, err := metercacher.New[uint64, ids.ID]( "block_id_cache", @@ -635,12 +627,11 @@ func newState( return &state{ validatorState: newValidatorState(), - cfg: cfg, - ctx: ctx, - metrics: metrics, - rewards: rewards, - bootstrapped: bootstrapped, - baseDB: baseDB, + cfg: cfg, + ctx: ctx, + metrics: metrics, + rewards: rewards, + baseDB: baseDB, addedBlockIDs: make(map[uint64]ids.ID), blockIDCache: blockIDCache, @@ -1139,26 +1130,6 @@ func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { } } -func (s *state) ApplyCurrentValidators(subnetID ids.ID, vdrs validators.Manager) error { - for nodeID, validator := range s.currentStakers.validators[subnetID] { - staker := validator.validator - if err := vdrs.AddStaker(subnetID, nodeID, staker.PublicKey, staker.TxID, staker.Weight); err != nil { - return err - } - - delegatorIterator := NewTreeIterator(validator.delegators) - for delegatorIterator.Next() { - staker := delegatorIterator.Value() - if err := vdrs.AddWeight(subnetID, nodeID, staker.Weight); err != nil { - delegatorIterator.Release() - return err - } - } - delegatorIterator.Release() - } - return nil -} - func (s *state) ApplyValidatorWeightDiffs( ctx context.Context, 
validators map[ids.NodeID]*validators.GetValidatorOutput, @@ -1404,14 +1375,12 @@ func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) er // Load pulls data previously stored on disk that is expected to be in memory. func (s *state) load() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( s.loadMetadata(), s.loadCurrentValidators(), s.loadPendingValidators(), s.initValidatorSets(), ) - return errs.Err } func (s *state) loadMetadata() error { @@ -1596,14 +1565,12 @@ func (s *state) loadCurrentValidators() error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( validatorIt.Error(), subnetValidatorIt.Error(), delegatorIt.Error(), subnetDelegatorIt.Error(), ) - return errs.Err } func (s *state) loadPendingValidators() error { @@ -1682,30 +1649,40 @@ func (s *state) loadPendingValidators() error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( validatorIt.Error(), subnetValidatorIt.Error(), delegatorIt.Error(), subnetDelegatorIt.Error(), ) - return errs.Err } // Invariant: initValidatorSets requires loadCurrentValidators to have already // been called. func (s *state) initValidatorSets() error { - if s.cfg.Validators.Count(constants.PrimaryNetworkID) != 0 { - // Enforce the invariant that the validator set is empty here. - return errValidatorSetAlreadyPopulated - } - err := s.ApplyCurrentValidators(constants.PrimaryNetworkID, s.cfg.Validators) - if err != nil { - return err - } + for subnetID, validators := range s.currentStakers.validators { + if s.cfg.Validators.Count(subnetID) != 0 { + // Enforce the invariant that the validator set is empty here. 
+ return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) + } - vl := validators.NewLogger(s.ctx.Log, s.bootstrapped, constants.PrimaryNetworkID, s.ctx.NodeID) - s.cfg.Validators.RegisterCallbackListener(constants.PrimaryNetworkID, vl) + for nodeID, validator := range validators { + validatorStaker := validator.validator + if err := s.cfg.Validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { + return err + } + + delegatorIterator := NewTreeIterator(validator.delegators) + for delegatorIterator.Next() { + delegatorStaker := delegatorIterator.Value() + if err := s.cfg.Validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { + delegatorIterator.Release() + return err + } + } + delegatorIterator.Release() + } + } s.metrics.SetLocalStake(s.cfg.Validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) totalWeight, err := s.cfg.Validators.TotalWeight(constants.PrimaryNetworkID) @@ -1713,26 +1690,11 @@ func (s *state) initValidatorSets() error { return fmt.Errorf("failed to get total weight of primary network validators: %w", err) } s.metrics.SetTotalStake(totalWeight) - - for subnetID := range s.cfg.TrackedSubnets { - if s.cfg.Validators.Count(subnetID) != 0 { - // Enforce the invariant that the validator set is empty here. 
- return errValidatorSetAlreadyPopulated - } - err := s.ApplyCurrentValidators(subnetID, s.cfg.Validators) - if err != nil { - return err - } - - vl := validators.NewLogger(s.ctx.Log, s.bootstrapped, subnetID, s.ctx.NodeID) - s.cfg.Validators.RegisterCallbackListener(subnetID, vl) - } return nil } func (s *state) write(updateValidators bool, height uint64) error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( s.writeBlocks(), s.writeCurrentStakers(updateValidators, height), s.writePendingStakers(), @@ -1747,12 +1709,10 @@ func (s *state) write(updateValidators bool, height uint64) error { s.writeChains(), s.writeMetadata(), ) - return errs.Err } func (s *state) Close() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( s.pendingSubnetValidatorBaseDB.Close(), s.pendingSubnetDelegatorBaseDB.Close(), s.pendingDelegatorBaseDB.Close(), @@ -1775,7 +1735,6 @@ func (s *state) Close() error { s.blockDB.Close(), s.blockIDDB.Close(), ) - return errs.Err } func (s *state) sync(genesis []byte) error { @@ -2071,7 +2030,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error // // Note: We store the compressed public key here. pkBytes := bls.PublicKeyToBytes(staker.PublicKey) - if err := nestedPKDiffDB.Put(nodeID[:], pkBytes); err != nil { + if err := nestedPKDiffDB.Put(nodeID.Bytes(), pkBytes); err != nil { return err } } @@ -2110,7 +2069,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error if err != nil { return fmt.Errorf("failed to serialize validator weight diff: %w", err) } - if err := nestedWeightDiffDB.Put(nodeID[:], weightDiffBytes); err != nil { + if err := nestedWeightDiffDB.Put(nodeID.Bytes(), weightDiffBytes); err != nil { return err } @@ -2119,11 +2078,6 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error continue } - // We only track the current validator set of tracked subnets. 
- if subnetID != constants.PrimaryNetworkID && !s.cfg.TrackedSubnets.Contains(subnetID) { - continue - } - if weightDiff.Decrease { err = s.cfg.Validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) } else { @@ -2537,14 +2491,13 @@ func (s *state) PruneAndIndex(lock sync.Locker, log logging.Logger) error { // attempt to commit to disk while a block is concurrently being // accepted. lock.Lock() - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( s.Commit(), blockIterator.Error(), ) lock.Unlock() - if errs.Errored() { - return errs.Err + if err != nil { + return err } // We release the iterator here to allow the underlying database to diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 5a29619c1beb..ae79415f4bbf 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -21,7 +21,6 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/units" @@ -178,7 +177,6 @@ func newStateFromDB(require *require.Assertions, db database.Database) State { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }), - &utils.Atomic[bool]{}, ) require.NoError(err) require.NotNil(state) diff --git a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go index 821a3b7da849..c70bf720bfe3 100644 --- a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go @@ -54,11 +54,11 @@ func TestAddPermissionlessPrimaryDelegatorSerialization(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 
0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) simpleAddPrimaryTx := &AddPermissionlessDelegatorTx{ BaseTx: BaseTx{ @@ -768,11 +768,11 @@ func TestAddPermissionlessSubnetDelegatorSerialization(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) subnetID := ids.ID{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, diff --git a/vms/platformvm/txs/add_permissionless_validator_tx_test.go b/vms/platformvm/txs/add_permissionless_validator_tx_test.go index 79b1a64abd00..80e4d3b6ae93 100644 --- a/vms/platformvm/txs/add_permissionless_validator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_validator_tx_test.go @@ -60,11 +60,11 @@ func TestAddPermissionlessPrimaryValidator(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) simpleAddPrimaryTx := &AddPermissionlessValidatorTx{ BaseTx: BaseTx{ @@ -725,11 +725,11 @@ func TestAddPermissionlessSubnetValidator(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) subnetID := ids.ID{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 
diff --git a/vms/platformvm/txs/base_tx.go b/vms/platformvm/txs/base_tx.go index 2aa95f56cc68..5ffb308fe425 100644 --- a/vms/platformvm/txs/base_tx.go +++ b/vms/platformvm/txs/base_tx.go @@ -16,6 +16,8 @@ import ( ) var ( + _ UnsignedTx = (*BaseTx)(nil) + ErrNilTx = errors.New("tx is nil") errOutputsNotSorted = errors.New("outputs not sorted") @@ -96,3 +98,7 @@ func (tx *BaseTx) SyntacticVerify(ctx *snow.Context) error { return nil } } + +func (tx *BaseTx) Visit(visitor Visitor) error { + return visitor.BaseTx(tx) +} diff --git a/vms/platformvm/txs/base_tx_test.go b/vms/platformvm/txs/base_tx_test.go index 073b27f25056..c6cba1570312 100644 --- a/vms/platformvm/txs/base_tx_test.go +++ b/vms/platformvm/txs/base_tx_test.go @@ -10,65 +10,443 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" ) -func TestBaseTxMarshalJSON(t *testing.T) { +func TestBaseTxSerialization(t *testing.T) { require := require.New(t) - blockchainID := ids.ID{1} - utxoTxID := ids.ID{2} - assetID := ids.ID{3} - fxID := ids.ID{4} - tx := &BaseTx{BaseTx: avax.BaseTx{ - BlockchainID: blockchainID, - NetworkID: 4, - Ins: []*avax.TransferableInput{ - { - FxID: fxID, - UTXOID: avax.UTXOID{TxID: utxoTxID, OutputIndex: 5}, - Asset: avax.Asset{ID: assetID}, - In: &avax.TestTransferable{Val: 100}, + addr := ids.ShortID{ + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + + avaxAssetID, err := 
ids.FromString("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z") + require.NoError(err) + + customAssetID := ids.ID{ + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + } + + txID := ids.ID{ + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + } + + simpleBaseTx := &BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MilliAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{5}, + }, + }, + }, }, + Memo: types.JSONByteSlice{}, }, - Outs: []*avax.TransferableOutput{ - { - FxID: fxID, - Asset: avax.Asset{ID: assetID}, - Out: &avax.TestTransferable{Val: 100}, + } + require.NoError(simpleBaseTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedSimpleBaseTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // BaseTx Type ID + 0x00, 0x00, 0x00, 0x22, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of outputs + 0x00, 0x00, 0x00, 0x00, + // Number of inputs + 0x00, 0x00, 0x00, 0x01, + // Inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 
0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount = 1 MilliAvax + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x40, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x05, + // length of memo + 0x00, 0x00, 0x00, 0x00, + } + var unsignedSimpleBaseTx UnsignedTx = simpleBaseTx + unsignedSimpleBaseTxBytes, err := Codec.Marshal(Version, &unsignedSimpleBaseTx) + require.NoError(err) + require.Equal(expectedUnsignedSimpleBaseTxBytes, unsignedSimpleBaseTxBytes) + + complexBaseTx := &BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 87654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 12345678, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 876543210, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 0xffffffffffffffff, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + }, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.Avax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2, 5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + 
In: &stakeable.LockIn{ + Locktime: 876543210, + TransferableIn: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 3, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0x1000000000000000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + }, + }, }, + Memo: types.JSONByteSlice("😅\nwell that's\x01\x23\x45!"), }, - Memo: []byte{1, 2, 3}, - }} + } + avax.SortTransferableOutputs(complexBaseTx.Outs, Codec) + utils.Sort(complexBaseTx.Ins) + require.NoError(complexBaseTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) - txJSONBytes, err := json.MarshalIndent(tx, "", "\t") + expectedUnsignedComplexBaseTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // BaseTx Type ID + 0x00, 0x00, 0x00, 0x22, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of outputs + 0x00, 0x00, 0x00, 0x02, + // Outputs[0] + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // secp256k1fx output locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x61, 0x4e, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // Outputs[1] + // custom asset ID + 
0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // secp256k1fx output locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // number of inputs + 0x00, 0x00, 0x00, 0x03, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount = 1 Avax + 0x00, 0x00, 0x00, 0x00, 0x3b, 0x9a, 0xca, 0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x02, + // index of first signer + 0x00, 0x00, 0x00, 0x02, + // index of second signer + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // Custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 
0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // Stakeable locked input type ID + 0x00, 0x00, 0x00, 0x15, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x00, + // inputs[2] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x03, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x00, + // length of memo + 0x00, 0x00, 0x00, 0x14, + // memo + 0xf0, 0x9f, 0x98, 0x85, 0x0a, 0x77, 0x65, 0x6c, + 0x6c, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, + 0x01, 0x23, 0x45, 0x21, + } + var unsignedComplexBaseTx UnsignedTx = complexBaseTx + unsignedComplexBaseTxBytes, err := Codec.Marshal(Version, &unsignedComplexBaseTx) require.NoError(err) + require.Equal(expectedUnsignedComplexBaseTxBytes, unsignedComplexBaseTxBytes) + aliaser := ids.NewAliaser() + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + + unsignedComplexBaseTx.InitCtx(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + BCLookup: aliaser, + }) + + unsignedComplexBaseTxJSONBytes, err := json.MarshalIndent(unsignedComplexBaseTx, "", "\t") + require.NoError(err) 
require.Equal(`{ - "networkID": 4, - "blockchainID": "SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg", + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", "outputs": [ { - "assetID": "2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU", - "fxID": "2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY", + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "output": { - "Err": null, - "Val": 100 + "locktime": 87654321, + "output": { + "addresses": [], + "amount": 1, + "locktime": 12345678, + "threshold": 0 + } + } + }, + { + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 876543210, + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 18446744073709551615, + "locktime": 0, + "threshold": 1 + } } } ], "inputs": [ { - "txID": "t64jLxDRmxo8y48WjbRALPAZuSDZ6qPVaaeDzxHA4oSojhLt", - "outputIndex": 5, - "assetID": "2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU", - "fxID": "2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY", + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 1, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1000000000, + "signatureIndices": [ + 2, + 5 + ] + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 2, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "locktime": 876543210, + "input": { + "amount": 17293822569102704639, + "signatureIndices": [ + 0 + ] + } + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 3, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": 
"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "input": { - "Err": null, - "Val": 100 + "amount": 1152921504606846976, + "signatureIndices": [] } } ], - "memo": "0x010203" -}`, string(txJSONBytes)) + "memo": "0xf09f98850a77656c6c2074686174277301234521" +}`, string(unsignedComplexBaseTxJSONBytes)) } diff --git a/vms/platformvm/txs/builder/builder.go b/vms/platformvm/txs/builder/builder.go index 3f13ec2ecad9..6c796d085abb 100644 --- a/vms/platformvm/txs/builder/builder.go +++ b/vms/platformvm/txs/builder/builder.go @@ -92,6 +92,17 @@ type DecisionTxBuilder interface { keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) + + // amount: amount the sender is sending + // owner: recipient of the funds + // keys: keys to sign the tx and pay the amount + // changeAddr: address to send change to, if there is any + NewBaseTx( + amount uint64, + owner secp256k1fx.OutputOwners, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + ) (*txs.Tx, error) } type ProposalTxBuilder interface { @@ -661,3 +672,43 @@ func (b *builder) NewTransferSubnetOwnershipTx( } return tx, tx.SyntacticVerify(b.ctx) } + +func (b *builder) NewBaseTx( + amount uint64, + owner secp256k1fx.OutputOwners, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + toBurn, err := math.Add64(amount, b.cfg.TxFee) + if err != nil { + return nil, fmt.Errorf("amount (%d) + tx fee(%d) overflows", amount, b.cfg.TxFee) + } + ins, outs, _, signers, err := b.Spend(b.state, keys, 0, toBurn, changeAddr) + if err != nil { + return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) + } + + outs = append(outs, &avax.TransferableOutput{ + Asset: avax.Asset{ID: b.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: owner, + }, + }) + + avax.SortTransferableOutputs(outs, txs.Codec) + + utx := &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: b.ctx.NetworkID, + BlockchainID: b.ctx.ChainID, + Ins: ins, + Outs: outs, + }, + } + tx, 
err := txs.NewSigned(utx, txs.Codec, signers) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(b.ctx) +} diff --git a/vms/platformvm/txs/builder/mock_builder.go b/vms/platformvm/txs/builder/mock_builder.go index 79291afb7cd7..19f74a7bed2f 100644 --- a/vms/platformvm/txs/builder/mock_builder.go +++ b/vms/platformvm/txs/builder/mock_builder.go @@ -14,6 +14,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" secp256k1 "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" + secp256k1fx "github.com/ava-labs/avalanchego/vms/secp256k1fx" gomock "go.uber.org/mock/gomock" ) @@ -100,6 +101,21 @@ func (mr *MockBuilderMockRecorder) NewAdvanceTimeTx(arg0 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAdvanceTimeTx", reflect.TypeOf((*MockBuilder)(nil).NewAdvanceTimeTx), arg0) } +// NewBaseTx mocks base method. +func (m *MockBuilder) NewBaseTx(arg0 uint64, arg1 secp256k1fx.OutputOwners, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBaseTx", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewBaseTx indicates an expected call of NewBaseTx. +func (mr *MockBuilderMockRecorder) NewBaseTx(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBaseTx", reflect.TypeOf((*MockBuilder)(nil).NewBaseTx), arg0, arg1, arg2, arg3) +} + // NewCreateChainTx mocks base method. 
func (m *MockBuilder) NewCreateChainTx(arg0 ids.ID, arg1 []byte, arg2 ids.ID, arg3 []ids.ID, arg4 string, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { m.ctrl.T.Helper() diff --git a/vms/platformvm/txs/codec.go b/vms/platformvm/txs/codec.go index 1d4eac1f700e..d743376d1acb 100644 --- a/vms/platformvm/txs/codec.go +++ b/vms/platformvm/txs/codec.go @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" @@ -103,5 +104,8 @@ func RegisterUnsignedTxsTypes(targetCodec linearcodec.Codec) error { } func RegisterDUnsignedTxsTypes(targetCodec linearcodec.Codec) error { - return targetCodec.RegisterType(&TransferSubnetOwnershipTx{}) + return utils.Err( + targetCodec.RegisterType(&TransferSubnetOwnershipTx{}), + targetCodec.RegisterType(&BaseTx{}), + ) } diff --git a/vms/platformvm/txs/executor/advance_time_test.go b/vms/platformvm/txs/executor/advance_time_test.go index 9bf5aafed7ac..694d6b7ff7fa 100644 --- a/vms/platformvm/txs/executor/advance_time_test.go +++ b/vms/platformvm/txs/executor/advance_time_test.go @@ -462,8 +462,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { dummyHeight := uint64(1) // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - // Starts after the corre + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -492,7 +491,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { // The above validator is now part of the staking set // Queue a staker that joins the 
staker set after the above validator leaves - subnetVdr2NodeID := ids.NodeID(preFundedKeys[1].PublicKey().Address()) + subnetVdr2NodeID := genesisNodeIDs[1] tx, err = env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time @@ -567,7 +566,7 @@ func TestTrackedSubnet(t *testing.T) { } // Add a subnet validator to the staker set - subnetValidatorNodeID := preFundedKeys[0].PublicKey().Address() + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) @@ -575,7 +574,7 @@ func TestTrackedSubnet(t *testing.T) { 1, // Weight uint64(subnetVdr1StartTime.Unix()), // Start time uint64(subnetVdr1EndTime.Unix()), // end time - ids.NodeID(subnetValidatorNodeID), // Node ID + subnetValidatorNodeID, // Node ID subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, @@ -616,8 +615,8 @@ func TestTrackedSubnet(t *testing.T) { env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - _, ok := env.config.Validators.GetValidator(subnetID, ids.NodeID(subnetValidatorNodeID)) - require.Equal(tracked, ok) + _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) + require.True(ok) }) } } @@ -923,7 +922,7 @@ func addPendingValidator( uint64(startTime.Unix()), uint64(endTime.Unix()), nodeID, - ids.ShortID(nodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, keys, ids.ShortEmpty, diff --git a/vms/platformvm/txs/executor/atomic_tx_executor.go b/vms/platformvm/txs/executor/atomic_tx_executor.go index 09d374b3a395..3b7dc60ec173 100644 --- a/vms/platformvm/txs/executor/atomic_tx_executor.go +++ b/vms/platformvm/txs/executor/atomic_tx_executor.go @@ -76,6 +76,10 @@ func (*AtomicTxExecutor) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDele return ErrWrongTxType } +func (*AtomicTxExecutor) 
BaseTx(*txs.BaseTx) error { + return ErrWrongTxType +} + func (e *AtomicTxExecutor) ImportTx(tx *txs.ImportTx) error { return e.atomicTx(tx) } diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index a8debf3b58bc..72315d3c4dd5 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -78,8 +78,7 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { require.NoError(err) // Generate new, random key to sign tx with - factory := secp256k1.Factory{} - key, err := factory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) // Replace a valid signature with one from another key diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index 74a5bb40764f..c26a865bdc6f 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -20,7 +20,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" @@ -36,8 +36,6 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" @@ -75,12 +73,19 @@ var ( testSubnet1 *txs.Tx 
testSubnet1ControlKeys = preFundedKeys[0:3] - // Used to create and use keys. - testKeyfactory secp256k1.Factory + // Node IDs of genesis validators. Initialized in init function + genesisNodeIDs []ids.NodeID errMissing = errors.New("missing") ) +func init() { + genesisNodeIDs = make([]ids.NodeID, len(preFundedKeys)) + for i := range preFundedKeys { + genesisNodeIDs[i] = ids.GenerateTestNodeID() + } +} + type mutableSharedMemory struct { atomic.SharedMemory } @@ -121,8 +126,7 @@ func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { config := defaultConfig(postBanff, postCortina) clk := defaultClock(postBanff || postCortina) - baseDBManager := manager.NewMemDB(version.CurrentDatabase) - baseDB := versiondb.New(baseDBManager.Current().Database) + baseDB := versiondb.New(memdb.New()) ctx, msm := defaultCtx(baseDB) fx := defaultFx(clk, ctx.Log, isBootstrapped.Get()) @@ -230,7 +234,6 @@ func defaultState( ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) if err != nil { panic(err) @@ -373,15 +376,14 @@ func buildGenesisTest(ctx *snow.Context) []byte { } } - genesisValidators := make([]api.PermissionlessValidator, len(preFundedKeys)) - for i, key := range preFundedKeys { - nodeID := ids.NodeID(key.PublicKey().Address()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) if err != nil { panic(err) } - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -444,10 +446,8 @@ func shutdownEnvironment(env *environment) error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( env.state.Close(), env.baseDB.Close(), ) - return errs.Err } diff 
--git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index 9cbe0a517ce9..3d78429cf906 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -36,8 +36,7 @@ func TestNewImportTx(t *testing.T) { expectedErr error } - factory := secp256k1.Factory{} - sourceKey, err := factory.NewPrivateKey() + sourceKey, err := secp256k1.NewPrivateKey() require.NoError(t, err) cnt := new(byte) diff --git a/vms/platformvm/txs/executor/proposal_tx_executor.go b/vms/platformvm/txs/executor/proposal_tx_executor.go index dd66815b9c8f..bd329b3f2576 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor.go @@ -101,6 +101,10 @@ func (*ProposalTxExecutor) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershi return ErrWrongTxType } +func (*ProposalTxExecutor) BaseTx(*txs.BaseTx) error { + return ErrWrongTxType +} + func (e *ProposalTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { // AddValidatorTx is a proposal transaction until the Banff fork // activation. 
Following the activation, AddValidatorTxs must be issued into diff --git a/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/vms/platformvm/txs/executor/proposal_tx_executor_test.go index 4bebbb4105d9..bc95f3ed39b2 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -25,7 +25,7 @@ import ( func TestProposalTxExecuteAddDelegator(t *testing.T) { dummyHeight := uint64(1) rewardAddress := preFundedKeys[0].PublicKey().Address() - nodeID := ids.NodeID(rewardAddress) + nodeID := genesisNodeIDs[0] newValidatorID := ids.GenerateTestNodeID() newValidatorStartTime := uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()) @@ -288,8 +288,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { require.NoError(shutdownEnvironment(env)) }() - nodeID := preFundedKeys[0].PublicKey().Address() - + nodeID := genesisNodeIDs[0] { // Case: Proposed validator currently validating primary network // but stops validating subnet after stops validating primary network @@ -298,7 +297,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix())+1, - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -330,7 +329,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix()), - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -353,11 +352,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { } // Add a validator to pending validator set of primary network - key, err := testKeyfactory.NewPrivateKey() - require.NoError(err) - pendingDSValidatorID := 
ids.NodeID(key.PublicKey().Address()) - - // starts validating primary network 10 seconds after genesis + // Starts validating primary network 10 seconds after genesis + pendingDSValidatorID := ids.GenerateTestNodeID() dsStartTime := defaultGenesisTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) @@ -366,7 +362,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(dsStartTime.Unix()), // start time uint64(dsEndTime.Unix()), // end time pendingDSValidatorID, // node ID - nodeID, // reward address + ids.GenerateTestShortID(), // reward address reward.PercentDenominator, // shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, @@ -516,8 +512,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) @@ -548,7 +544,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, @@ -573,7 +569,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultValidateStartTime.Unix())+1, // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -606,8 +602,8 @@ func 
TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultGenesisTime.Unix())+1, // start time uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) @@ -642,8 +638,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultGenesisTime.Unix())+1, // start time uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, ids.ShortEmpty, // change addr ) @@ -677,8 +673,8 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultGenesisTime.Unix())+1, // start time uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) @@ -784,12 +780,14 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { } { + nodeID := genesisNodeIDs[0] + // Case: Validator already validating primary network tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix()), - ids.NodeID(preFundedKeys[0].Address()), + nodeID, ids.ShortEmpty, reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index 2aa9e9c4a400..22bab59afd3b 100644 --- 
a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -514,3 +514,34 @@ func (e *StandardTxExecutor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwn return nil } + +func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { + if !e.Backend.Config.IsDActivated(e.State.GetTimestamp()) { + return ErrDUpgradeNotActive + } + + // Verify the tx is well-formed + if err := e.Tx.SyntacticVerify(e.Ctx); err != nil { + return err + } + + // Verify the flowcheck + if err := e.FlowChecker.VerifySpend( + tx, + e.State, + tx.Ins, + tx.Outs, + e.Tx.Creds, + map[ids.ID]uint64{ + e.Ctx.AVAXAssetID: e.Config.TxFee, + }, + ); err != nil { + return err + } + + // Consume the UTXOS + avax.Consume(e.State, tx.Ins) + // Produce the UTXOS + avax.Produce(e.State, e.Tx.ID(), tx.Outs) + return nil +} diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 5a9aaf73ed1d..78e15078e133 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -99,7 +99,7 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { func TestStandardTxExecutorAddDelegator(t *testing.T) { dummyHeight := uint64(1) rewardAddress := preFundedKeys[0].PublicKey().Address() - nodeID := ids.NodeID(rewardAddress) + nodeID := genesisNodeIDs[0] newValidatorID := ids.GenerateTestNodeID() newValidatorStartTime := uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()) @@ -380,7 +380,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { require.NoError(shutdownEnvironment(env)) }() - nodeID := preFundedKeys[0].PublicKey().Address() + nodeID := genesisNodeIDs[0] env.config.BanffTime = env.state.GetTimestamp() { @@ -392,7 +392,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, uint64(startTime.Unix()), uint64(defaultValidateEndTime.Unix())+1, - 
ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -420,7 +420,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, uint64(defaultValidateStartTime.Unix()+1), uint64(defaultValidateEndTime.Unix()), - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -439,12 +439,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { } // Add a validator to pending validator set of primary network - key, err := testKeyfactory.NewPrivateKey() - require.NoError(err) - - pendingDSValidatorID := ids.NodeID(key.PublicKey().Address()) - - // starts validating primary network 10 seconds after genesis + // Starts validating primary network 10 seconds after genesis + pendingDSValidatorID := ids.GenerateTestNodeID() dsStartTime := defaultGenesisTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) @@ -453,7 +449,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(dsStartTime.Unix()), // start time uint64(dsEndTime.Unix()), // end time pendingDSValidatorID, // node ID - nodeID, // reward address + ids.GenerateTestShortID(), // reward address reward.PercentDenominator, // shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, @@ -586,8 +582,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) @@ -614,7 +610,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // 
weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, @@ -640,7 +636,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr @@ -670,8 +666,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix()), // start time uint64(startTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) @@ -703,8 +699,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix()), // start time uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) @@ -736,8 +732,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix()), // start time uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], 
preFundedKeys[1]}, ids.ShortEmpty, // change addr ) @@ -768,8 +764,8 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(startTime.Unix())+1, // start time uint64(startTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) diff --git a/vms/platformvm/txs/executor/tx_mempool_verifier.go b/vms/platformvm/txs/executor/tx_mempool_verifier.go index aa8d1dfaeb86..6704ccbd0489 100644 --- a/vms/platformvm/txs/executor/tx_mempool_verifier.go +++ b/vms/platformvm/txs/executor/tx_mempool_verifier.go @@ -78,6 +78,10 @@ func (v *MempoolTxVerifier) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwne return v.standardTx(tx) } +func (v *MempoolTxVerifier) BaseTx(tx *txs.BaseTx) error { + return v.standardTx(tx) +} + func (v *MempoolTxVerifier) standardTx(tx txs.UnsignedTx) error { baseState, err := v.standardBaseState() if err != nil { diff --git a/vms/platformvm/txs/mempool/issuer.go b/vms/platformvm/txs/mempool/issuer.go index e24afb5282da..b56c10190cf8 100644 --- a/vms/platformvm/txs/mempool/issuer.go +++ b/vms/platformvm/txs/mempool/issuer.go @@ -79,6 +79,11 @@ func (i *issuer) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) error return nil } +func (i *issuer) BaseTx(*txs.BaseTx) error { + i.m.addDecisionTx(i.tx) + return nil +} + func (i *issuer) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { i.m.addStakerTx(i.tx) return nil diff --git a/vms/platformvm/txs/mempool/mempool.go b/vms/platformvm/txs/mempool/mempool.go index 7d1ba9b609bd..91b547cf5414 100644 --- a/vms/platformvm/txs/mempool/mempool.go +++ b/vms/platformvm/txs/mempool/mempool.go @@ -6,6 +6,7 @@ package mempool import ( "errors" "fmt" + "time" "github.com/prometheus/client_golang/prometheus" @@ 
-18,9 +19,9 @@ import ( ) const ( - // targetTxSize is the maximum number of bytes a transaction can use to be + // MaxTxSize is the maximum number of bytes a transaction can use to be // allowed into the mempool. - targetTxSize = 64 * units.KiB + MaxTxSize = 64 * units.KiB // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache droppedTxIDsCacheSize = 64 @@ -34,7 +35,10 @@ const ( var ( _ Mempool = (*mempool)(nil) - errMempoolFull = errors.New("mempool is full") + errDuplicateTx = errors.New("duplicate tx") + errTxTooLarge = errors.New("tx too large") + errMempoolFull = errors.New("mempool is full") + errConflictsWithOtherTx = errors.New("tx conflicts with other tx") ) type BlockTimer interface { @@ -97,7 +101,7 @@ type mempool struct { blkTimer BlockTimer } -func NewMempool( +func New( namespace string, registerer prometheus.Registerer, blkTimer BlockTimer, @@ -158,25 +162,30 @@ func (m *mempool) Add(tx *txs.Tx) error { // Note: a previously dropped tx can be re-added txID := tx.ID() if m.Has(txID) { - return fmt.Errorf("duplicate tx %s", txID) + return fmt.Errorf("%w: %s", errDuplicateTx, txID) } - txBytes := tx.Bytes() - if len(txBytes) > targetTxSize { - return fmt.Errorf("tx %s size (%d) > target size (%d)", txID, len(txBytes), targetTxSize) + txSize := len(tx.Bytes()) + if txSize > MaxTxSize { + return fmt.Errorf("%w: %s size (%d) > max size (%d)", + errTxTooLarge, + txID, + txSize, + MaxTxSize, + ) } - if len(txBytes) > m.bytesAvailable { - return fmt.Errorf("%w, tx %s size (%d) exceeds available space (%d)", + if txSize > m.bytesAvailable { + return fmt.Errorf("%w: %s size (%d) > available space (%d)", errMempoolFull, txID, - len(txBytes), + txSize, m.bytesAvailable, ) } inputs := tx.Unsigned.InputIDs() if m.consumedUTXOs.Overlaps(inputs) { - return fmt.Errorf("tx %s conflicts with a transaction in the mempool", txID) + return fmt.Errorf("%w: %s", errConflictsWithOtherTx, txID) } if err := tx.Unsigned.Visit(&issuer{ @@ -297,3 +306,34 @@ 
func (m *mempool) deregister(tx *txs.Tx) { inputs := tx.Unsigned.InputIDs() m.consumedUTXOs.Difference(inputs) } + +// Drops all [txs.Staker] transactions whose [StartTime] is before +// [minStartTime] from [mempool]. The dropped tx ids are returned. +// +// TODO: Remove once [StartTime] field is ignored in staker txs +func DropExpiredStakerTxs(mempool Mempool, minStartTime time.Time) []ids.ID { + var droppedTxIDs []ids.ID + + for mempool.HasStakerTx() { + tx := mempool.PeekStakerTx() + startTime := tx.Unsigned.(txs.Staker).StartTime() + if !startTime.Before(minStartTime) { + // The next proposal tx in the mempool starts sufficiently far in + // the future. + break + } + + txID := tx.ID() + err := fmt.Errorf( + "synchrony bound (%s) is later than staker start time (%s)", + minStartTime, + startTime, + ) + + mempool.Remove([]*txs.Tx{tx}) + mempool.MarkDropped(txID, err) // cache tx as dropped + droppedTxIDs = append(droppedTxIDs, txID) + } + + return droppedTxIDs +} diff --git a/vms/platformvm/txs/mempool/mempool_test.go b/vms/platformvm/txs/mempool/mempool_test.go index bdcd3101233f..dbfe895f9d9b 100644 --- a/vms/platformvm/txs/mempool/mempool_test.go +++ b/vms/platformvm/txs/mempool/mempool_test.go @@ -34,7 +34,7 @@ func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, &noopBlkTimer{}) require.NoError(err) decisionTxs, err := createTestDecisionTxs(1) @@ -58,7 +58,7 @@ func TestDecisionTxsInMempool(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, &noopBlkTimer{}) require.NoError(err) decisionTxs, err := createTestDecisionTxs(2) @@ -110,7 +110,7 @@ func TestProposalTxsInMempool(t *testing.T) { require := require.New(t) registerer := 
prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, &noopBlkTimer{}) require.NoError(err) // The proposal txs are ordered by decreasing start time. This means after diff --git a/vms/platformvm/txs/mempool/remover.go b/vms/platformvm/txs/mempool/remover.go index e418cf46c342..b21071b16465 100644 --- a/vms/platformvm/txs/mempool/remover.go +++ b/vms/platformvm/txs/mempool/remover.go @@ -62,6 +62,11 @@ func (r *remover) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) erro return nil } +func (r *remover) BaseTx(*txs.BaseTx) error { + r.m.removeDecisionTxs([]*txs.Tx{r.tx}) + return nil +} + func (r *remover) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { r.m.removeStakerTx(r.tx) return nil diff --git a/vms/platformvm/txs/remove_subnet_validator_tx_test.go b/vms/platformvm/txs/remove_subnet_validator_tx_test.go index 02a9fdce7496..4b1f381c039b 100644 --- a/vms/platformvm/txs/remove_subnet_validator_tx_test.go +++ b/vms/platformvm/txs/remove_subnet_validator_tx_test.go @@ -51,11 +51,11 @@ func TestRemoveSubnetValidatorTxSerialization(t *testing.T) { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, } - nodeID := ids.NodeID{ + nodeID := ids.BuildTestNodeID([]byte{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x11, 0x22, 0x33, 0x44, - } + }) subnetID := ids.ID{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, @@ -263,7 +263,7 @@ func TestRemoveSubnetValidatorTxSerialization(t *testing.T) { } avax.SortTransferableOutputs(complexRemoveValidatorTx.Outs, Codec) utils.Sort(complexRemoveValidatorTx.Ins) - require.NoError(simpleRemoveValidatorTx.SyntacticVerify(&snow.Context{ + require.NoError(complexRemoveValidatorTx.SyntacticVerify(&snow.Context{ NetworkID: 1, ChainID: constants.PlatformChainID, AVAXAssetID: avaxAssetID, 
diff --git a/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go b/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go index 7e6f5835a283..e8cddeb3e1d0 100644 --- a/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go +++ b/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go @@ -103,7 +103,7 @@ func TestTransferSubnetOwnershipTxSerialization(t *testing.T) { expectedUnsignedSimpleTransferSubnetOwnershipTxBytes := []byte{ // Codec version 0x00, 0x00, - // RemoveSubnetValidatorTx Type ID + // TransferSubnetOwnershipTx Type ID 0x00, 0x00, 0x00, 0x21, // Mainnet network ID 0x00, 0x00, 0x00, 0x01, @@ -276,7 +276,7 @@ func TestTransferSubnetOwnershipTxSerialization(t *testing.T) { } avax.SortTransferableOutputs(complexTransferSubnetOwnershipTx.Outs, Codec) utils.Sort(complexTransferSubnetOwnershipTx.Ins) - require.NoError(simpleTransferSubnetOwnershipTx.SyntacticVerify(&snow.Context{ + require.NoError(complexTransferSubnetOwnershipTx.SyntacticVerify(&snow.Context{ NetworkID: 1, ChainID: constants.PlatformChainID, AVAXAssetID: avaxAssetID, diff --git a/vms/platformvm/txs/txheap/by_end_time_test.go b/vms/platformvm/txs/txheap/by_end_time_test.go index 8ea152d27e02..5d95a2c66ad7 100644 --- a/vms/platformvm/txs/txheap/by_end_time_test.go +++ b/vms/platformvm/txs/txheap/by_end_time_test.go @@ -23,7 +23,7 @@ func TestByEndTime(t *testing.T) { utx0 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{0}, + NodeID: ids.BuildTestNodeID([]byte{0}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 1, }, @@ -34,7 +34,7 @@ func TestByEndTime(t *testing.T) { utx1 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 2, }, @@ -45,7 +45,7 @@ func TestByEndTime(t *testing.T) { utx2 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: 
uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 3, }, diff --git a/vms/platformvm/txs/txheap/by_start_time_test.go b/vms/platformvm/txs/txheap/by_start_time_test.go index 164e2ec35e59..e00d42076015 100644 --- a/vms/platformvm/txs/txheap/by_start_time_test.go +++ b/vms/platformvm/txs/txheap/by_start_time_test.go @@ -23,7 +23,7 @@ func TestByStartTime(t *testing.T) { utx0 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{0}, + NodeID: ids.BuildTestNodeID([]byte{0}), Start: uint64(baseTime.Unix()) + 1, End: uint64(baseTime.Unix()) + 1, }, @@ -34,7 +34,7 @@ func TestByStartTime(t *testing.T) { utx1 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()) + 2, End: uint64(baseTime.Unix()) + 2, }, @@ -45,7 +45,7 @@ func TestByStartTime(t *testing.T) { utx2 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()) + 3, End: uint64(baseTime.Unix()) + 3, }, diff --git a/vms/platformvm/txs/validator_test.go b/vms/platformvm/txs/validator_test.go index 3361d11939b4..fbef50981a14 100644 --- a/vms/platformvm/txs/validator_test.go +++ b/vms/platformvm/txs/validator_test.go @@ -9,22 +9,20 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) const defaultWeight = 10000 -// each key controls an address that has [defaultBalance] AVAX at genesis -var keys = secp256k1.TestKeys() - func TestBoundedBy(t *testing.T) { require := require.New(t) + nodeID := ids.GenerateTestNodeID() + // case 1: a starts, a finishes, b starts, b finishes aStartTime := uint64(0) aEndTIme := uint64(1) a := &Validator{ - NodeID: ids.NodeID(keys[0].PublicKey().Address()), + NodeID: nodeID, Start: aStartTime, End: aEndTIme, Wght: defaultWeight, @@ -33,7 +31,7 @@ func TestBoundedBy(t 
*testing.T) { bStartTime := uint64(2) bEndTime := uint64(3) b := &Validator{ - NodeID: ids.NodeID(keys[0].PublicKey().Address()), + NodeID: nodeID, Start: bStartTime, End: bEndTime, Wght: defaultWeight, diff --git a/vms/platformvm/txs/visitor.go b/vms/platformvm/txs/visitor.go index 5476d73c7e86..05a21c355801 100644 --- a/vms/platformvm/txs/visitor.go +++ b/vms/platformvm/txs/visitor.go @@ -19,4 +19,5 @@ type Visitor interface { AddPermissionlessValidatorTx(*AddPermissionlessValidatorTx) error AddPermissionlessDelegatorTx(*AddPermissionlessDelegatorTx) error TransferSubnetOwnershipTx(*TransferSubnetOwnershipTx) error + BaseTx(*BaseTx) error } diff --git a/vms/platformvm/validator_set_property_test.go b/vms/platformvm/validator_set_property_test.go index d984a1a986b0..2ac0d4358d7d 100644 --- a/vms/platformvm/validator_set_property_test.go +++ b/vms/platformvm/validator_set_property_test.go @@ -20,7 +20,7 @@ import ( "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -36,7 +36,6 @@ import ( "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/block" @@ -375,7 +374,7 @@ func addPrimaryValidatorWithoutBLSKey(vm *VM, data *validatorInputData) (*state. 
func internalAddValidator(vm *VM, signedTx *txs.Tx) (*state.Staker, error) { stakerTx := signedTx.Unsigned.(txs.StakerTx) - if err := vm.Builder.AddUnverifiedTx(signedTx); err != nil { + if err := vm.Network.IssueTx(context.Background(), signedTx); err != nil { return nil, fmt.Errorf("could not add tx to mempool: %w", err) } @@ -748,9 +747,9 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { }} vm.clock.Set(forkTime.Add(time.Second)) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - chainDBManager := baseDBManager.NewPrefixDBManager([]byte{0}) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + baseDB := memdb.New() + chainDB := prefixdb.New([]byte{0}, baseDB) + atomicDB := prefixdb.New([]byte{1}, baseDB) msgChan := make(chan common.Message, 1) ctx := defaultContext(t) @@ -774,7 +773,7 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { err = vm.Initialize( context.Background(), ctx, - chainDBManager, + chainDB, genesisBytes, nil, nil, @@ -803,7 +802,7 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { if err != nil { return nil, ids.Empty, err } - if err := vm.Builder.AddUnverifiedTx(testSubnet1); err != nil { + if err := vm.Network.IssueTx(context.Background(), testSubnet1); err != nil { return nil, ids.Empty, err } @@ -842,7 +841,7 @@ func buildCustomGenesis() ([]byte, error) { // won't find next staker to promote/evict from stakers set. 
Contrary to // what happens with production code we push such validator at the end of // times, so to avoid interference with our tests - nodeID := ids.NodeID(keys[len(keys)-1].PublicKey().Address()) + nodeID := genesisNodeIDs[len(genesisNodeIDs)-1] addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) if err != nil { return nil, err @@ -850,8 +849,8 @@ func buildCustomGenesis() ([]byte, error) { starTime := mockable.MaxTime.Add(-1 * defaultMinStakingDuration) endTime := mockable.MaxTime - genesisValidator := api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidator := api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(starTime.Unix()), EndTime: json.Uint64(endTime.Unix()), NodeID: nodeID, @@ -872,7 +871,7 @@ func buildCustomGenesis() ([]byte, error) { NetworkID: json.Uint32(constants.UnitTestID), AvaxAssetID: avaxAssetID, UTXOs: genesisUTXOs, - Validators: []api.PermissionlessValidator{genesisValidator}, + Validators: []api.GenesisPermissionlessValidator{genesisValidator}, Chains: nil, Time: json.Uint64(defaultGenesisTime.Unix()), InitialSupply: json.Uint64(360 * units.MegaAvax), diff --git a/vms/platformvm/validators/manager.go b/vms/platformvm/validators/manager.go index fb7c314c90a7..a4c5c87a3040 100644 --- a/vms/platformvm/validators/manager.go +++ b/vms/platformvm/validators/manager.go @@ -48,10 +48,6 @@ type State interface { GetLastAccepted() ids.ID GetStatelessBlock(blockID ids.ID) (block.Block, error) - // ApplyCurrentValidators adds all the current validators and delegators of - // [subnetID] into [vdrs]. - ApplyCurrentValidators(subnetID ids.ID, vdrs validators.Manager) error - // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis // block until it has applied all of the diffs up to and including // [endHeight]. Applying the diffs modifies [validators]. 
@@ -346,22 +342,7 @@ func (m *manager) getCurrentValidatorSets( ctx context.Context, subnetID ids.ID, ) (map[ids.NodeID]*validators.GetValidatorOutput, map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { - subnetManager := m.cfg.Validators - if subnetManager.Count(subnetID) == 0 { - // If this subnet isn't tracked, there will not be any registered - // validators. To calculate the current validators we need to first - // fetch them from state. We generate a new manager as we don't want to - // modify that long-lived reference. - // - // TODO: remove this once all subnets are included in the validator - // manager. - subnetManager = validators.NewManager() - if err := m.state.ApplyCurrentValidators(subnetID, subnetManager); err != nil { - return nil, nil, 0, err - } - } - - subnetMap := subnetManager.GetMap(subnetID) + subnetMap := m.cfg.Validators.GetMap(subnetID) primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) currentHeight, err := m.getCurrentHeight(ctx) return subnetMap, primaryMap, currentHeight, err diff --git a/vms/platformvm/validators/manager_benchmark_test.go b/vms/platformvm/validators/manager_benchmark_test.go index 54d0e264e63e..0664c085c942 100644 --- a/vms/platformvm/validators/manager_benchmark_test.go +++ b/vms/platformvm/validators/manager_benchmark_test.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/formatting" @@ -66,8 +65,8 @@ func BenchmarkGetValidatorSet(b *testing.B) { addr, err := address.FormatBech32(constants.UnitTestHRP, ids.GenerateTestShortID().Bytes()) require.NoError(err) - genesisValidators := []api.PermissionlessValidator{{ - Staker: api.Staker{ + 
genesisValidators := []api.GenesisPermissionlessValidator{{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(genesisTime.Unix()), EndTime: json.Uint64(genesisEndTime.Unix()), NodeID: ids.GenerateTestNodeID(), @@ -129,7 +128,6 @@ func BenchmarkGetValidatorSet(b *testing.B) { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }), - new(utils.Atomic[bool]), ) require.NoError(err) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index b6e8937ecc8a..7f7568deb813 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" @@ -29,7 +29,6 @@ import ( "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" @@ -37,6 +36,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -62,6 +62,7 @@ var ( type VM struct { config.Config blockbuilder.Builder 
+ network.Network validators.State metrics metrics.Metrics @@ -73,8 +74,8 @@ type VM struct { uptimeManager uptime.Manager // The context of this vm - ctx *snow.Context - dbManager manager.Manager + ctx *snow.Context + db database.Database state state.State @@ -96,7 +97,7 @@ type VM struct { func (vm *VM) Initialize( ctx context.Context, chainCtx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, _ []byte, configBytes []byte, @@ -124,7 +125,7 @@ func (vm *VM) Initialize( } vm.ctx = chainCtx - vm.dbManager = dbManager + vm.db = db vm.codecRegistry = linearcodec.NewDefault() vm.fx = &secp256k1fx.Fx{} @@ -135,7 +136,7 @@ func (vm *VM) Initialize( rewards := reward.NewCalculator(vm.RewardConfig) vm.state, err = state.New( - vm.dbManager.Current().Database, + vm.db, genesisBytes, registerer, &vm.Config, @@ -143,7 +144,6 @@ func (vm *VM) Initialize( vm.ctx, vm.metrics, rewards, - &vm.bootstrapped, ) if err != nil { return err @@ -179,7 +179,7 @@ func (vm *VM) Initialize( // Note: There is a circular dependency between the mempool and block // builder which is broken by passing in the vm. 
- mempool, err := mempool.NewMempool("mempool", registerer, vm) + mempool, err := mempool.New("mempool", registerer, vm) if err != nil { return fmt.Errorf("failed to create mempool: %w", err) } @@ -191,13 +191,19 @@ func (vm *VM) Initialize( txExecutorBackend, validatorManager, ) + vm.Network = network.New( + txExecutorBackend.Ctx, + vm.manager, + mempool, + txExecutorBackend.Config.PartialSyncPrimaryNetwork, + appSender, + ) vm.Builder = blockbuilder.New( mempool, vm.txBuilder, txExecutorBackend, vm.manager, toEngine, - appSender, ) // Create all of the chains that the database says exist @@ -305,17 +311,21 @@ func (vm *VM) onNormalOperationsStarted() error { } primaryVdrIDs := vm.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - if err := vm.uptimeManager.StartTracking(primaryVdrIDs, constants.PrimaryNetworkID); err != nil { return err } + vl := validators.NewLogger(vm.ctx.Log, constants.PrimaryNetworkID, vm.ctx.NodeID) + vm.Validators.RegisterCallbackListener(constants.PrimaryNetworkID, vl) + for subnetID := range vm.TrackedSubnets { vdrIDs := vm.Validators.GetValidatorIDs(subnetID) - if err := vm.uptimeManager.StartTracking(vdrIDs, subnetID); err != nil { return err } + + vl := validators.NewLogger(vm.ctx.Log, subnetID, vm.ctx.NodeID) + vm.Validators.RegisterCallbackListener(subnetID, vl) } if err := vm.state.Commit(); err != nil { @@ -340,7 +350,7 @@ func (vm *VM) SetState(_ context.Context, state snow.State) error { // Shutdown this blockchain func (vm *VM) Shutdown(context.Context) error { - if vm.dbManager == nil { + if vm.db == nil { return nil } @@ -364,12 +374,10 @@ func (vm *VM) Shutdown(context.Context) error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( vm.state.Close(), - vm.dbManager.Close(), + vm.db.Close(), ) - return errs.Err } func (vm *VM) ParseBlock(_ context.Context, b []byte) (snowman.Block, error) { @@ -393,7 +401,9 @@ func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { // SetPreference sets the preferred 
block to be the one with ID [blkID] func (vm *VM) SetPreference(_ context.Context, blkID ids.ID) error { - vm.Builder.SetPreference(blkID) + if vm.manager.SetPreference(blkID) { + vm.Builder.ResetBlockTimer() + } return nil } diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 41e7eafbdfe8..5b50c9895622 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" @@ -25,11 +25,9 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" @@ -74,7 +72,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addValidatorTx)) addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -106,7 +104,7 @@ func 
TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addFirstDelegatorTx)) addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -140,7 +138,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(addSecondDelegatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addSecondDelegatorTx)) addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -164,7 +162,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require.NoError(err) // trigger block creation - err = vm.Builder.AddUnverifiedTx(addThirdDelegatorTx) + err = vm.Network.IssueTx(context.Background(), addThirdDelegatorTx) require.ErrorIs(err, executor.ErrOverDelegated) } @@ -217,10 +215,11 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { vm.ctx.Lock.Unlock() }() - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) id := key.PublicKey().Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() // create valid tx @@ -228,7 +227,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { validatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, @@ -237,7 +236,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the add validator tx - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := 
vm.Builder.BuildBlock(context.Background()) @@ -251,7 +250,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator1Stake, uint64(delegator1StartTime.Unix()), uint64(delegator1EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -259,7 +258,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the first add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addFirstDelegatorTx)) // trigger block creation for the first add delegator tx addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -273,7 +272,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator2Stake, uint64(delegator2StartTime.Unix()), uint64(delegator2EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -281,7 +280,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the second add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addSecondDelegatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addSecondDelegatorTx)) // trigger block creation for the second add delegator tx addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -295,7 +294,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator3Stake, uint64(delegator3StartTime.Unix()), uint64(delegator3EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -303,7 +302,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the third add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addThirdDelegatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addThirdDelegatorTx)) // trigger block 
creation for the third add delegator tx addThirdDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -317,7 +316,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator4Stake, uint64(delegator4StartTime.Unix()), uint64(delegator4EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -325,7 +324,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the fourth add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addFourthDelegatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addFourthDelegatorTx)) // trigger block creation for the fourth add delegator tx addFourthDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -343,8 +342,8 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { require := require.New(t) _, genesisBytes := defaultGenesis(t) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + baseDB := memdb.New() + atomicDB := prefixdb.New([]byte{1}, baseDB) vm := &VM{Config: config.Config{ Chains: chains.TestManager, @@ -367,7 +366,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { require.NoError(vm.Initialize( context.Background(), ctx, - baseDBManager, + baseDB, genesisBytes, nil, nil, @@ -412,11 +411,10 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { ) require.NoError(err) - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessStandardBlk, err := block.NewBanffStandardBlock( @@ -473,21 +471,17 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { vm.ctx.Lock.Unlock() }() + nodeID := ids.GenerateTestNodeID() 
newValidatorStartTime := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime := newValidatorStartTime.Add(defaultMinStakingDuration) - key, err := testKeyFactory.NewPrivateKey() - require.NoError(err) - - nodeID := ids.NodeID(key.PublicKey().Address()) - // Create the tx to add a new validator addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( vm.MinValidatorStake, uint64(newValidatorStartTime.Unix()), uint64(newValidatorEndTime.Unix()), nodeID, - ids.ShortID(nodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, @@ -495,11 +489,10 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require.NoError(err) // Create the standard block to add the new validator - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessBlk, err := block.NewBanffStandardBlock( @@ -652,7 +645,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { vm.Config.Validators = validators.NewManager() execCfg, _ := config.GetExecutionConfig(nil) newState, err := state.New( - vm.dbManager.Current().Database, + vm.db, nil, prometheus.NewRegistry(), &vm.Config, @@ -660,7 +653,6 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), - &utils.Atomic[bool]{}, ) require.NoError(err) @@ -693,7 +685,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { newValidatorStartTime0 := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime0 := newValidatorStartTime0.Add(defaultMaxStakingDuration) - nodeID0 := ids.NodeID(ids.GenerateTestShortID()) + nodeID0 := ids.GenerateTestNodeID() // Create the tx to add the first new 
validator addValidatorTx0, err := vm.txBuilder.NewAddValidatorTx( @@ -701,7 +693,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { uint64(newValidatorStartTime0.Unix()), uint64(newValidatorEndTime0.Unix()), nodeID0, - ids.ShortID(nodeID0), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, @@ -709,11 +701,10 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) // Create the standard block to add the first new validator - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessAddValidatorStandardBlk0, err := block.NewBanffStandardBlock( @@ -866,7 +857,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { newValidatorStartTime1 := newValidatorStartTime0.Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime1 := newValidatorStartTime1.Add(defaultMaxStakingDuration) - nodeID1 := ids.NodeID(ids.GenerateTestShortID()) + nodeID1 := ids.GenerateTestNodeID() // Create the tx to add the second new validator addValidatorTx1, err := vm.txBuilder.NewAddValidatorTx( @@ -874,7 +865,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { uint64(newValidatorStartTime1.Unix()), uint64(newValidatorEndTime1.Unix()), nodeID1, - ids.ShortID(nodeID1), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[1]}, ids.ShortEmpty, @@ -961,7 +952,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { vm.Config.Validators = validators.NewManager() execCfg, _ := config.GetExecutionConfig(nil) newState, err := state.New( - vm.dbManager.Current().Database, + vm.db, nil, prometheus.NewRegistry(), &vm.Config, @@ -969,7 +960,6 @@ func 
TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), - &utils.Atomic[bool]{}, ) require.NoError(err) @@ -1006,22 +996,16 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { vm.ctx.Lock.Unlock() }() - nodeID0 := ids.NodeID(keys[0].PublicKey().Address()) - nodeID1 := ids.NodeID(keys[1].PublicKey().Address()) - nodeID2 := ids.NodeID(keys[2].PublicKey().Address()) - nodeID3 := ids.NodeID(keys[3].PublicKey().Address()) - nodeID4 := ids.NodeID(keys[4].PublicKey().Address()) - currentHeight, err := vm.GetCurrentHeight(context.Background()) require.NoError(err) require.Equal(uint64(1), currentHeight) expectedValidators1 := map[ids.NodeID]uint64{ - nodeID0: defaultWeight, - nodeID1: defaultWeight, - nodeID2: defaultWeight, - nodeID3: defaultWeight, - nodeID4: defaultWeight, + genesisNodeIDs[0]: defaultWeight, + genesisNodeIDs[1]: defaultWeight, + genesisNodeIDs[2]: defaultWeight, + genesisNodeIDs[3]: defaultWeight, + genesisNodeIDs[4]: defaultWeight, } validators, err := vm.GetValidatorSet(context.Background(), 1, constants.PrimaryNetworkID) require.NoError(err) @@ -1032,14 +1016,14 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { newValidatorStartTime0 := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime0 := newValidatorStartTime0.Add(defaultMaxStakingDuration) - nodeID5 := ids.GenerateTestNodeID() + extraNodeID := ids.GenerateTestNodeID() // Create the tx to add the first new validator addValidatorTx0, err := vm.txBuilder.NewAddValidatorTx( vm.MaxValidatorStake, uint64(newValidatorStartTime0.Unix()), uint64(newValidatorEndTime0.Unix()), - nodeID5, + extraNodeID, ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, @@ -1048,11 +1032,10 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { require.NoError(err) // Create the standard block to add the first new validator - preferred, 
err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessStandardBlk, err := block.NewBanffStandardBlock( @@ -1085,7 +1068,8 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { // Create the standard block that moves the first new validator from the // pending validator set into the current validator set. - preferred, err = vm.Builder.Preferred() + preferredID = vm.manager.Preferred() + preferred, err = vm.manager.GetBlock(preferredID) require.NoError(err) preferredID = preferred.ID() preferredHeight = preferred.Height() @@ -1115,12 +1099,12 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { } expectedValidators2 := map[ids.NodeID]uint64{ - nodeID0: defaultWeight, - nodeID1: defaultWeight, - nodeID2: defaultWeight, - nodeID3: defaultWeight, - nodeID4: defaultWeight, - nodeID5: vm.MaxValidatorStake, + genesisNodeIDs[0]: defaultWeight, + genesisNodeIDs[1]: defaultWeight, + genesisNodeIDs[2]: defaultWeight, + genesisNodeIDs[3]: defaultWeight, + genesisNodeIDs[4]: defaultWeight, + extraNodeID: vm.MaxValidatorStake, } validators, err = vm.GetValidatorSet(context.Background(), 3, constants.PrimaryNetworkID) require.NoError(err) @@ -1153,10 +1137,11 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { vm.ctx.Lock.Unlock() }() - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) - id := key.PublicKey().Address() + id := key.Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() // create valid tx @@ -1164,7 +1149,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { validatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, 
[]*secp256k1.PrivateKey{keys[0], keys[1]}, @@ -1173,7 +1158,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(err) // issue the add validator tx - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1187,7 +1172,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator1Stake, uint64(delegator1StartTime.Unix()), uint64(delegator1EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -1195,7 +1180,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(err) // issue the first add delegator tx - require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addFirstDelegatorTx)) // trigger block creation for the first add delegator tx addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1209,7 +1194,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator2Stake, uint64(delegator2StartTime.Unix()), uint64(delegator2EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -1218,7 +1203,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { // attempting to issue the second add delegator tx should fail because the // total stake weight would go over the limit. 
- err = vm.Builder.AddUnverifiedTx(addSecondDelegatorTx) + err = vm.Network.IssueTx(context.Background(), addSecondDelegatorTx) require.ErrorIs(err, executor.ErrOverDelegated) } @@ -1237,17 +1222,18 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t vm.ctx.Lock.Unlock() }() - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) - id := key.PublicKey().Address() + id := key.Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, @@ -1255,7 +1241,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1272,7 +1258,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + require.NoError(vm.Network.IssueTx(context.Background(), createSubnetTx)) // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1285,14 +1271,14 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addSubnetValidatorTx)) + 
require.NoError(vm.Network.IssueTx(context.Background(), addSubnetValidatorTx)) // trigger block creation for the validator tx addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1310,7 +1296,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.Empty(emptyValidatorSet) removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -1321,7 +1307,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t // validator set into the current validator set. vm.clock.Set(validatorStartTime) - require.NoError(vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), removeSubnetValidatorTx)) // trigger block creation for the validator tx removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1354,17 +1340,18 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t vm.ctx.Lock.Unlock() }() - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) id := key.PublicKey().Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, @@ -1372,7 +1359,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1389,7 +1376,7 @@ func 
TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + require.NoError(vm.Network.IssueTx(context.Background(), createSubnetTx)) // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1398,21 +1385,18 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(createSubnetBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - vm.TrackedSubnets.Add(createSubnetTx.ID()) - require.NoError(vm.state.ApplyCurrentValidators(createSubnetTx.ID(), vm.Validators)) - addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addSubnetValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addSubnetValidatorTx)) // trigger block creation for the validator tx addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1422,7 +1406,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, @@ -1433,7 +1417,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t // validator set into the current validator set. 
vm.clock.Set(validatorStartTime) - require.NoError(vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), removeSubnetValidatorTx)) // trigger block creation for the validator tx removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1527,7 +1511,7 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) require.NoError(primaryTx.SyntacticVerify(vm.ctx)) - require.NoError(vm.Builder.AddUnverifiedTx(primaryTx)) + require.NoError(vm.Network.IssueTx(context.Background(), primaryTx)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting primary validator to current @@ -1554,7 +1538,7 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(subnetTx)) + require.NoError(vm.Network.IssueTx(context.Background(), subnetTx)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting the subnet validator to current @@ -1658,7 +1642,7 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) require.NoError(uPrimaryRestartTx.SyntacticVerify(vm.ctx)) - require.NoError(vm.Builder.AddUnverifiedTx(primaryRestartTx)) + require.NoError(vm.Network.IssueTx(context.Background(), primaryRestartTx)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting restarted primary validator to current @@ -1766,7 +1750,7 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(primaryTx1)) + require.NoError(vm.Network.IssueTx(context.Background(), primaryTx1)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting primary validator to current @@ -1858,7 +1842,7 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { require.NoError(err) 
require.NoError(uPrimaryRestartTx.SyntacticVerify(vm.ctx)) - require.NoError(vm.Builder.AddUnverifiedTx(primaryRestartTx)) + require.NoError(vm.Network.IssueTx(context.Background(), primaryRestartTx)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting restarted primary validator to current @@ -1929,7 +1913,7 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(primaryTx1)) + require.NoError(vm.Network.IssueTx(context.Background(), primaryTx1)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting primary validator to current @@ -1956,7 +1940,7 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(subnetTx)) + require.NoError(vm.Network.IssueTx(context.Background(), subnetTx)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting the subnet validator to current @@ -2060,7 +2044,7 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { require.NoError(err) require.NoError(uPrimaryRestartTx.SyntacticVerify(vm.ctx)) - require.NoError(vm.Builder.AddUnverifiedTx(primaryRestartTx)) + require.NoError(vm.Network.IssueTx(context.Background(), primaryRestartTx)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting restarted primary validator to current @@ -2138,7 +2122,7 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(primaryTx1)) + require.NoError(vm.Network.IssueTx(context.Background(), primaryTx1)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting primary validator to current @@ -2162,7 +2146,7 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(subnetTx)) + 
require.NoError(vm.Network.IssueTx(context.Background(), subnetTx)) require.NoError(buildAndAcceptStandardBlock(vm)) // move time ahead, promoting the subnet validator to current diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 2db4146d05df..c8f849ecea1a 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" @@ -100,6 +100,9 @@ var ( // each key controls an address that has [defaultBalance] AVAX at genesis keys = secp256k1.TestKeys() + // Node IDs of genesis validators. Initialized in init function + genesisNodeIDs []ids.NodeID + defaultMinValidatorStake = 5 * units.MilliAvax defaultMaxValidatorStake = 500 * units.MilliAvax defaultMinDelegatorStake = 1 * units.MilliAvax @@ -116,12 +119,20 @@ var ( xChainID = ids.Empty.Prefix(0) cChainID = ids.Empty.Prefix(1) - // Used to create and use keys. 
- testKeyFactory secp256k1.Factory - errMissing = errors.New("missing") ) +func init() { + for _, key := range keys { + // TODO: use ids.GenerateTestNodeID() instead of ids.BuildTestNodeID + // Can be done when TestGetState is refactored + nodeBytes := key.PublicKey().Address() + nodeID := ids.BuildTestNodeID(nodeBytes[:]) + + genesisNodeIDs = append(genesisNodeIDs, nodeID) + } +} + type mutableSharedMemory struct { atomic.SharedMemory } @@ -178,13 +189,12 @@ func defaultGenesis(t *testing.T) (*api.BuildGenesisArgs, []byte) { } } - genesisValidators := make([]api.PermissionlessValidator, len(keys)) - for i, key := range keys { - nodeID := ids.NodeID(key.PublicKey().Address()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -246,14 +256,13 @@ func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.Bu } } - genesisValidators := make([]api.PermissionlessValidator, len(keys)) - for i, key := range keys { - nodeID := ids.NodeID(key.PublicKey().Address()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ 
-318,9 +327,9 @@ func defaultVM(t *testing.T) (*VM, database.Database, *mutableSharedMemory) { BanffTime: banffForkTime, }} - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - chainDBManager := baseDBManager.NewPrefixDBManager([]byte{0}) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + db := memdb.New() + chainDB := prefixdb.New([]byte{0}, db) + atomicDB := prefixdb.New([]byte{1}, db) vm.clock.Set(banffForkTime.Add(time.Second)) msgChan := make(chan common.Message, 1) @@ -344,7 +353,7 @@ func defaultVM(t *testing.T) (*VM, database.Database, *mutableSharedMemory) { require.NoError(vm.Initialize( context.Background(), ctx, - chainDBManager, + chainDB, genesisBytes, nil, nil, @@ -367,14 +376,14 @@ func defaultVM(t *testing.T) (*VM, database.Database, *mutableSharedMemory) { keys[0].PublicKey().Address(), // change addr ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(testSubnet1)) + require.NoError(vm.Network.IssueTx(context.Background(), testSubnet1)) blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) require.NoError(blk.Verify(context.Background())) require.NoError(blk.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - return vm, baseDBManager.Current().Database, msm + return vm, db, msm } // Ensure genesis state is parsed from bytes and stored correctly @@ -423,8 +432,7 @@ func TestGenesis(t *testing.T) { // Ensure current validator set of primary network is correct require.Len(genesisState.Validators, vm.Validators.Count(constants.PrimaryNetworkID)) - for _, key := range keys { - nodeID := ids.NodeID(key.PublicKey().Address()) + for _, nodeID := range genesisNodeIDs { _, ok := vm.Validators.GetValidator(constants.PrimaryNetworkID, nodeID) require.True(ok) } @@ -463,7 +471,7 @@ func TestAddValidatorCommit(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + 
require.NoError(vm.Network.IssueTx(context.Background(), tx)) blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -490,10 +498,9 @@ func TestInvalidAddValidatorCommit(t *testing.T) { vm.ctx.Lock.Unlock() }() + nodeID := ids.GenerateTestNodeID() startTime := defaultGenesisTime.Add(-txexecutor.SyncBound).Add(-1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) - key, _ := testKeyFactory.NewPrivateKey() - nodeID := ids.NodeID(key.PublicKey().Address()) // create invalid tx tx, err := vm.txBuilder.NewAddValidatorTx( @@ -501,18 +508,18 @@ func TestInvalidAddValidatorCommit(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), nodeID, - ids.ShortID(nodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr ) require.NoError(err) - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - - preferredID := preferred.ID() preferredHeight := preferred.Height() + statelessBlk, err := block.NewBanffStandardBlock( preferred.Timestamp(), preferredID, @@ -563,7 +570,7 @@ func TestAddValidatorReject(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + require.NoError(vm.Network.IssueTx(context.Background(), tx)) blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -589,7 +596,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { }() // Use nodeID that is already in the genesis - repeatNodeID := ids.NodeID(keys[0].PublicKey().Address()) + repeatNodeID := genesisNodeIDs[0] startTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) @@ -600,7 +607,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), repeatNodeID, - ids.ShortID(repeatNodeID), + 
ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr @@ -608,7 +615,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { require.NoError(err) // trigger block creation - err = vm.Builder.AddUnverifiedTx(tx) + err = vm.Network.IssueTx(context.Background(), tx) require.ErrorIs(err, txexecutor.ErrAlreadyValidator) } @@ -624,7 +631,7 @@ func TestAddSubnetValidatorAccept(t *testing.T) { startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + nodeID := genesisNodeIDs[0] // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] @@ -641,7 +648,7 @@ func TestAddSubnetValidatorAccept(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + require.NoError(vm.Network.IssueTx(context.Background(), tx)) blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -670,7 +677,7 @@ func TestAddSubnetValidatorReject(t *testing.T) { startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + nodeID := genesisNodeIDs[0] // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] @@ -687,7 +694,7 @@ func TestAddSubnetValidatorReject(t *testing.T) { require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + require.NoError(vm.Network.IssueTx(context.Background(), tx)) blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -882,7 +889,7 @@ func TestCreateChain(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + require.NoError(vm.Network.IssueTx(context.Background(), tx)) blk, err := vm.Builder.BuildBlock(context.Background()) 
require.NoError(err) // should contain proposal to create chain @@ -922,8 +929,7 @@ func TestCreateSubnet(t *testing.T) { vm.ctx.Lock.Unlock() }() - nodeID := ids.NodeID(keys[0].PublicKey().Address()) - + nodeID := genesisNodeIDs[0] createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( 1, // threshold []ids.ShortID{ // control keys @@ -935,7 +941,7 @@ func TestCreateSubnet(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + require.NoError(vm.Network.IssueTx(context.Background(), createSubnetTx)) // should contain proposal to create subnet blk, err := vm.Builder.BuildBlock(context.Background()) @@ -976,7 +982,7 @@ func TestCreateSubnet(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addValidatorTx)) blk, err = vm.Builder.BuildBlock(context.Background()) // should add validator to the new subnet require.NoError(err) @@ -1092,7 +1098,7 @@ func TestAtomicImport(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + require.NoError(vm.Network.IssueTx(context.Background(), tx)) blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -1139,10 +1145,9 @@ func TestOptimisticAtomicImport(t *testing.T) { }} require.NoError(tx.Initialize(txs.Codec)) - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - - preferredID := preferred.ID() preferredHeight := preferred.Height() statelessBlk, err := block.NewApricotAtomicBlock( @@ -1175,9 +1180,9 @@ func TestOptimisticAtomicImport(t *testing.T) { func TestRestartFullyAccepted(t *testing.T) { require := require.New(t) _, genesisBytes := defaultGenesis(t) - db := manager.NewMemDB(version.Semantic1_0_0) + db := memdb.New() - firstDB := db.NewPrefixDBManager([]byte{}) + firstDB := prefixdb.New([]byte{}, db) firstVM 
:= &VM{Config: config.Config{ Chains: chains.TestManager, Validators: validators.NewManager(), @@ -1190,8 +1195,8 @@ func TestRestartFullyAccepted(t *testing.T) { firstCtx := defaultContext(t) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + baseDB := memdb.New() + atomicDB := prefixdb.New([]byte{1}, baseDB) m := atomic.NewMemory(atomicDB) msm := &mutableSharedMemory{ SharedMemory: m.NewSharedMemory(firstCtx.ChainID), @@ -1218,13 +1223,6 @@ func TestRestartFullyAccepted(t *testing.T) { genesisID, err := firstVM.LastAccepted(context.Background()) require.NoError(err) - nextChainTime := initialClkTime.Add(time.Second) - firstVM.clock.Set(initialClkTime) - preferred, err := firstVM.Builder.Preferred() - require.NoError(err) - preferredID := preferred.ID() - preferredHeight := preferred.Height() - // include a tx to make the block be accepted tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1245,6 +1243,14 @@ func TestRestartFullyAccepted(t *testing.T) { }} require.NoError(tx.Initialize(txs.Codec)) + nextChainTime := initialClkTime.Add(time.Second) + firstVM.clock.Set(initialClkTime) + + preferredID := firstVM.manager.Preferred() + preferred, err := firstVM.manager.GetBlock(preferredID) + require.NoError(err) + preferredHeight := preferred.Height() + statelessBlk, err := block.NewBanffStandardBlock( nextChainTime, preferredID, @@ -1282,7 +1288,7 @@ func TestRestartFullyAccepted(t *testing.T) { secondCtx.Lock.Unlock() }() - secondDB := db.NewPrefixDBManager([]byte{}) + secondDB := prefixdb.New([]byte{}, db) secondMsgChan := make(chan common.Message, 1) require.NoError(secondVM.Initialize( context.Background(), @@ -1307,10 +1313,9 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { _, genesisBytes := defaultGenesis(t) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - vmDBManager := baseDBManager.NewPrefixDBManager([]byte("vm")) - 
bootstrappingDB := prefixdb.New([]byte("bootstrapping"), baseDBManager.Current().Database) - + baseDB := memdb.New() + vmDB := prefixdb.New([]byte("vm"), baseDB) + bootstrappingDB := prefixdb.New([]byte("bootstrapping"), baseDB) blocked, err := queue.NewWithMissing(bootstrappingDB, "", prometheus.NewRegistry()) require.NoError(err) @@ -1328,7 +1333,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { vm.clock.Set(initialClkTime) ctx := defaultContext(t) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + atomicDB := prefixdb.New([]byte{1}, baseDB) m := atomic.NewMemory(atomicDB) msm := &mutableSharedMemory{ SharedMemory: m.NewSharedMemory(ctx.ChainID), @@ -1343,7 +1348,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require.NoError(vm.Initialize( context.Background(), ctx, - vmDBManager, + vmDB, genesisBytes, nil, nil, @@ -1352,9 +1357,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { nil, )) - preferred, err := vm.Builder.Preferred() - require.NoError(err) - // include a tx to make the block be accepted tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1376,8 +1378,12 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require.NoError(tx.Initialize(txs.Codec)) nextChainTime := initialClkTime.Add(time.Second) - preferredID := preferred.ID() + + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) + require.NoError(err) preferredHeight := preferred.Height() + statelessBlk, err := block.NewBanffStandardBlock( nextChainTime, preferredID, @@ -1392,7 +1398,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { advanceTimeBlkID := advanceTimeBlk.ID() advanceTimeBlkBytes := advanceTimeBlk.Bytes() - peerID := ids.NodeID{1, 2, 3, 4, 5, 4, 3, 2, 1} + peerID := ids.BuildTestNodeID([]byte{1, 2, 3, 4, 5, 4, 3, 2, 1}) beacons := validators.NewManager() require.NoError(beacons.AddStaker(ctx.SubnetID, peerID, nil, ids.Empty, 1)) @@ -1486,7 +1492,16 @@ func 
TestBootstrapPartiallyAccepted(t *testing.T) { beacons.RegisterCallbackListener(ctx.SubnetID, startup) // The engine handles consensus - consensus := &smcon.Topological{} + snowGetHandler, err := snowgetter.New( + vm, + sender, + consensusCtx.Log, + time.Second, + 2000, + consensusCtx.Registerer, + ) + require.NoError(err) + commonCfg := common.Config{ Ctx: consensusCtx, Beacons: beacons, @@ -1495,14 +1510,10 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { Alpha: (totalWeight + 1) / 2, Sender: sender, BootstrapTracker: bootstrapTracker, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, SharedCfg: &common.SharedConfig{}, } - snowGetHandler, err := snowgetter.New(vm, commonCfg) - require.NoError(err) - bootstrapConfig := bootstrap.Config{ Config: commonCfg, AllGetsServer: snowGetHandler, @@ -1549,7 +1560,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, }, - Consensus: consensus, + Consensus: &smcon.Topological{}, } engine, err := smeng.New(engineConfig) require.NoError(err) @@ -1613,18 +1624,14 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { return nodeIDs } - frontier := []ids.ID{advanceTimeBlkID} + frontier := set.Of(advanceTimeBlkID) require.NoError(bootstrapper.Accepted(context.Background(), peerID, reqID, frontier)) externalSender.SendF = nil externalSender.CantSend = false require.NoError(bootstrapper.Ancestors(context.Background(), peerID, reqID, [][]byte{advanceTimeBlkBytes})) - - preferred, err = vm.Builder.Preferred() - require.NoError(err) - - require.Equal(advanceTimeBlk.ID(), preferred.ID()) + require.Equal(advanceTimeBlk.ID(), vm.manager.Preferred()) ctx.Lock.Unlock() chainRouter.Shutdown(context.Background()) @@ -1633,7 +1640,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { func TestUnverifiedParent(t *testing.T) { require := require.New(t) _, genesisBytes := defaultGenesis(t) - dbManager := manager.NewMemDB(version.Semantic1_0_0) vm := &VM{Config: 
config.Config{ Chains: chains.TestManager, @@ -1658,7 +1664,7 @@ func TestUnverifiedParent(t *testing.T) { require.NoError(vm.Initialize( context.Background(), ctx, - dbManager, + memdb.New(), genesisBytes, nil, nil, @@ -1687,10 +1693,11 @@ func TestUnverifiedParent(t *testing.T) { }} require.NoError(tx1.Initialize(txs.Codec)) - preferred, err := vm.Builder.Preferred() - require.NoError(err) nextChainTime := initialClkTime.Add(time.Second) - preferredID := preferred.ID() + + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) + require.NoError(err) preferredHeight := preferred.Height() statelessBlk, err := block.NewBanffStandardBlock( @@ -1745,7 +1752,7 @@ func TestMaxStakeAmount(t *testing.T) { vm.ctx.Lock.Unlock() }() - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + nodeID := genesisNodeIDs[0] tests := []struct { description string @@ -1790,9 +1797,9 @@ func TestMaxStakeAmount(t *testing.T) { func TestUptimeDisallowedWithRestart(t *testing.T) { require := require.New(t) _, genesisBytes := defaultGenesis(t) - db := manager.NewMemDB(version.Semantic1_0_0) + db := memdb.New() - firstDB := db.NewPrefixDBManager([]byte{}) + firstDB := prefixdb.New([]byte{}, db) const firstUptimePercentage = 20 // 20% firstVM := &VM{Config: config.Config{ Chains: chains.TestManager, @@ -1836,7 +1843,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { firstCtx.Lock.Unlock() // Restart the VM with a larger uptime requirement - secondDB := db.NewPrefixDBManager([]byte{}) + secondDB := prefixdb.New([]byte{}, db) const secondUptimePercentage = 21 // 21% > firstUptimePercentage, so uptime for reward is not met now secondVM := &VM{Config: config.Config{ Chains: chains.TestManager, @@ -1928,7 +1935,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { require := require.New(t) _, genesisBytes := defaultGenesis(t) - db := manager.NewMemDB(version.Semantic1_0_0) + db := 
memdb.New() vm := &VM{Config: config.Config{ Chains: chains.TestManager, @@ -2035,16 +2042,17 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { vm.ctx.Lock.Unlock() }() - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) id := key.PublicKey().Address() + nodeID := ids.GenerateTestNodeID() addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, @@ -2052,7 +2060,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + require.NoError(vm.Network.IssueTx(context.Background(), addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -2069,7 +2077,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + require.NoError(vm.Network.IssueTx(context.Background(), createSubnetTx)) // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -2082,7 +2090,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{key, keys[1]}, keys[1].Address(), @@ -2090,7 +2098,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(err) removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{key, keys[2]}, keys[2].Address(), @@ -2115,7 +2123,7 @@ func 
TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(block.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), ids.NodeID(id)) + _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) require.ErrorIs(err, database.ErrNotFound) } @@ -2138,7 +2146,7 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { require.NoError(err) subnetID := createSubnetTx.ID() - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + require.NoError(vm.Network.IssueTx(context.Background(), createSubnetTx)) createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -2170,7 +2178,7 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(transferSubnetOwnershipTx)) + require.NoError(vm.Network.IssueTx(context.Background(), transferSubnetOwnershipTx)) transferSubnetOwnershipBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -2193,3 +2201,78 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { } require.Equal(expectedOwner, subnetOwner) } + +func TestBaseTx(t *testing.T) { + require := require.New(t) + vm, _, _ := defaultVM(t) + vm.ctx.Lock.Lock() + defer func() { + require.NoError(vm.Shutdown(context.Background())) + vm.ctx.Lock.Unlock() + }() + + sendAmt := uint64(100000) + changeAddr := ids.ShortEmpty + + baseTx, err := vm.txBuilder.NewBaseTx( + sendAmt, + secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[1].Address(), + }, + }, + []*secp256k1.PrivateKey{keys[0]}, + changeAddr, + ) + require.NoError(err) + + totalInputAmt := uint64(0) + key0InputAmt := uint64(0) + for inputID := range baseTx.Unsigned.InputIDs() { + utxo, err := vm.state.GetUTXO(inputID) + require.NoError(err) + require.IsType(&secp256k1fx.TransferOutput{}, utxo.Out) + castOut := 
utxo.Out.(*secp256k1fx.TransferOutput) + if castOut.AddressesSet().Equals(set.Of(keys[0].Address())) { + key0InputAmt += castOut.Amt + } + totalInputAmt += castOut.Amt + } + require.Equal(totalInputAmt, key0InputAmt) + + totalOutputAmt := uint64(0) + key0OutputAmt := uint64(0) + key1OutputAmt := uint64(0) + changeAddrOutputAmt := uint64(0) + for _, output := range baseTx.Unsigned.Outputs() { + require.IsType(&secp256k1fx.TransferOutput{}, output.Out) + castOut := output.Out.(*secp256k1fx.TransferOutput) + if castOut.AddressesSet().Equals(set.Of(keys[0].Address())) { + key0OutputAmt += castOut.Amt + } + if castOut.AddressesSet().Equals(set.Of(keys[1].Address())) { + key1OutputAmt += castOut.Amt + } + if castOut.AddressesSet().Equals(set.Of(changeAddr)) { + changeAddrOutputAmt += castOut.Amt + } + totalOutputAmt += castOut.Amt + } + require.Equal(totalOutputAmt, key0OutputAmt+key1OutputAmt+changeAddrOutputAmt) + + require.Equal(vm.TxFee, totalInputAmt-totalOutputAmt) + require.Equal(sendAmt, key1OutputAmt) + + require.NoError(vm.Network.IssueTx(context.Background(), baseTx)) + baseTxBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + + baseTxRawBlock := baseTxBlock.(*blockexecutor.Block).Block + require.IsType(&block.BanffStandardBlock{}, baseTxRawBlock) + require.Contains(baseTxRawBlock.Txs(), baseTx) + + require.NoError(baseTxBlock.Verify(context.Background())) + require.NoError(baseTxBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) +} diff --git a/vms/platformvm/warp/codec.go b/vms/platformvm/warp/codec.go index 0213a6701c6e..cf4587224751 100644 --- a/vms/platformvm/warp/codec.go +++ b/vms/platformvm/warp/codec.go @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) const 
codecVersion = 0 @@ -20,12 +20,11 @@ func init() { c = codec.NewManager(math.MaxInt) lc := linearcodec.NewCustomMaxLength(math.MaxInt32) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( lc.RegisterType(&BitSetSignature{}), c.RegisterCodec(codecVersion, lc), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/vms/platformvm/warp/payload/codec.go b/vms/platformvm/warp/payload/codec.go index 31d20f6777ac..e2e8ddd7a7f5 100644 --- a/vms/platformvm/warp/payload/codec.go +++ b/vms/platformvm/warp/payload/codec.go @@ -6,8 +6,8 @@ package payload import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const ( @@ -27,13 +27,12 @@ func init() { c = codec.NewManager(MaxMessageSize) lc := linearcodec.NewCustomMaxLength(MaxSliceLen) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( lc.RegisterType(&Hash{}), lc.RegisterType(&AddressedCall{}), c.RegisterCodec(codecVersion, lc), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/vms/platformvm/warp/validator_test.go b/vms/platformvm/warp/validator_test.go index b306c82b79f0..9af37aed81f6 100644 --- a/vms/platformvm/warp/validator_test.go +++ b/vms/platformvm/warp/validator_test.go @@ -5,8 +5,8 @@ package warp import ( "context" - "fmt" "math" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -336,7 +336,7 @@ func BenchmarkGetCanonicalValidatorSet(b *testing.B) { }, } - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { + b.Run(strconv.Itoa(size), func(b *testing.B) { for i := 0; i < b.N; i++ { _, _, err := GetCanonicalValidatorSet(context.Background(), validatorState, pChainHeight, subnetID) require.NoError(b, err) diff --git a/vms/propertyfx/fx.go b/vms/propertyfx/fx.go index 
2719c37e2972..28d211a9b5ad 100644 --- a/vms/propertyfx/fx.go +++ b/vms/propertyfx/fx.go @@ -6,7 +6,7 @@ package propertyfx import ( "errors" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -32,15 +32,13 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { log.Debug("initializing nft fx") c := fx.VM.CodecRegistry() - errs := wrappers.Errs{} - errs.Add( + return utils.Err( c.RegisterType(&MintOutput{}), c.RegisterType(&OwnedOutput{}), c.RegisterType(&MintOperation{}), c.RegisterType(&BurnOperation{}), c.RegisterType(&Credential{}), ) - return errs.Err } func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { diff --git a/vms/proposervm/batched_vm_test.go b/vms/proposervm/batched_vm_test.go index 76f5e7ca31b8..326272275dac 100644 --- a/vms/proposervm/batched_vm_test.go +++ b/vms/proposervm/batched_vm_test.go @@ -11,7 +11,9 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" @@ -21,7 +23,6 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) @@ -984,7 +985,7 @@ func initTestRemoteProposerVM( coreVM.InitializeF = func( context.Context, *snow.Context, - manager.Manager, + database.Database, []byte, []byte, 
[]byte, @@ -1037,21 +1038,27 @@ func initTestRemoteProposerVM( return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil @@ -1061,13 +1068,10 @@ func initTestRemoteProposerVM( ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly initialState, nil, nil, diff --git a/vms/proposervm/block/codec.go b/vms/proposervm/block/codec.go index bf8089dbeb5a..6d68a4cc2fe7 100644 --- a/vms/proposervm/block/codec.go +++ b/vms/proposervm/block/codec.go @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) const codecVersion = 0 @@ -24,13 +24,12 @@ func init() { linearCodec := linearcodec.NewCustomMaxLength(math.MaxUint32) c = codec.NewManager(math.MaxInt) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( linearCodec.RegisterType(&statelessBlock{}), 
linearCodec.RegisterType(&option{}), c.RegisterCodec(codecVersion, linearCodec), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/vms/proposervm/post_fork_option_test.go b/vms/proposervm/post_fork_option_test.go index d713faffb551..09fe29730b6f 100644 --- a/vms/proposervm/post_fork_option_test.go +++ b/vms/proposervm/post_fork_option_test.go @@ -12,7 +12,6 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" @@ -672,7 +671,7 @@ func TestOptionTimestampValidity(t *testing.T) { coreVM.InitializeF = func( context.Context, *snow.Context, - manager.Manager, + database.Database, []byte, []byte, []byte, diff --git a/vms/proposervm/proposer/validators_test.go b/vms/proposervm/proposer/validators_test.go index a0703d498ec8..2f7913d01e2e 100644 --- a/vms/proposervm/proposer/validators_test.go +++ b/vms/proposervm/proposer/validators_test.go @@ -19,7 +19,7 @@ func TestValidatorDataLess(t *testing.T) { require.False(v2.Less(v1)) v1 = validatorData{ - id: ids.NodeID{1}, + id: ids.BuildTestNodeID([]byte{1}), } require.False(v1.Less(v2)) require.True(v2.Less(v1)) diff --git a/vms/proposervm/proposer/windower_test.go b/vms/proposervm/proposer/windower_test.go index ec2225003230..961398c78867 100644 --- a/vms/proposervm/proposer/windower_test.go +++ b/vms/proposervm/proposer/windower_test.go @@ -72,7 +72,7 @@ func TestWindowerChangeByHeight(t *testing.T) { chainID := ids.ID{0, 2} validatorIDs := make([]ids.NodeID, MaxWindows) for i := range validatorIDs { - validatorIDs[i] = ids.NodeID{byte(i + 1)} + validatorIDs[i] = ids.BuildTestNodeID([]byte{byte(i) + 1}) } vdrState := &validators.TestState{ T: t, @@ -134,7 +134,7 @@ func TestWindowerChangeByChain(t *testing.T) { validatorIDs := 
make([]ids.NodeID, MaxWindows) for i := range validatorIDs { - validatorIDs[i] = ids.NodeID{byte(i + 1)} + validatorIDs[i] = ids.BuildTestNodeID([]byte{byte(i) + 1}) } vdrState := &validators.TestState{ T: t, diff --git a/vms/proposervm/state_syncable_vm_test.go b/vms/proposervm/state_syncable_vm_test.go index 888ea5ebeee3..826f8b877987 100644 --- a/vms/proposervm/state_syncable_vm_test.go +++ b/vms/proposervm/state_syncable_vm_test.go @@ -11,14 +11,14 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/summary" statelessblock "github.com/ava-labs/avalanchego/vms/proposervm/block" @@ -51,7 +51,7 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { HeightV: 0, BytesV: []byte("genesis state"), } - innerVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, + innerVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { @@ -67,10 +67,7 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { return innerGenesisBlk, nil } - // createVM - dbManager := manager.NewMemDB(version.Semantic1_0_0) - dbManager = dbManager.NewPrefixDBManager([]byte{}) - + // create the VM vm := New( innerVM, time.Time{}, @@ -87,7 +84,7 @@ func 
helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { require.NoError(vm.Initialize( context.Background(), ctx, - dbManager, + prefixdb.New([]byte{}, memdb.New()), innerGenesisBlk.Bytes(), nil, nil, diff --git a/vms/proposervm/vm.go b/vms/proposervm/vm.go index a101be86f574..ae9691ab380e 100644 --- a/vms/proposervm/vm.go +++ b/vms/proposervm/vm.go @@ -18,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" @@ -167,7 +166,7 @@ func New( func (vm *VM) Initialize( ctx context.Context, chainCtx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, @@ -193,9 +192,7 @@ func (vm *VM) Initialize( chainCtx.Metrics = optionalGatherer vm.ctx = chainCtx - rawDB := dbManager.Current().Database - prefixDB := prefixdb.New(dbPrefix, rawDB) - vm.db = versiondb.New(prefixDB) + vm.db = versiondb.New(prefixdb.New(dbPrefix, db)) baseState, err := state.NewMetered(vm.db, "state", registerer) if err != nil { return err @@ -237,7 +234,7 @@ func (vm *VM) Initialize( err = vm.ChainVM.Initialize( ctx, chainCtx, - dbManager, + db, genesisBytes, upgradeBytes, configBytes, diff --git a/vms/proposervm/vm_regression_test.go b/vms/proposervm/vm_regression_test.go index 351d63459ab4..0a27c43e112a 100644 --- a/vms/proposervm/vm_regression_test.go +++ b/vms/proposervm/vm_regression_test.go @@ -11,11 +11,12 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + 
"github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/version" ) func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *testing.T) { @@ -36,7 +37,7 @@ func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *test return customError } - innerVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, + innerVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { @@ -58,14 +59,12 @@ func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *test }() ctx := snow.DefaultContextTest() - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) initialState := []byte("genesis state") err := proVM.Initialize( context.Background(), ctx, - dummyDBManager, + prefixdb.New([]byte{}, memdb.New()), initialState, nil, nil, diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index 102ec2b37a33..b75493f26ac6 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -17,7 +17,8 @@ import ( "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" @@ -29,7 +30,6 @@ import ( "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/timer/mockable" - 
"github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" "github.com/ava-labs/avalanchego/vms/proposervm/state" @@ -80,7 +80,7 @@ func initTestProposerVM( *validators.TestState, *VM, *snowman.TestBlock, - manager.Manager, + database.Database, ) { require := require.New(t) @@ -106,7 +106,7 @@ func initTestProposerVM( }, } - coreVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, + coreVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { @@ -152,21 +152,27 @@ func initTestProposerVM( return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil @@ -177,8 +183,7 @@ func initTestProposerVM( ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) + db := prefixdb.New([]byte{0}, memdb.New()) // signal height index is complete coreVM.VerifyHeightIndexF = func(context.Context) error { @@ -188,7 +193,7 @@ func initTestProposerVM( require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + db, initialState, nil, nil, @@ -203,7 +208,7 @@ func initTestProposerVM( 
require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) - return coreVM, valState, proVM, coreGenBlk, dummyDBManager + return coreVM, valState, proVM, coreGenBlk, db } // VM.BuildBlock tests section @@ -893,9 +898,10 @@ func TestExpiredBuildBlock(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + nodeID := ids.BuildTestNodeID([]byte{1}) return map[ids.NodeID]*validators.GetValidatorOutput{ - {1}: { - NodeID: ids.NodeID{1}, + nodeID: { + NodeID: nodeID, Weight: 100, }, }, nil @@ -905,14 +911,13 @@ func TestExpiredBuildBlock(t *testing.T) { ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dbManager := manager.NewMemDB(version.Semantic1_0_0) toEngine := make(chan common.Message, 1) var toScheduler chan<- common.Message coreVM.InitializeF = func( _ context.Context, _ *snow.Context, - _ manager.Manager, + _ database.Database, _ []byte, _ []byte, _ []byte, @@ -931,7 +936,7 @@ func TestExpiredBuildBlock(t *testing.T) { require.NoError(proVM.Initialize( context.Background(), ctx, - dbManager, + memdb.New(), nil, nil, nil, @@ -1162,9 +1167,10 @@ func TestInnerVMRollback(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + nodeID := ids.BuildTestNodeID([]byte{1}) return map[ids.NodeID]*validators.GetValidatorOutput{ - {1}: { - NodeID: ids.NodeID{1}, + nodeID: { + NodeID: nodeID, Weight: 100, }, }, nil @@ -1200,7 +1206,7 @@ func TestInnerVMRollback(t *testing.T) { coreVM.InitializeF = func( context.Context, *snow.Context, - manager.Manager, + database.Database, []byte, []byte, []byte, @@ -1214,7 +1220,7 @@ func TestInnerVMRollback(t *testing.T) { return nil } - dbManager := manager.NewMemDB(version.Semantic1_0_0) + db := 
memdb.New() proVM := New( coreVM, @@ -1229,7 +1235,7 @@ func TestInnerVMRollback(t *testing.T) { require.NoError(proVM.Initialize( context.Background(), ctx, - dbManager, + db, nil, nil, nil, @@ -1316,7 +1322,7 @@ func TestInnerVMRollback(t *testing.T) { require.NoError(proVM.Initialize( context.Background(), ctx, - dbManager, + db, nil, nil, nil, @@ -1769,7 +1775,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { }, } - coreVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, + coreVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { @@ -1815,21 +1821,27 @@ func TestRejectedHeightNotIndexed(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil @@ -1839,13 +1851,10 @@ func TestRejectedHeightNotIndexed(t *testing.T) { ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly initialState, nil, nil, @@ -1973,7 
+1982,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { }, } - coreVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, + coreVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { @@ -2019,21 +2028,27 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil @@ -2043,13 +2058,10 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly initialState, nil, nil, @@ -2169,10 +2181,6 @@ func TestVMInnerBlkCache(t *testing.T) { pTestCert, ) - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - innerVM.EXPECT().Initialize( gomock.Any(), gomock.Any(), @@ -2200,7 +2208,7 @@ func TestVMInnerBlkCache(t 
*testing.T) { require.NoError(vm.Initialize( context.Background(), ctx, - dummyDBManager, + prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly nil, nil, nil, @@ -2402,9 +2410,8 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { pTestCert, ) - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) + db := prefixdb.New([]byte{}, memdb.New()) innerVM.EXPECT().Initialize( gomock.Any(), @@ -2433,7 +2440,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { require.NoError(vm.Initialize( context.Background(), snowCtx, - dummyDBManager, + db, nil, nil, nil, @@ -2552,7 +2559,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { coreVM := &block.TestVM{ TestVM: common.TestVM{ T: t, - InitializeF: func(context.Context, *snow.Context, manager.Manager, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender) error { + InitializeF: func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender) error { return nil }, }, @@ -2601,9 +2608,8 @@ func TestHistoricalBlockDeletion(t *testing.T) { }, } - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) + db := prefixdb.New([]byte{}, memdb.New()) proVM := New( coreVM, @@ -2618,7 +2624,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + db, initialState, nil, nil, @@ -2716,7 +2722,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + db, initialState, nil, nil, @@ -2758,7 +2764,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + db, 
initialState, nil, nil, diff --git a/vms/rpcchainvm/batched_vm_test.go b/vms/rpcchainvm/batched_vm_test.go index dbadedad988f..817037dc6e3e 100644 --- a/vms/rpcchainvm/batched_vm_test.go +++ b/vms/rpcchainvm/batched_vm_test.go @@ -12,14 +12,13 @@ import ( "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/chain" ) @@ -88,9 +87,8 @@ func TestBatchedParseBlockCaching(t *testing.T) { defer stopper.Stop(context.Background()) ctx := snow.DefaultContextTest() - dbManager := manager.NewMemDB(version.Semantic1_0_0) - require.NoError(vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil)) + require.NoError(vm.Initialize(context.Background(), ctx, memdb.New(), nil, nil, nil, nil, nil, nil)) // Call should parse the first block blk, err := vm.ParseBlock(context.Background(), blkBytes1) diff --git a/vms/rpcchainvm/state_syncable_vm_test.go b/vms/rpcchainvm/state_syncable_vm_test.go index ffd486df94eb..241062616c9b 100644 --- a/vms/rpcchainvm/state_syncable_vm_test.go +++ b/vms/rpcchainvm/state_syncable_vm_test.go @@ -13,7 +13,8 @@ import ( "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" @@ -21,7 +22,6 
@@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime/subprocess" @@ -471,10 +471,8 @@ func TestLastAcceptedBlockPostStateSummaryAccept(t *testing.T) { // Step 1: initialize VM and check initial LastAcceptedBlock ctx := snow.DefaultContextTest() - dbManager := manager.NewMemDB(version.Semantic1_0_0) - dbManager = dbManager.NewPrefixDBManager([]byte{}) - require.NoError(vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil)) + require.NoError(vm.Initialize(context.Background(), ctx, prefixdb.New([]byte{}, memdb.New()), nil, nil, nil, nil, nil, nil)) blkID, err := vm.LastAccepted(context.Background()) require.NoError(err) diff --git a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go index 430bb18201a5..d9915bfbfcd6 100644 --- a/vms/rpcchainvm/vm_client.go +++ b/vms/rpcchainvm/vm_client.go @@ -29,7 +29,6 @@ import ( "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/chains/atomic/gsharedmemory" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/rpcdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ids/galiasreader" @@ -129,7 +128,7 @@ func (vm *VMClient) SetProcess(runtime runtime.Stopper, pid int, processTracker func (vm *VMClient) Initialize( ctx context.Context, chainCtx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, @@ -155,33 +154,21 @@ func (vm *VMClient) 
Initialize( return err } - // Initialize and serve each database and construct the db manager - // initialize request parameters - versionedDBs := dbManager.GetDatabases() - versionedDBServers := make([]*vmpb.VersionedDBServer, len(versionedDBs)) - for i, semDB := range versionedDBs { - dbVersion := semDB.Version.String() - serverListener, err := grpcutils.NewListener() - if err != nil { - return err - } - serverAddr := serverListener.Addr().String() - - go grpcutils.Serve(serverListener, vm.newDBServer(semDB.Database)) - chainCtx.Log.Info("grpc: serving database", - zap.String("version", dbVersion), - zap.String("address", serverAddr), - ) - - versionedDBServers[i] = &vmpb.VersionedDBServer{ - ServerAddr: serverAddr, - Version: dbVersion, - } + // Initialize the database + dbServerListener, err := grpcutils.NewListener() + if err != nil { + return err } + dbServerAddr := dbServerListener.Addr().String() + + go grpcutils.Serve(dbServerListener, vm.newDBServer(db)) + chainCtx.Log.Info("grpc: serving database", + zap.String("address", dbServerAddr), + ) vm.messenger = messenger.NewServer(toEngine) vm.keystore = gkeystore.NewServer(chainCtx.Keystore) - vm.sharedMemory = gsharedmemory.NewServer(chainCtx.SharedMemory, dbManager.Current().Database) + vm.sharedMemory = gsharedmemory.NewServer(chainCtx.SharedMemory, db) vm.bcLookup = galiasreader.NewServer(chainCtx.BCLookup) vm.appSender = appsender.NewServer(appSender) vm.validatorStateServer = gvalidators.NewServer(chainCtx.ValidatorState) @@ -211,7 +198,7 @@ func (vm *VMClient) Initialize( GenesisBytes: genesisBytes, UpgradeBytes: upgradeBytes, ConfigBytes: configBytes, - DbServers: versionedDBServers, + DbServerAddr: dbServerAddr, ServerAddr: serverAddr, }) if err != nil { @@ -409,7 +396,7 @@ func (vm *VMClient) CreateStaticHandlers(ctx context.Context) (map[string]http.H func (vm *VMClient) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { _, err := vm.client.Connected(ctx, 
&vmpb.ConnectedRequest{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), Version: nodeVersion.String(), }) return err @@ -417,7 +404,7 @@ func (vm *VMClient) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersio func (vm *VMClient) Disconnected(ctx context.Context, nodeID ids.NodeID) error { _, err := vm.client.Disconnected(ctx, &vmpb.DisconnectedRequest{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), }) return err } @@ -581,7 +568,7 @@ func (vm *VMClient) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID _, err := vm.client.AppRequest( ctx, &vmpb.AppRequestMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Request: request, Deadline: grpcutils.TimestampFromTime(deadline), @@ -594,7 +581,7 @@ func (vm *VMClient) AppResponse(ctx context.Context, nodeID ids.NodeID, requestI _, err := vm.client.AppResponse( ctx, &vmpb.AppResponseMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Response: response, }, @@ -606,7 +593,7 @@ func (vm *VMClient) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, req _, err := vm.client.AppRequestFailed( ctx, &vmpb.AppRequestFailedMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, }, ) @@ -617,7 +604,7 @@ func (vm *VMClient) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte _, err := vm.client.AppGossip( ctx, &vmpb.AppGossipMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), Msg: msg, }, ) diff --git a/vms/rpcchainvm/vm_server.go b/vms/rpcchainvm/vm_server.go index 6f78009d0f41..7ee82a241506 100644 --- a/vms/rpcchainvm/vm_server.go +++ b/vms/rpcchainvm/vm_server.go @@ -21,8 +21,8 @@ import ( "github.com/ava-labs/avalanchego/api/keystore/gkeystore" "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/chains/atomic/gsharedmemory" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/corruptabledb" - 
"github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/rpcdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ids/galiasreader" @@ -75,7 +75,7 @@ type VMServer struct { allowShutdown *utils.Atomic[bool] processMetrics prometheus.Gatherer - dbManager manager.Manager + db database.Database log logging.Logger serverCloser grpcutils.ServerCloser @@ -150,40 +150,19 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) // Register metrics for each Go plugin processes vm.processMetrics = registerer - // Dial each database in the request and construct the database manager - versionedDBs := make([]*manager.VersionedDatabase, len(req.DbServers)) - for i, vDBReq := range req.DbServers { - version, err := version.Parse(vDBReq.Version) - if err != nil { - // Ignore closing errors to return the original error - _ = vm.connCloser.Close() - return nil, err - } - - clientConn, err := grpcutils.Dial( - vDBReq.ServerAddr, - grpcutils.WithChainUnaryInterceptor(grpcClientMetrics.UnaryClientInterceptor()), - grpcutils.WithChainStreamInterceptor(grpcClientMetrics.StreamClientInterceptor()), - ) - if err != nil { - // Ignore closing errors to return the original error - _ = vm.connCloser.Close() - return nil, err - } - vm.connCloser.Add(clientConn) - db := rpcdb.NewClient(rpcdbpb.NewDatabaseClient(clientConn)) - versionedDBs[i] = &manager.VersionedDatabase{ - Database: corruptabledb.New(db), - Version: version, - } - } - dbManager, err := manager.NewManagerFromDBs(versionedDBs) + // Dial the database + dbClientConn, err := grpcutils.Dial( + req.DbServerAddr, + grpcutils.WithChainUnaryInterceptor(grpcClientMetrics.UnaryClientInterceptor()), + grpcutils.WithChainStreamInterceptor(grpcClientMetrics.StreamClientInterceptor()), + ) if err != nil { - // Ignore closing errors to return the original error - _ = vm.connCloser.Close() return nil, err } - vm.dbManager = 
dbManager + vm.connCloser.Add(dbClientConn) + vm.db = corruptabledb.New( + rpcdb.NewClient(rpcdbpb.NewDatabaseClient(dbClientConn)), + ) // TODO: Allow the logger to be configured by the client vm.log = logging.NewLogger( @@ -259,7 +238,7 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) ChainDataDir: req.ChainDataDir, } - if err := vm.vm.Initialize(ctx, vm.ctx, dbManager, req.GenesisBytes, req.UpgradeBytes, req.ConfigBytes, toEngine, nil, appSenderClient); err != nil { + if err := vm.vm.Initialize(ctx, vm.ctx, vm.db, req.GenesisBytes, req.UpgradeBytes, req.ConfigBytes, toEngine, nil, appSenderClient); err != nil { // Ignore errors closing resources to return the original error _ = vm.connCloser.Close() close(vm.closed) @@ -518,7 +497,7 @@ func (vm *VMServer) Health(ctx context.Context, _ *emptypb.Empty) (*vmpb.HealthR if err != nil { return &vmpb.HealthResponse{}, err } - dbHealth, err := vm.dbHealthChecks(ctx) + dbHealth, err := vm.db.HealthCheck(ctx) if err != nil { return &vmpb.HealthResponse{}, err } @@ -533,22 +512,6 @@ func (vm *VMServer) Health(ctx context.Context, _ *emptypb.Empty) (*vmpb.HealthR }, err } -func (vm *VMServer) dbHealthChecks(ctx context.Context) (interface{}, error) { - details := make(map[string]interface{}, len(vm.dbManager.GetDatabases())) - - // Check Database health - for _, client := range vm.dbManager.GetDatabases() { - // Shared gRPC client don't close - health, err := client.Database.HealthCheck(ctx) - if err != nil { - return nil, fmt.Errorf("failed to check db health %q: %w", client.Version.String(), err) - } - details[client.Version.String()] = health - } - - return details, nil -} - func (vm *VMServer) Version(ctx context.Context, _ *emptypb.Empty) (*vmpb.VersionResponse, error) { version, err := vm.vm.Version(ctx) return &vmpb.VersionResponse{ diff --git a/vms/rpcchainvm/with_context_vm_test.go b/vms/rpcchainvm/with_context_vm_test.go index a6d72e1a64bb..65d1e4396964 100644 --- 
a/vms/rpcchainvm/with_context_vm_test.go +++ b/vms/rpcchainvm/with_context_vm_test.go @@ -12,13 +12,12 @@ import ( "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - "github.com/ava-labs/avalanchego/version" ) var ( @@ -100,9 +99,8 @@ func TestContextVMSummary(t *testing.T) { defer stopper.Stop(context.Background()) ctx := snow.DefaultContextTest() - dbManager := manager.NewMemDB(version.Semantic1_0_0) - require.NoError(vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil)) + require.NoError(vm.Initialize(context.Background(), ctx, memdb.New(), nil, nil, nil, nil, nil, nil)) blkIntf, err := vm.BuildBlockWithContext(context.Background(), blockContext) require.NoError(err) diff --git a/vms/secp256k1fx/fx.go b/vms/secp256k1fx/fx.go index 28f81218313d..c969c9593976 100644 --- a/vms/secp256k1fx/fx.go +++ b/vms/secp256k1fx/fx.go @@ -9,9 +9,9 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/verify" ) @@ -40,8 +40,9 @@ var ( // Fx describes the secp256k1 feature extension type Fx struct { + secp256k1.RecoverCache + VM VM - SECPFactory secp256k1.Factory bootstrapped bool } @@ -53,21 +54,19 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { log := fx.VM.Logger() log.Debug("initializing secp256k1 fx") - 
fx.SECPFactory = secp256k1.Factory{ - Cache: cache.LRU[ids.ID, *secp256k1.PublicKey]{ + fx.RecoverCache = secp256k1.RecoverCache{ + LRU: cache.LRU[ids.ID, *secp256k1.PublicKey]{ Size: defaultCacheSize, }, } c := fx.VM.CodecRegistry() - errs := wrappers.Errs{} - errs.Add( + return utils.Err( c.RegisterType(&TransferInput{}), c.RegisterType(&MintOutput{}), c.RegisterType(&TransferOutput{}), c.RegisterType(&MintOperation{}), c.RegisterType(&Credential{}), ) - return errs.Err } func (fx *Fx) InitializeVM(vmIntf interface{}) error { @@ -202,7 +201,7 @@ func (fx *Fx) VerifyCredentials(utx UnsignedTx, in *Input, cred *Credential, out // Make sure each signature in the signature list is from an owner of // the output being consumed sig := cred.Sigs[i] - pk, err := fx.SECPFactory.RecoverHashPublicKey(txHash, sig[:]) + pk, err := fx.RecoverPublicKeyFromHash(txHash, sig[:]) if err != nil { return err } diff --git a/vms/secp256k1fx/keychain.go b/vms/secp256k1fx/keychain.go index 6d460378d697..3246ef95722d 100644 --- a/vms/secp256k1fx/keychain.go +++ b/vms/secp256k1fx/keychain.go @@ -27,7 +27,6 @@ var ( // Keychain is a collection of keys that can be used to spend outputs type Keychain struct { - factory *secp256k1.Factory avaxAddrToKeyIndex map[ids.ShortID]int ethAddrToKeyIndex map[common.Address]int @@ -41,7 +40,6 @@ type Keychain struct { // NewKeychain returns a new keychain containing [keys] func NewKeychain(keys ...*secp256k1.PrivateKey) *Keychain { kc := &Keychain{ - factory: &secp256k1.Factory{}, avaxAddrToKeyIndex: make(map[ids.ShortID]int), ethAddrToKeyIndex: make(map[common.Address]int), } @@ -90,7 +88,7 @@ func (kc Keychain) EthAddresses() set.Set[common.Address] { // New returns a newly generated private key func (kc *Keychain) New() (*secp256k1.PrivateKey, error) { - sk, err := kc.factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() if err != nil { return nil, err } diff --git a/vms/secp256k1fx/keychain_test.go b/vms/secp256k1fx/keychain_test.go index 
888781378461..46fdb1a0695c 100644 --- a/vms/secp256k1fx/keychain_test.go +++ b/vms/secp256k1fx/keychain_test.go @@ -46,7 +46,7 @@ func TestKeychainAdd(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keys[0]) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) kc.Add(sk) @@ -87,7 +87,7 @@ func TestKeychainMatch(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keyStr) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) sks = append(sks, sk) } @@ -132,7 +132,7 @@ func TestKeychainSpendMint(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keyStr) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) sks = append(sks, sk) } @@ -174,7 +174,7 @@ func TestKeychainSpendTransfer(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keyStr) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) sks = append(sks, sk) } @@ -222,7 +222,7 @@ func TestKeychainString(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keys[0]) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) kc.Add(sk) @@ -237,7 +237,7 @@ func TestKeychainPrefixedString(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keys[0]) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) kc.Add(sk) diff --git a/vms/tracedvm/block_vm.go b/vms/tracedvm/block_vm.go index b82466f3a420..969a6bc09637 100644 --- a/vms/tracedvm/block_vm.go +++ b/vms/tracedvm/block_vm.go @@ -11,7 +11,7 @@ import ( oteltrace "go.opentelemetry.io/otel/trace" - 
"github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" @@ -100,7 +100,7 @@ func NewBlockVM(vm block.ChainVM, name string, tracer trace.Tracer) block.ChainV func (vm *blockVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, diff --git a/vms/tracedvm/vertex_vm.go b/vms/tracedvm/vertex_vm.go index 9c23f882ea40..53189f5cee70 100644 --- a/vms/tracedvm/vertex_vm.go +++ b/vms/tracedvm/vertex_vm.go @@ -10,7 +10,7 @@ import ( oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" @@ -35,7 +35,7 @@ func NewVertexVM(vm vertex.LinearizableVMWithEngine, tracer trace.Tracer) vertex func (vm *vertexVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, diff --git a/wallet/chain/p/backend_visitor.go b/wallet/chain/p/backend_visitor.go index da2fc591ecd5..57d602354428 100644 --- a/wallet/chain/p/backend_visitor.go +++ b/wallet/chain/p/backend_visitor.go @@ -58,6 +58,10 @@ func (b *backendVisitor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwnersh return b.baseTx(&tx.BaseTx) } +func (b *backendVisitor) BaseTx(tx *txs.BaseTx) error { + return b.baseTx(tx) +} + func (b *backendVisitor) ImportTx(tx *txs.ImportTx) error { err := b.b.removeUTXOs( b.ctx, diff --git a/wallet/chain/p/signer_visitor.go b/wallet/chain/p/signer_visitor.go index 6df1687400ac..9dd6018ea2e3 100644 --- 
a/wallet/chain/p/signer_visitor.go +++ b/wallet/chain/p/signer_visitor.go @@ -51,6 +51,14 @@ func (*signerVisitor) RewardValidatorTx(*txs.RewardValidatorTx) error { return errUnsupportedTxType } +func (s *signerVisitor) BaseTx(tx *txs.BaseTx) error { + txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) + if err != nil { + return err + } + return sign(s.tx, false, txSigners) +} + func (s *signerVisitor) AddValidatorTx(tx *txs.AddValidatorTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { diff --git a/x/merkledb/README.md b/x/merkledb/README.md index 467a60e19b08..6c7d9d68775c 100644 --- a/x/merkledb/README.md +++ b/x/merkledb/README.md @@ -76,8 +76,8 @@ The node serialization format is as follows: Where: * `Value existence flag` is `1` if this node has a value, otherwise `0`. -* `Value length` is the length of the value, if it exists (i.e. if `Value existince flag` is `1`.) Otherwise not serialized. -* `Value` is the value, if it exists (i.e. if `Value existince flag` is `1`.) Otherwise not serialized. +* `Value length` is the length of the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. +* `Value` is the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. * `Number of children` is the number of children this node has. * `Child index` is the index of a child node within the list of the node's children. * `Child compressed key length` is the length of the child node's compressed key. @@ -197,8 +197,8 @@ Where: * `Child index` is the index of a child node within the list of the node's children. * `Child ID` is the child node's ID. * `Value existence flag` is `1` if this node has a value, otherwise `0`. -* `Value length` is the length of the value, if it exists (i.e. if `Value existince flag` is `1`.) Otherwise not serialized. -* `Value` is the value, if it exists (i.e. if `Value existince flag` is `1`.) Otherwise not serialized. 
+* `Value length` is the length of the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. +* `Value` is the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. * `Key length` is the number of nibbles in this node's key. * `Key` is the node's key. diff --git a/x/merkledb/cache.go b/x/merkledb/cache.go index 57d674ed63ef..7b280c1208d4 100644 --- a/x/merkledb/cache.go +++ b/x/merkledb/cache.go @@ -48,7 +48,7 @@ func (c *onEvictCache[K, V]) Get(key K) (V, bool) { // Put an element into this cache. If this causes an element // to be evicted, calls [c.onEviction] on the evicted element -// and returns the error from [c.onEviction]. Otherwise returns nil. +// and returns the error from [c.onEviction]. Otherwise, returns nil. func (c *onEvictCache[K, V]) Put(key K, value V) error { c.lock.Lock() defer c.lock.Unlock() diff --git a/x/merkledb/codec.go b/x/merkledb/codec.go index e7ef1eddb7f5..c9837abb509f 100644 --- a/x/merkledb/codec.go +++ b/x/merkledb/codec.go @@ -44,7 +44,6 @@ var ( trueBytes = []byte{trueByte} falseBytes = []byte{falseByte} - errTooManyChildren = errors.New("length of children list is larger than branching factor") errChildIndexTooLarge = errors.New("invalid child index. Must be less than branching factor") errLeadingZeroes = errors.New("varint has leading zeroes") errInvalidBool = errors.New("decoded bool is neither true nor false") @@ -63,13 +62,15 @@ type encoderDecoder interface { type encoder interface { // Assumes [n] is non-nil. encodeDBNode(n *dbNode) []byte - // Assumes [hv] is non-nil. - encodeHashValues(hv *hashValues) []byte + + // Returns the bytes that will be hashed to generate [n]'s ID. + // Assumes [n] is non-nil. + encodeHashValues(n *node) []byte } type decoder interface { // Assumes [n] is non-nil. 
- decodeDBNode(bytes []byte, n *dbNode, factor BranchFactor) error + decodeDBNode(bytes []byte, n *dbNode) error } func newCodec() encoderDecoder { @@ -82,7 +83,7 @@ func newCodec() encoderDecoder { } } -// Note that bytes.Buffer.Write always returns nil so we +// Note that bytes.Buffer.Write always returns nil, so we // can ignore its return values in [codecImpl] methods. type codecImpl struct { // Invariant: Every byte slice returned by [varIntPool] has @@ -114,9 +115,9 @@ func (c *codecImpl) encodeDBNode(n *dbNode) []byte { return buf.Bytes() } -func (c *codecImpl) encodeHashValues(hv *hashValues) []byte { +func (c *codecImpl) encodeHashValues(n *node) []byte { var ( - numChildren = len(hv.Children) + numChildren = len(n.children) // Estimate size [hv] to prevent memory allocations estimatedLen = minVarIntLen + numChildren*hashValuesChildLen + estimatedValueLen + estimatedKeyLen buf = bytes.NewBuffer(make([]byte, 0, estimatedLen)) @@ -125,19 +126,20 @@ func (c *codecImpl) encodeHashValues(hv *hashValues) []byte { c.encodeUint(buf, uint64(numChildren)) // ensure that the order of entries is consistent - for index := 0; BranchFactor(index) < hv.Key.branchFactor; index++ { - if entry, ok := hv.Children[byte(index)]; ok { - c.encodeUint(buf, uint64(index)) - _, _ = buf.Write(entry.id[:]) - } + keys := maps.Keys(n.children) + slices.Sort(keys) + for _, index := range keys { + entry := n.children[index] + c.encodeUint(buf, uint64(index)) + _, _ = buf.Write(entry.id[:]) } - c.encodeMaybeByteSlice(buf, hv.Value) - c.encodeKey(buf, hv.Key) + c.encodeMaybeByteSlice(buf, n.valueDigest) + c.encodeKey(buf, n.key) return buf.Bytes() } -func (c *codecImpl) decodeDBNode(b []byte, n *dbNode, branchFactor BranchFactor) error { +func (c *codecImpl) decodeDBNode(b []byte, n *dbNode) error { if minDBNodeLen > len(b) { return io.ErrUnexpectedEOF } @@ -154,25 +156,23 @@ func (c *codecImpl) decodeDBNode(b []byte, n *dbNode, branchFactor BranchFactor) switch { case err != nil: return 
err - case numChildren > uint64(branchFactor): - return errTooManyChildren case numChildren > uint64(src.Len()/minChildLen): return io.ErrUnexpectedEOF } - n.children = make(map[byte]child, branchFactor) + n.children = make(map[byte]child, numChildren) var previousChild uint64 for i := uint64(0); i < numChildren; i++ { index, err := c.decodeUint(src) if err != nil { return err } - if index >= uint64(branchFactor) || (i != 0 && index <= previousChild) { + if (i != 0 && index <= previousChild) || index > math.MaxUint8 { return errChildIndexTooLarge } previousChild = index - compressedKey, err := c.decodeKey(src, branchFactor) + compressedKey, err := c.decodeKey(src) if err != nil { return err } @@ -277,12 +277,12 @@ func (c *codecImpl) decodeMaybeByteSlice(src *bytes.Reader) (maybe.Maybe[[]byte] return maybe.Nothing[[]byte](), err } - bytes, err := c.decodeByteSlice(src) + rawBytes, err := c.decodeByteSlice(src) if err != nil { return maybe.Nothing[[]byte](), err } - return maybe.Some(bytes), nil + return maybe.Some(rawBytes), nil } func (c *codecImpl) decodeByteSlice(src *bytes.Reader) ([]byte, error) { @@ -331,11 +331,11 @@ func (*codecImpl) decodeID(src *bytes.Reader) (ids.ID, error) { } func (c *codecImpl) encodeKey(dst *bytes.Buffer, key Key) { - c.encodeUint(dst, uint64(key.tokenLength)) + c.encodeUint(dst, uint64(key.length)) _, _ = dst.Write(key.Bytes()) } -func (c *codecImpl) decodeKey(src *bytes.Reader, branchFactor BranchFactor) (Key, error) { +func (c *codecImpl) decodeKey(src *bytes.Reader) (Key, error) { if minKeyLen > src.Len() { return Key{}, io.ErrUnexpectedEOF } @@ -347,9 +347,10 @@ func (c *codecImpl) decodeKey(src *bytes.Reader, branchFactor BranchFactor) (Key if length > math.MaxInt { return Key{}, errIntOverflow } - result := emptyKey(branchFactor) - result.tokenLength = int(length) - keyBytesLen := result.bytesNeeded(result.tokenLength) + result := Key{ + length: int(length), + } + keyBytesLen := bytesNeeded(result.length) if keyBytesLen > 
src.Len() { return Key{}, io.ErrUnexpectedEOF } @@ -363,8 +364,8 @@ func (c *codecImpl) decodeKey(src *bytes.Reader, branchFactor BranchFactor) (Key if result.hasPartialByte() { // Confirm that the padding bits in the partial byte are 0. // We want to only look at the bits to the right of the last token, which is at index length-1. - // Generate a mask with (8-bitsToShift) 0s followed by bitsToShift 1s. - paddingMask := byte(0xFF >> (8 - result.bitsToShift(result.tokenLength-1))) + // Generate a mask where the (result.length % 8) left bits are 0. + paddingMask := byte(0xFF >> (result.length % 8)) if buffer[keyBytesLen-1]&paddingMask != 0 { return Key{}, errNonZeroKeyPadding } diff --git a/x/merkledb/codec_test.go b/x/merkledb/codec_test.go index cb83e1ce582c..00e5790b3171 100644 --- a/x/merkledb/codec_test.go +++ b/x/merkledb/codec_test.go @@ -80,24 +80,22 @@ func FuzzCodecKey(f *testing.F) { b []byte, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - codec := codec.(*codecImpl) - reader := bytes.NewReader(b) - startLen := reader.Len() - got, err := codec.decodeKey(reader, branchFactor) - if err != nil { - t.SkipNow() - } - endLen := reader.Len() - numRead := startLen - endLen - - // Encoding [got] should be the same as [b]. - var buf bytes.Buffer - codec.encodeKey(&buf, got) - bufBytes := buf.Bytes() - require.Len(bufBytes, numRead) - require.Equal(b[:numRead], bufBytes) + codec := codec.(*codecImpl) + reader := bytes.NewReader(b) + startLen := reader.Len() + got, err := codec.decodeKey(reader) + if err != nil { + t.SkipNow() } + endLen := reader.Len() + numRead := startLen - endLen + + // Encoding [got] should be the same as [b]. 
+ var buf bytes.Buffer + codec.encodeKey(&buf, got) + bufBytes := buf.Bytes() + require.Len(bufBytes, numRead) + require.Equal(b[:numRead], bufBytes) }, ) } @@ -109,17 +107,15 @@ func FuzzCodecDBNodeCanonical(f *testing.F) { b []byte, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - codec := codec.(*codecImpl) - node := &dbNode{} - if err := codec.decodeDBNode(b, node, branchFactor); err != nil { - t.SkipNow() - } - - // Encoding [node] should be the same as [b]. - buf := codec.encodeDBNode(node) - require.Equal(b, buf) + codec := codec.(*codecImpl) + node := &dbNode{} + if err := codec.decodeDBNode(b, node); err != nil { + t.SkipNow() } + + // Encoding [node] should be the same as [b]. + buf := codec.encodeDBNode(node) + require.Equal(b, buf) }, ) } @@ -133,7 +129,7 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { valueBytes []byte, ) { require := require.New(t) - for _, branchFactor := range branchFactors { + for _, bf := range validBranchFactors { r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 value := maybe.Nothing[[]byte]() @@ -148,7 +144,7 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { value = maybe.Some(valueBytes) } - numChildren := r.Intn(int(branchFactor)) // #nosec G404 + numChildren := r.Intn(int(bf)) // #nosec G404 children := map[byte]child{} for i := 0; i < numChildren; i++ { @@ -159,7 +155,7 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { _, _ = r.Read(childKeyBytes) // #nosec G404 children[byte(i)] = child{ - compressedKey: ToKey(childKeyBytes, branchFactor), + compressedKey: ToKey(childKeyBytes), id: childID, } } @@ -171,7 +167,7 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { nodeBytes := codec.encodeDBNode(&node) var gotNode dbNode - require.NoError(codec.decodeDBNode(nodeBytes, &gotNode, branchFactor)) + require.NoError(codec.decodeDBNode(nodeBytes, &gotNode)) require.Equal(node, gotNode) nodeBytes2 := codec.encodeDBNode(&gotNode) @@ -181,31 +177,15 @@ func 
FuzzCodecDBNodeDeterministic(f *testing.F) { ) } -func TestCodecDecodeDBNode(t *testing.T) { +func TestCodecDecodeDBNode_TooShort(t *testing.T) { require := require.New(t) var ( parsedDBNode dbNode tooShortBytes = make([]byte, minDBNodeLen-1) ) - err := codec.decodeDBNode(tooShortBytes, &parsedDBNode, BranchFactor16) + err := codec.decodeDBNode(tooShortBytes, &parsedDBNode) require.ErrorIs(err, io.ErrUnexpectedEOF) - - proof := dbNode{ - value: maybe.Some([]byte{1}), - children: map[byte]child{}, - } - - nodeBytes := codec.encodeDBNode(&proof) - // Remove num children (0) from end - nodeBytes = nodeBytes[:len(nodeBytes)-minVarIntLen] - proofBytesBuf := bytes.NewBuffer(nodeBytes) - - // Put num children > branch factor - codec.(*codecImpl).encodeUint(proofBytesBuf, uint64(BranchFactor16+1)) - - err = codec.decodeDBNode(proofBytesBuf.Bytes(), &parsedDBNode, BranchFactor16) - require.ErrorIs(err, errTooManyChildren) } // Ensure that encodeHashValues is deterministic @@ -219,18 +199,18 @@ func FuzzEncodeHashValues(f *testing.F) { randSeed int, ) { require := require.New(t) - for _, branchFactor := range branchFactors { // Create a random *hashValues + for _, bf := range validBranchFactors { // Create a random node r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 children := map[byte]child{} - numChildren := r.Intn(int(branchFactor)) // #nosec G404 + numChildren := r.Intn(int(bf)) // #nosec G404 for i := 0; i < numChildren; i++ { compressedKeyLen := r.Intn(32) // #nosec G404 compressedKeyBytes := make([]byte, compressedKeyLen) _, _ = r.Read(compressedKeyBytes) // #nosec G404 children[byte(i)] = child{ - compressedKey: ToKey(compressedKeyBytes, branchFactor), + compressedKey: ToKey(compressedKeyBytes), id: ids.GenerateTestID(), hasValue: r.Intn(2) == 1, // #nosec G404 } @@ -247,13 +227,15 @@ func FuzzEncodeHashValues(f *testing.F) { key := make([]byte, r.Intn(32)) // #nosec G404 _, _ = r.Read(key) // #nosec G404 - hv := &hashValues{ - Children: children, - 
Value: value, - Key: ToKey(key, branchFactor), + hv := &node{ + key: ToKey(key), + dbNode: dbNode{ + children: children, + value: value, + }, } - // Serialize the *hashValues with both codecs + // Serialize hv with both codecs hvBytes1 := codec1.encodeHashValues(hv) hvBytes2 := codec2.encodeHashValues(hv) @@ -267,6 +249,6 @@ func FuzzEncodeHashValues(f *testing.F) { func TestCodecDecodeKeyLengthOverflowRegression(t *testing.T) { codec := codec.(*codecImpl) bytes := bytes.NewReader(binary.AppendUvarint(nil, math.MaxInt)) - _, err := codec.decodeKey(bytes, BranchFactor16) + _, err := codec.decodeKey(bytes) require.ErrorIs(t, err, io.ErrUnexpectedEOF) } diff --git a/x/merkledb/db.go b/x/merkledb/db.go index 87439010b1f0..b1ee699bab97 100644 --- a/x/merkledb/db.go +++ b/x/merkledb/db.go @@ -36,13 +36,13 @@ const ( // TODO: name better rebuildViewSizeFractionOfCacheSize = 50 minRebuildViewSizePerCommit = 1000 + clearBatchSize = units.MiB rebuildIntermediateDeletionWriteSize = units.MiB valueNodePrefixLen = 1 ) var ( - rootKey []byte - _ MerkleDB = (*merkleDB)(nil) + _ MerkleDB = (*merkleDB)(nil) codec = newCodec() @@ -54,8 +54,8 @@ var ( hadCleanShutdown = []byte{1} didNotHaveCleanShutdown = []byte{0} - errSameRoot = errors.New("start and end root are the same") - errNoNewRoot = errors.New("there was no updated root in change list") + errSameRoot = errors.New("start and end root are the same") + errNoNewSentinel = errors.New("there was no updated sentinel node in change list") ) type ChangeProofer interface { @@ -73,7 +73,7 @@ type ChangeProofer interface { maxLength int, ) (*ChangeProof, error) - // Returns nil iff all of the following hold: + // Returns nil iff all the following hold: // - [start] <= [end]. // - [proof] is non-empty. // - All keys in [proof.KeyValues] and [proof.DeletedKeys] are in [start, end]. 
@@ -114,6 +114,12 @@ type RangeProofer interface { CommitRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], proof *RangeProof) error } +type Clearer interface { + // Deletes all key/value pairs from the database + // and clears the change history. + Clear() error +} + type Prefetcher interface { // PrefetchPath attempts to load all trie nodes on the path of [key] // into the cache. @@ -129,6 +135,7 @@ type Prefetcher interface { type MerkleDB interface { database.Database + Clearer Trie MerkleRootGetter ProofGetter @@ -175,7 +182,7 @@ type merkleDB struct { // Should be held before taking [db.lock] commitLock sync.RWMutex - // Contains all of the key-value pairs stored by this database, + // Contains all the key-value pairs stored by this database, // including metadata, intermediate nodes and value nodes. baseDB database.Database @@ -194,8 +201,10 @@ type merkleDB struct { debugTracer trace.Tracer infoTracer trace.Tracer - // The root of this trie. - root *node + // The sentinel node of this trie. + // It is the node with a nil key and is the ancestor of all nodes in the trie. + // If it has a value or has multiple children, it is also the root of the trie. + sentinelNode *node // Valid children of this trie. childViews []*trieView @@ -204,8 +213,7 @@ type merkleDB struct { // [calculateNodeIDsHelper] at any given time. calculateNodeIDsSema *semaphore.Weighted - toKey func(p []byte) Key - rootKey Key + tokenSize int } // New returns a new merkle database. 
@@ -223,17 +231,13 @@ func newDatabase( config Config, metrics merkleMetrics, ) (*merkleDB, error) { - rootGenConcurrency := uint(runtime.NumCPU()) - if config.RootGenConcurrency != 0 { - rootGenConcurrency = config.RootGenConcurrency - } - if err := config.BranchFactor.Valid(); err != nil { return nil, err } - toKey := func(b []byte) Key { - return ToKey(b, config.BranchFactor) + rootGenConcurrency := uint(runtime.NumCPU()) + if config.RootGenConcurrency != 0 { + rootGenConcurrency = config.RootGenConcurrency } // Share a sync.Pool of []byte between the intermediateNodeDB and valueNodeDB @@ -246,15 +250,14 @@ func newDatabase( trieDB := &merkleDB{ metrics: metrics, baseDB: db, - valueNodeDB: newValueNodeDB(db, bufferPool, metrics, int(config.ValueNodeCacheSize), config.BranchFactor), - intermediateNodeDB: newIntermediateNodeDB(db, bufferPool, metrics, int(config.IntermediateNodeCacheSize), int(config.EvictionBatchSize)), - history: newTrieHistory(int(config.HistoryLength), toKey), + valueNodeDB: newValueNodeDB(db, bufferPool, metrics, int(config.ValueNodeCacheSize)), + intermediateNodeDB: newIntermediateNodeDB(db, bufferPool, metrics, int(config.IntermediateNodeCacheSize), int(config.EvictionBatchSize), BranchFactorToTokenSize[config.BranchFactor]), + history: newTrieHistory(int(config.HistoryLength)), debugTracer: getTracerIfEnabled(config.TraceLevel, DebugTrace, config.Tracer), infoTracer: getTracerIfEnabled(config.TraceLevel, InfoTrace, config.Tracer), childViews: make([]*trieView, 0, defaultPreallocationSize), calculateNodeIDsSema: semaphore.NewWeighted(int64(rootGenConcurrency)), - toKey: toKey, - rootKey: toKey(rootKey), + tokenSize: BranchFactorToTokenSize[config.BranchFactor], } root, err := trieDB.initializeRootIfNeeded() @@ -292,7 +295,7 @@ func newDatabase( // Deletes every intermediate node and rebuilds them by re-adding every key/value. // TODO: make this more efficient by only clearing out the stale portions of the trie. 
func (db *merkleDB) rebuild(ctx context.Context, cacheSize int) error { - db.root = newNode(nil, db.rootKey) + db.sentinelNode = newNode(Key{}) // Delete intermediate nodes. if err := database.ClearPrefix(db.baseDB, intermediateNodePrefix, rebuildIntermediateDeletionWriteSize); err != nil { @@ -474,7 +477,7 @@ func (db *merkleDB) PrefetchPath(key []byte) error { } func (db *merkleDB) prefetchPath(view *trieView, keyBytes []byte) error { - return view.visitPathToKey(db.toKey(keyBytes), func(n *node) error { + return view.visitPathToKey(ToKey(keyBytes), func(n *node) error { if !n.hasValue() { return db.intermediateNodeDB.nodeCache.Put(n.key, n) } @@ -501,11 +504,11 @@ func (db *merkleDB) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []e defer db.lock.RUnlock() values := make([][]byte, len(keys)) - errors := make([]error, len(keys)) + getErrors := make([]error, len(keys)) for i, key := range keys { - values[i], errors[i] = db.getValueCopy(db.toKey(key)) + values[i], getErrors[i] = db.getValueCopy(ToKey(key)) } - return values, errors + return values, getErrors } // GetValue returns the value associated with [key]. @@ -517,7 +520,7 @@ func (db *merkleDB) GetValue(ctx context.Context, key []byte) ([]byte, error) { db.lock.RLock() defer db.lock.RUnlock() - return db.getValueCopy(db.toKey(key)) + return db.getValueCopy(ToKey(key)) } // getValueCopy returns a copy of the value for the given [key]. @@ -575,7 +578,20 @@ func (db *merkleDB) GetMerkleRoot(ctx context.Context) (ids.ID, error) { // Assumes [db.lock] is read locked. 
func (db *merkleDB) getMerkleRoot() ids.ID { - return db.root.id + if !isSentinelNodeTheRoot(db.sentinelNode) { + // if the sentinel node should be skipped, the trie's root is the nil key node's only child + for _, childEntry := range db.sentinelNode.children { + return childEntry.id + } + } + return db.sentinelNode.id +} + +// isSentinelNodeTheRoot returns true if the passed in sentinel node has a value and or multiple child nodes +// When this is true, the root of the trie is the sentinel node +// When this is false, the root of the trie is the sentinel node's single child +func isSentinelNodeTheRoot(sentinel *node) bool { + return sentinel.valueDigest.HasValue() || len(sentinel.children) != 1 } func (db *merkleDB) GetProof(ctx context.Context, key []byte) (*Proof, error) { @@ -783,8 +799,8 @@ func (db *merkleDB) Has(k []byte) (bool, error) { return false, database.ErrClosed } - _, err := db.getValueWithoutLock(db.toKey(k)) - if err == database.ErrNotFound { + _, err := db.getValueWithoutLock(ToKey(k)) + if errors.Is(err, database.ErrNotFound) { return false, nil } return err == nil, err @@ -868,7 +884,7 @@ func (db *merkleDB) DeleteContext(ctx context.Context, key []byte) error { return view.commitToDB(ctx) } -// Assumes values inside of [ops] are safe to reference after the function +// Assumes values inside [ops] are safe to reference after the function // returns. Assumes [db.lock] isn't held. 
func (db *merkleDB) commitBatch(ops []database.BatchOp) error { db.commitLock.Lock() @@ -921,9 +937,9 @@ func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *trieView) e return nil } - rootChange, ok := changes.nodes[db.rootKey] + sentinelChange, ok := changes.nodes[Key{}] if !ok { - return errNoNewRoot + return errNoNewSentinel } currentValueNodeBatch := db.valueNodeDB.NewBatch() @@ -965,7 +981,7 @@ func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *trieView) e // Only modify in-memory state after the commit succeeds // so that we don't need to clean up on error. - db.root = rootChange.after + db.sentinelNode = sentinelChange.after db.history.record(changes) return nil } @@ -1020,7 +1036,7 @@ func (db *merkleDB) VerifyChangeProof( return err } - smallestKey := maybe.Bind(start, db.toKey) + smallestKey := maybe.Bind(start, ToKey) // Make sure the start proof, if given, is well-formed. if err := verifyProofPath(proof.StartProof, smallestKey); err != nil { @@ -1030,12 +1046,12 @@ func (db *merkleDB) VerifyChangeProof( // Find the greatest key in [proof.KeyChanges] // Note that [proof.EndProof] is a proof for this key. // [largestKey] is also used when we add children of proof nodes to [trie] below. - largestKey := maybe.Bind(end, db.toKey) + largestKey := maybe.Bind(end, ToKey) if len(proof.KeyChanges) > 0 { // If [proof] has key-value pairs, we should insert children // greater than [end] to ancestors of the node containing [end] // so that we get the expected root ID. - largestKey = maybe.Some(db.toKey(proof.KeyChanges[len(proof.KeyChanges)-1].Key)) + largestKey = maybe.Some(ToKey(proof.KeyChanges[len(proof.KeyChanges)-1].Key)) } // Make sure the end proof, if given, is well-formed. 
@@ -1045,7 +1061,7 @@ func (db *merkleDB) VerifyChangeProof( keyValues := make(map[Key]maybe.Maybe[[]byte], len(proof.KeyChanges)) for _, keyValue := range proof.KeyChanges { - keyValues[db.toKey(keyValue.Key)] = keyValue.Value + keyValues[ToKey(keyValue.Key)] = keyValue.Value } // want to prevent commit writes to DB, but not prevent DB reads @@ -1146,33 +1162,33 @@ func (db *merkleDB) invalidateChildrenExcept(exception *trieView) { } func (db *merkleDB) initializeRootIfNeeded() (ids.ID, error) { - // not sure if the root exists or had a value or not + // not sure if the sentinel node exists or if it had a value // check under both prefixes var err error - db.root, err = db.intermediateNodeDB.Get(db.rootKey) - if err == database.ErrNotFound { - db.root, err = db.valueNodeDB.Get(db.rootKey) + db.sentinelNode, err = db.intermediateNodeDB.Get(Key{}) + if errors.Is(err, database.ErrNotFound) { + db.sentinelNode, err = db.valueNodeDB.Get(Key{}) } if err == nil { - // Root already exists, so calculate its id - db.root.calculateID(db.metrics) - return db.root.id, nil + // sentinel node already exists, so calculate the root ID of the trie + db.sentinelNode.calculateID(db.metrics) + return db.getMerkleRoot(), nil } - if err != database.ErrNotFound { + if !errors.Is(err, database.ErrNotFound) { return ids.Empty, err } - // Root doesn't exist; make a new one. - db.root = newNode(nil, db.rootKey) + // sentinel node doesn't exist; make a new one. + db.sentinelNode = newNode(Key{}) // update its ID - db.root.calculateID(db.metrics) + db.sentinelNode.calculateID(db.metrics) - if err := db.intermediateNodeDB.Put(db.rootKey, db.root); err != nil { + if err := db.intermediateNodeDB.Put(Key{}, db.sentinelNode); err != nil { return ids.Empty, err } - return db.root.id, nil + return db.sentinelNode.id, nil } // Returns a view of the trie as it was when it had root [rootID] for keys within range [start, end]. 
@@ -1248,14 +1264,43 @@ func (db *merkleDB) getNode(key Key, hasValue bool) (*node, error) { switch { case db.closed: return nil, database.ErrClosed - case key == db.rootKey: - return db.root, nil + case key == Key{}: + return db.sentinelNode, nil case hasValue: return db.valueNodeDB.Get(key) } return db.intermediateNodeDB.Get(key) } +func (db *merkleDB) Clear() error { + db.commitLock.Lock() + defer db.commitLock.Unlock() + + db.lock.Lock() + defer db.lock.Unlock() + + // Clear nodes from disk and caches + if err := db.valueNodeDB.Clear(); err != nil { + return err + } + if err := db.intermediateNodeDB.Clear(); err != nil { + return err + } + + // Clear root + db.sentinelNode = newNode(Key{}) + db.sentinelNode.calculateID(db.metrics) + + // Clear history + db.history = newTrieHistory(db.history.maxHistoryLen) + db.history.record(&changeSummary{ + rootID: db.getMerkleRoot(), + values: map[Key]*change[maybe.Maybe[[]byte]]{}, + nodes: map[Key]*change[*node]{}, + }) + return nil +} + // Returns [key] prefixed by [prefix]. // The returned []byte is taken from [bufferPool] and // should be returned to it when the caller is done with it. 
diff --git a/x/merkledb/db_test.go b/x/merkledb/db_test.go index d4f09803cdaf..1cbce5a7792d 100644 --- a/x/merkledb/db_test.go +++ b/x/merkledb/db_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -30,6 +31,8 @@ import ( const defaultHistoryLength = 300 +var emptyKey Key + // newDB returns a new merkle database with the underlying type so that tests can access unexported fields func newDB(ctx context.Context, db database.Database, config Config) (*merkleDB, error) { db, err := New(ctx, db, config) @@ -63,7 +66,7 @@ func Test_MerkleDB_Get_Safety(t *testing.T) { val, err := db.Get(keyBytes) require.NoError(err) - n, err := db.getNode(ToKey(keyBytes, BranchFactor16), true) + n, err := db.getNode(ToKey(keyBytes), true) require.NoError(err) // node's value shouldn't be affected by the edit @@ -96,7 +99,7 @@ func Test_MerkleDB_GetValues_Safety(t *testing.T) { } func Test_MerkleDB_DB_Interface(t *testing.T) { - for _, bf := range branchFactors { + for _, bf := range validBranchFactors { for _, test := range database.Tests { db, err := getBasicDBWithBranchFactor(bf) require.NoError(t, err) @@ -108,7 +111,7 @@ func Test_MerkleDB_DB_Interface(t *testing.T) { func Benchmark_MerkleDB_DBInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bf := range branchFactors { + for _, bf := range validBranchFactors { for _, bench := range database.Benchmarks { db, err := getBasicDBWithBranchFactor(bf) require.NoError(b, err) @@ -773,6 +776,49 @@ func Test_MerkleDB_Random_Insert_Ordering(t *testing.T) { } } +func TestMerkleDBClear(t *testing.T) { + require := require.New(t) + + // Make a database and insert some key-value pairs. 
+ db, err := getBasicDB() + require.NoError(err) + + emptyRootID := db.getMerkleRoot() + + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + + insertRandomKeyValues( + require, + r, + []database.Database{db}, + 1_000, + 0.25, + ) + + // Clear the database. + require.NoError(db.Clear()) + + // Assert that the database is empty. + iter := db.NewIterator() + defer iter.Release() + require.False(iter.Next()) + require.Equal(emptyRootID, db.getMerkleRoot()) + require.Equal(emptyKey, db.sentinelNode.key) + + // Assert caches are empty. + require.Zero(db.valueNodeDB.nodeCache.Len()) + require.Zero(db.intermediateNodeDB.nodeCache.currentSize) + + // Assert history has only the clearing change. + require.Len(db.history.lastChanges, 1) + change, ok := db.history.lastChanges[emptyRootID] + require.True(ok) + require.Empty(change.nodes) + require.Empty(change.values) +} + func FuzzMerkleDBEmptyRandomizedActions(f *testing.F) { f.Fuzz( func( @@ -785,7 +831,7 @@ func FuzzMerkleDBEmptyRandomizedActions(f *testing.F) { } require := require.New(t) r := rand.New(rand.NewSource(randSeed)) // #nosec G404 - for _, bf := range branchFactors { + for _, ts := range validTokenSizes { runRandDBTest( require, r, @@ -795,7 +841,7 @@ func FuzzMerkleDBEmptyRandomizedActions(f *testing.F) { size, 0.01, /*checkHashProbability*/ ), - bf, + ts, ) } }) @@ -813,7 +859,7 @@ func FuzzMerkleDBInitialValuesRandomizedActions(f *testing.F) { } require := require.New(t) r := rand.New(rand.NewSource(randSeed)) // #nosec G404 - for _, bf := range branchFactors { + for _, ts := range validTokenSizes { runRandDBTest( require, r, @@ -824,7 +870,7 @@ func FuzzMerkleDBInitialValuesRandomizedActions(f *testing.F) { numSteps, 0.001, /*checkHashProbability*/ ), - bf, + ts, ) } }) @@ -851,8 +897,8 @@ const ( opMax // boundary value, not an actual op ) -func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf BranchFactor) { - db, err := 
getBasicDBWithBranchFactor(bf) +func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, tokenSize int) { + db, err := getBasicDBWithBranchFactor(tokenSizeToBranchFactor[tokenSize]) require.NoError(err) const ( @@ -877,13 +923,13 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br case opUpdate: require.NoError(currentBatch.Put(step.key, step.value)) - uncommittedKeyValues[ToKey(step.key, bf)] = step.value - uncommittedDeletes.Remove(ToKey(step.key, bf)) + uncommittedKeyValues[ToKey(step.key)] = step.value + uncommittedDeletes.Remove(ToKey(step.key)) case opDelete: require.NoError(currentBatch.Delete(step.key)) - uncommittedDeletes.Add(ToKey(step.key, bf)) - delete(uncommittedKeyValues, ToKey(step.key, bf)) + uncommittedDeletes.Add(ToKey(step.key)) + delete(uncommittedKeyValues, ToKey(step.key)) case opGenerateRangeProof: root, err := db.GetMerkleRoot(context.Background()) require.NoError(err) @@ -910,6 +956,7 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br start, end, root, + tokenSize, )) case opGenerateChangeProof: root, err := db.GetMerkleRoot(context.Background()) @@ -937,7 +984,7 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br require.NoError(err) require.LessOrEqual(len(changeProof.KeyChanges), maxProofLen) - changeProofDB, err := getBasicDBWithBranchFactor(bf) + changeProofDB, err := getBasicDBWithBranchFactor(tokenSizeToBranchFactor[tokenSize]) require.NoError(err) require.NoError(changeProofDB.VerifyChangeProof( @@ -984,10 +1031,10 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br require.ErrorIs(err, database.ErrNotFound) } - want := values[ToKey(step.key, bf)] + want := values[ToKey(step.key)] require.True(bytes.Equal(want, v)) // Use bytes.Equal so nil treated equal to []byte{} - trieValue, err := getNodeValueWithBranchFactor(db, string(step.key), bf) + trieValue, err := getNodeValue(db, 
string(step.key)) if err != nil { require.ErrorIs(err, database.ErrNotFound) } @@ -995,7 +1042,7 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, bf Br require.True(bytes.Equal(want, trieValue)) // Use bytes.Equal so nil treated equal to []byte{} case opCheckhash: // Create a view with the same key-values as [db] - newDB, err := getBasicDBWithBranchFactor(bf) + newDB, err := getBasicDBWithBranchFactor(tokenSizeToBranchFactor[tokenSize]) require.NoError(err) ops := make([]database.BatchOp, 0, len(values)) @@ -1093,7 +1140,7 @@ func generateRandTestWithKeys( step.value = genEnd(step.key) case opCheckhash: // this gets really expensive so control how often it happens - if r.Float64() < checkHashProbability { + if r.Float64() > checkHashProbability { continue } } diff --git a/x/merkledb/helpers_test.go b/x/merkledb/helpers_test.go index 3cd84ce11e7c..b7a2908ff377 100644 --- a/x/merkledb/helpers_test.go +++ b/x/merkledb/helpers_test.go @@ -52,13 +52,13 @@ func writeBasicBatch(t *testing.T, db *merkleDB) { func newRandomProofNode(r *rand.Rand) ProofNode { key := make([]byte, r.Intn(32)) // #nosec G404 _, _ = r.Read(key) // #nosec G404 - serializedKey := ToKey(key, BranchFactor16) + serializedKey := ToKey(key) val := make([]byte, r.Intn(64)) // #nosec G404 _, _ = r.Read(val) // #nosec G404 children := map[byte]ids.ID{} - for j := 0; j < int(BranchFactor16); j++ { + for j := 0; j < 16; j++ { if r.Float64() < 0.5 { var childID ids.ID _, _ = r.Read(childID[:]) // #nosec G404 diff --git a/x/merkledb/history.go b/x/merkledb/history.go index c82fbb1e5f78..c52385445cd2 100644 --- a/x/merkledb/history.go +++ b/x/merkledb/history.go @@ -32,8 +32,6 @@ type trieHistory struct { // Each change is tagged with this monotonic increasing number. nextInsertNumber uint64 - - toKey func([]byte) Key } // Tracks the beginning and ending state of a value. 
@@ -51,7 +49,7 @@ type changeSummaryAndInsertNumber struct { insertNumber uint64 } -// Tracks all of the node and value changes that resulted in the rootID. +// Tracks all the node and value changes that resulted in the rootID. type changeSummary struct { rootID ids.ID nodes map[Key]*change[*node] @@ -65,12 +63,11 @@ func newChangeSummary(estimatedSize int) *changeSummary { } } -func newTrieHistory(maxHistoryLookback int, toKey func([]byte) Key) *trieHistory { +func newTrieHistory(maxHistoryLookback int) *trieHistory { return &trieHistory{ maxHistoryLen: maxHistoryLookback, history: buffer.NewUnboundedDeque[*changeSummaryAndInsertNumber](maxHistoryLookback), lastChanges: make(map[ids.ID]*changeSummaryAndInsertNumber), - toKey: toKey, } } @@ -158,8 +155,8 @@ func (th *trieHistory) getValueChanges( // in order to stay within the [maxLength] limit if necessary. changedKeys = set.Set[Key]{} - startKey = maybe.Bind(start, th.toKey) - endKey = maybe.Bind(end, th.toKey) + startKey = maybe.Bind(start, ToKey) + endKey = maybe.Bind(end, ToKey) // For each element in the history in the range between [startRoot]'s // last appearance (exclusive) and [endRoot]'s last appearance (inclusive), @@ -237,8 +234,8 @@ func (th *trieHistory) getChangesToGetToRoot(rootID ids.ID, start maybe.Maybe[[] } var ( - startKey = maybe.Bind(start, th.toKey) - endKey = maybe.Bind(end, th.toKey) + startKey = maybe.Bind(start, ToKey) + endKey = maybe.Bind(end, ToKey) combinedChanges = newChangeSummary(defaultPreallocationSize) mostRecentChangeInsertNumber = th.nextInsertNumber - 1 mostRecentChangeIndex = th.history.Len() - 1 diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index 1261c92b22df..2ee1e5f4b31b 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -36,8 +36,9 @@ func Test_History_Simple(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) 
require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + + origRootID := db.getMerkleRoot() + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key"), []byte("value0"))) @@ -45,7 +46,7 @@ func Test_History_Simple(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key1"), []byte("value1"))) @@ -54,7 +55,7 @@ func Test_History_Simple(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("k"), []byte("v"))) @@ -62,7 +63,7 @@ func Test_History_Simple(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), 
maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Delete([]byte("k"))) @@ -78,7 +79,7 @@ func Test_History_Simple(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_Large(t *testing.T) { @@ -141,7 +142,7 @@ func Test_History_Large(t *testing.T) { require.NoError(err) require.NotNil(proof) - require.NoError(proof.Verify(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), roots[i])) + require.NoError(proof.Verify(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), roots[i], BranchFactorToTokenSize[config.BranchFactor])) } } } @@ -240,6 +241,7 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, + db.tokenSize, )) // write a new value into the db, now there should be 2 roots in the history @@ -256,6 +258,7 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, + db.tokenSize, )) // trigger a new root to be added to the history, which should cause rollover since there can only be 2 @@ -312,10 +315,10 @@ func Test_History_Values_Lookup_Over_Queue_Break(t *testing.T) { // changes should still be collectable even though the history has had to loop due to hitting max size changes, err := db.history.getValueChanges(startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 10) require.NoError(err) - require.Contains(changes.values, ToKey([]byte("key1"), BranchFactor16)) - 
require.Equal([]byte("value1"), changes.values[ToKey([]byte("key1"), BranchFactor16)].after.Value()) - require.Contains(changes.values, ToKey([]byte("key2"), BranchFactor16)) - require.Equal([]byte("value3"), changes.values[ToKey([]byte("key2"), BranchFactor16)].after.Value()) + require.Contains(changes.values, ToKey([]byte("key1"))) + require.Equal([]byte("value1"), changes.values[ToKey([]byte("key1"))].after.Value()) + require.Contains(changes.values, ToKey([]byte("key2"))) + require.Equal([]byte("value3"), changes.values[ToKey([]byte("key2"))].after.Value()) } func Test_History_RepeatedRoot(t *testing.T) { @@ -336,8 +339,9 @@ func Test_History_RepeatedRoot(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + + origRootID := db.getMerkleRoot() + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key1"), []byte("other"))) @@ -347,7 +351,7 @@ func Test_History_RepeatedRoot(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) // revert state to be the same as in orig proof batch = db.NewBatch() @@ -359,7 +363,7 @@ func Test_History_RepeatedRoot(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), 
maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_ExcessDeletes(t *testing.T) { @@ -378,8 +382,9 @@ func Test_History_ExcessDeletes(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + + origRootID := db.getMerkleRoot() + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Delete([]byte("key1"))) @@ -391,7 +396,7 @@ func Test_History_ExcessDeletes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_DontIncludeAllNodes(t *testing.T) { @@ -410,8 +415,9 @@ func Test_History_DontIncludeAllNodes(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + + origRootID := db.getMerkleRoot() + 
require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("z"), []byte("z"))) @@ -419,7 +425,7 @@ func Test_History_DontIncludeAllNodes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_Branching2Nodes(t *testing.T) { @@ -438,8 +444,8 @@ func Test_History_Branching2Nodes(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + origRootID := db.getMerkleRoot() + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("k"), []byte("v"))) @@ -447,7 +453,7 @@ func Test_History_Branching2Nodes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_Branching3Nodes(t *testing.T) { @@ -466,8 +472,9 @@ func 
Test_History_Branching3Nodes(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + + origRootID := db.getMerkleRoot() + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key321"), []byte("value321"))) @@ -475,7 +482,7 @@ func Test_History_Branching3Nodes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_MaxLength(t *testing.T) { @@ -572,9 +579,7 @@ func TestHistoryRecord(t *testing.T) { require := require.New(t) maxHistoryLen := 3 - th := newTrieHistory(maxHistoryLen, func(bytes []byte) Key { - return ToKey(bytes, BranchFactor16) - }) + th := newTrieHistory(maxHistoryLen) changes := []*changeSummary{} for i := 0; i < maxHistoryLen; i++ { // Fill the history @@ -647,22 +652,20 @@ func TestHistoryRecord(t *testing.T) { func TestHistoryGetChangesToRoot(t *testing.T) { maxHistoryLen := 3 - history := newTrieHistory(maxHistoryLen, func(bytes []byte) Key { - return ToKey(bytes, BranchFactor16) - }) + history := newTrieHistory(maxHistoryLen) changes := []*changeSummary{} for i := 0; i < maxHistoryLen; i++ { // Fill the history changes = append(changes, &changeSummary{ rootID: ids.GenerateTestID(), nodes: map[Key]*change[*node]{ - 
history.toKey([]byte{byte(i)}): { + ToKey([]byte{byte(i)}): { before: &node{id: ids.GenerateTestID()}, after: &node{id: ids.GenerateTestID()}, }, }, values: map[Key]*change[maybe.Maybe[[]byte]]{ - history.toKey([]byte{byte(i)}): { + ToKey([]byte{byte(i)}): { before: maybe.Some([]byte{byte(i)}), after: maybe.Some([]byte{byte(i + 1)}), }, @@ -701,7 +704,7 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 1) require.Len(got.values, 1) reversedChanges := changes[maxHistoryLen-1] - removedKey := history.toKey([]byte{byte(maxHistoryLen - 1)}) + removedKey := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges.nodes[removedKey].before, got.nodes[removedKey].after) require.Equal(reversedChanges.values[removedKey].before, got.values[removedKey].after) require.Equal(reversedChanges.values[removedKey].after, got.values[removedKey].before) @@ -714,12 +717,12 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 2) require.Len(got.values, 2) reversedChanges1 := changes[maxHistoryLen-1] - removedKey1 := history.toKey([]byte{byte(maxHistoryLen - 1)}) + removedKey1 := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].before, got.values[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].after, got.values[removedKey1].before) reversedChanges2 := changes[maxHistoryLen-2] - removedKey2 := history.toKey([]byte{byte(maxHistoryLen - 2)}) + removedKey2 := ToKey([]byte{byte(maxHistoryLen - 2)}) require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].before, got.values[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].after, got.values[removedKey2].before) @@ -733,12 +736,12 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 2) require.Len(got.values, 1) 
reversedChanges1 := changes[maxHistoryLen-1] - removedKey1 := history.toKey([]byte{byte(maxHistoryLen - 1)}) + removedKey1 := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].before, got.values[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].after, got.values[removedKey1].before) reversedChanges2 := changes[maxHistoryLen-2] - removedKey2 := history.toKey([]byte{byte(maxHistoryLen - 2)}) + removedKey2 := ToKey([]byte{byte(maxHistoryLen - 2)}) require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) }, }, @@ -750,10 +753,10 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 2) require.Len(got.values, 1) reversedChanges1 := changes[maxHistoryLen-1] - removedKey1 := history.toKey([]byte{byte(maxHistoryLen - 1)}) + removedKey1 := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) reversedChanges2 := changes[maxHistoryLen-2] - removedKey2 := history.toKey([]byte{byte(maxHistoryLen - 2)}) + removedKey2 := ToKey([]byte{byte(maxHistoryLen - 2)}) require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].before, got.values[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].after, got.values[removedKey2].before) diff --git a/x/merkledb/intermediate_node_db.go b/x/merkledb/intermediate_node_db.go index e146b943d6c2..91cef6242410 100644 --- a/x/merkledb/intermediate_node_db.go +++ b/x/merkledb/intermediate_node_db.go @@ -13,7 +13,7 @@ const defaultBufferLength = 256 // Holds intermediate nodes. That is, those without values. // Changes to this database aren't written to [baseDB] until -// they're evicted from the [nodeCache] or Flush is called.. 
+// they're evicted from the [nodeCache] or Flush is called. type intermediateNodeDB struct { // Holds unused []byte bufferPool *sync.Pool @@ -31,6 +31,7 @@ type intermediateNodeDB struct { // the number of bytes to evict during an eviction batch evictionBatchSize int metrics merkleMetrics + tokenSize int } func newIntermediateNodeDB( @@ -39,12 +40,14 @@ func newIntermediateNodeDB( metrics merkleMetrics, size int, evictionBatchSize int, + tokenSize int, ) *intermediateNodeDB { result := &intermediateNodeDB{ metrics: metrics, baseDB: db, bufferPool: bufferPool, evictionBatchSize: evictionBatchSize, + tokenSize: tokenSize, } result.nodeCache = newOnEvictCache( size, @@ -121,15 +124,15 @@ func (db *intermediateNodeDB) Get(key Key) (*node, error) { // constructDBKey returns a key that can be used in [db.baseDB]. // We need to be able to differentiate between two keys of equal -// byte length but different token length, so we add padding to differentiate. +// byte length but different bit length, so we add padding to differentiate. // Additionally, we add a prefix indicating it is part of the intermediateNodeDB. func (db *intermediateNodeDB) constructDBKey(key Key) []byte { - if key.branchFactor == BranchFactor256 { - // For BranchFactor256, no padding is needed since byte length == token length + if db.tokenSize == 8 { + // For tokens of size byte, no padding is needed since byte length == token length return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Bytes()) } - return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Append(1).Bytes()) + return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Extend(ToToken(1, db.tokenSize)).Bytes()) } func (db *intermediateNodeDB) Put(key Key, n *node) error { @@ -143,3 +146,14 @@ func (db *intermediateNodeDB) Flush() error { func (db *intermediateNodeDB) Delete(key Key) error { return db.nodeCache.Put(key, nil) } + +func (db *intermediateNodeDB) Clear() error { + // Reset the cache. 
Note we don't flush because that would cause us to + // persist intermediate nodes we're about to delete. + db.nodeCache = newOnEvictCache( + db.nodeCache.maxSize, + db.nodeCache.size, + db.nodeCache.onEviction, + ) + return database.AtomicClearPrefix(db.baseDB, db.baseDB, intermediateNodePrefix) +} diff --git a/x/merkledb/intermediate_node_db_test.go b/x/merkledb/intermediate_node_db_test.go index 3d40aa7f8a05..91709708f148 100644 --- a/x/merkledb/intermediate_node_db_test.go +++ b/x/merkledb/intermediate_node_db_test.go @@ -23,7 +23,7 @@ import ( func Test_IntermediateNodeDB(t *testing.T) { require := require.New(t) - n := newNode(nil, ToKey([]byte{0x00}, BranchFactor16)) + n := newNode(ToKey([]byte{0x00})) n.setValue(maybe.Some([]byte{byte(0x02)})) nodeSize := cacheEntrySize(n.key, n) @@ -39,11 +39,12 @@ func Test_IntermediateNodeDB(t *testing.T) { &mockMetrics{}, cacheSize, evictionBatchSize, + 4, ) // Put a key-node pair - node1Key := ToKey([]byte{0x01}, BranchFactor16) - node1 := newNode(nil, node1Key) + node1Key := ToKey([]byte{0x01}) + node1 := newNode(node1Key) node1.setValue(maybe.Some([]byte{byte(0x01)})) require.NoError(db.Put(node1Key, node1)) @@ -53,7 +54,7 @@ func Test_IntermediateNodeDB(t *testing.T) { require.Equal(node1, node1Read) // Overwrite the key-node pair - node1Updated := newNode(nil, node1Key) + node1Updated := newNode(node1Key) node1Updated.setValue(maybe.Some([]byte{byte(0x02)})) require.NoError(db.Put(node1Key, node1Updated)) @@ -73,8 +74,8 @@ func Test_IntermediateNodeDB(t *testing.T) { expectedSize := 0 added := 0 for { - key := ToKey([]byte{byte(added)}, BranchFactor16) - node := newNode(nil, emptyKey(BranchFactor16)) + key := ToKey([]byte{byte(added)}) + node := newNode(Key{}) node.setValue(maybe.Some([]byte{byte(added)})) newExpectedSize := expectedSize + cacheEntrySize(key, node) if newExpectedSize > cacheSize { @@ -93,8 +94,8 @@ func Test_IntermediateNodeDB(t *testing.T) { // Put one more element in the cache, which should 
trigger an eviction // of all but 2 elements. 2 elements remain rather than 1 element because of // the added key prefix increasing the size tracked by the batch. - key := ToKey([]byte{byte(added)}, BranchFactor16) - node := newNode(nil, emptyKey(BranchFactor16)) + key := ToKey([]byte{byte(added)}) + node := newNode(Key{}) node.setValue(maybe.Some([]byte{byte(added)})) require.NoError(db.Put(key, node)) @@ -102,7 +103,7 @@ func Test_IntermediateNodeDB(t *testing.T) { require.Equal(1, db.nodeCache.fifo.Len()) gotKey, _, ok := db.nodeCache.fifo.Oldest() require.True(ok) - require.Equal(ToKey([]byte{byte(added)}, BranchFactor16), gotKey) + require.Equal(ToKey([]byte{byte(added)}), gotKey) // Get a node from the base database // Use an early key that has been evicted from the cache @@ -134,41 +135,45 @@ func FuzzIntermediateNodeDBConstructDBKey(f *testing.F) { cacheSize := 200 evictionBatchSize := cacheSize baseDB := memdb.New() - db := newIntermediateNodeDB( - baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, - &mockMetrics{}, - cacheSize, - evictionBatchSize, - ) + f.Fuzz(func( t *testing.T, key []byte, tokenLength uint, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - p := ToKey(key, branchFactor) - if p.tokenLength <= int(tokenLength) { + for _, tokenSize := range validTokenSizes { + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + evictionBatchSize, + tokenSize, + ) + + p := ToKey(key) + uBitLength := tokenLength * uint(tokenSize) + if uBitLength >= uint(p.length) { t.SkipNow() } - p = p.Take(int(tokenLength)) + p = p.Take(int(uBitLength)) constructedKey := db.constructDBKey(p) baseLength := len(p.value) + len(intermediateNodePrefix) require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) switch { - case branchFactor == BranchFactor256: + case tokenSize == 8: // for keys with tokens 
of size byte, no padding is added require.Equal(p.Bytes(), constructedKey[len(intermediateNodePrefix):]) case p.hasPartialByte(): require.Len(constructedKey, baseLength) - require.Equal(p.Append(1).Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), constructedKey[len(intermediateNodePrefix):]) default: // when a whole number of bytes, there is an extra padding byte require.Len(constructedKey, baseLength+1) - require.Equal(p.Append(1).Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), constructedKey[len(intermediateNodePrefix):]) } } }) @@ -187,10 +192,11 @@ func Test_IntermediateNodeDB_ConstructDBKey_DirtyBuffer(t *testing.T) { &mockMetrics{}, cacheSize, evictionBatchSize, + 4, ) db.bufferPool.Put([]byte{0xFF, 0xFF, 0xFF}) - constructedKey := db.constructDBKey(ToKey([]byte{}, BranchFactor16)) + constructedKey := db.constructDBKey(ToKey([]byte{})) require.Len(constructedKey, 2) require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) require.Equal(byte(16), constructedKey[len(constructedKey)-1]) @@ -201,9 +207,38 @@ func Test_IntermediateNodeDB_ConstructDBKey_DirtyBuffer(t *testing.T) { }, } db.bufferPool.Put([]byte{0xFF, 0xFF, 0xFF}) - p := ToKey([]byte{0xF0}, BranchFactor16).Take(1) + p := ToKey([]byte{0xF0}).Take(4) constructedKey = db.constructDBKey(p) require.Len(constructedKey, 2) require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) - require.Equal(p.Append(1).Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Equal(p.Extend(ToToken(1, 4)).Bytes(), constructedKey[len(intermediateNodePrefix):]) +} + +func TestIntermediateNodeDBClear(t *testing.T) { + require := require.New(t) + cacheSize := 200 + evictionBatchSize := cacheSize + baseDB := memdb.New() + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + 
&mockMetrics{}, + cacheSize, + evictionBatchSize, + 4, + ) + + for _, b := range [][]byte{{1}, {2}, {3}} { + require.NoError(db.Put(ToKey(b), newNode(ToKey(b)))) + } + + require.NoError(db.Clear()) + + iter := baseDB.NewIteratorWithPrefix(intermediateNodePrefix) + defer iter.Release() + require.False(iter.Next()) + + require.Zero(db.nodeCache.currentSize) } diff --git a/x/merkledb/key.go b/x/merkledb/key.go index 461372a2baa8..d65d9b74a0a6 100644 --- a/x/merkledb/key.go +++ b/x/merkledb/key.go @@ -8,112 +8,135 @@ import ( "fmt" "strings" "unsafe" + + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) var ( - errInvalidBranchFactor = errors.New("invalid branch factor") - - branchFactorToTokenConfig = map[BranchFactor]tokenConfig{ - BranchFactor2: { - branchFactor: BranchFactor2, - tokenBitSize: 1, - tokensPerByte: 8, - singleTokenMask: 0b0000_0001, - }, - BranchFactor4: { - branchFactor: BranchFactor4, - tokenBitSize: 2, - tokensPerByte: 4, - singleTokenMask: 0b0000_0011, - }, - BranchFactor16: { - branchFactor: BranchFactor16, - tokenBitSize: 4, - tokensPerByte: 2, - singleTokenMask: 0b0000_1111, - }, - BranchFactor256: { - branchFactor: BranchFactor256, - tokenBitSize: 8, - tokensPerByte: 1, - singleTokenMask: 0b1111_1111, - }, + ErrInvalidBranchFactor = errors.New("branch factor must match one of the predefined branch factors") + + BranchFactorToTokenSize = map[BranchFactor]int{ + BranchFactor2: 1, + BranchFactor4: 2, + BranchFactor16: 4, + BranchFactor256: 8, + } + + tokenSizeToBranchFactor = map[int]BranchFactor{ + 1: BranchFactor2, + 2: BranchFactor4, + 4: BranchFactor16, + 8: BranchFactor256, + } + + validTokenSizes = maps.Keys(tokenSizeToBranchFactor) + + validBranchFactors = []BranchFactor{ + BranchFactor2, + BranchFactor4, + BranchFactor16, + BranchFactor256, } ) type BranchFactor int const ( - BranchFactor2 BranchFactor = 2 - BranchFactor4 BranchFactor = 4 - BranchFactor16 BranchFactor = 16 - BranchFactor256 BranchFactor = 256 + BranchFactor2 = 
BranchFactor(2) + BranchFactor4 = BranchFactor(4) + BranchFactor16 = BranchFactor(16) + BranchFactor256 = BranchFactor(256) ) -func (f BranchFactor) Valid() error { - if _, ok := branchFactorToTokenConfig[f]; ok { - return nil +// Valid checks if BranchFactor [b] is one of the predefined valid options for BranchFactor +func (b BranchFactor) Valid() error { + for _, validBF := range validBranchFactors { + if validBF == b { + return nil + } } - return fmt.Errorf("%w: %d", errInvalidBranchFactor, f) + return fmt.Errorf("%w: %d", ErrInvalidBranchFactor, b) } -type tokenConfig struct { - branchFactor BranchFactor - tokensPerByte int - tokenBitSize byte - singleTokenMask byte +// ToToken creates a key version of the passed byte with bit length equal to tokenSize +func ToToken(val byte, tokenSize int) Key { + return Key{ + value: string([]byte{val << dualBitIndex(tokenSize)}), + length: tokenSize, + } } -type Key struct { - tokenLength int - value string - tokenConfig +// Token returns the token at the specified index, +// Assumes that bitIndex + tokenSize doesn't cross a byte boundary +func (k Key) Token(bitIndex int, tokenSize int) byte { + storageByte := k.value[bitIndex/8] + // Shift the byte right to get the last bit to the rightmost position. + storageByte >>= dualBitIndex((bitIndex + tokenSize) % 8) + // Apply a mask to remove any other bits in the byte. 
+ return storageByte & (0xFF >> dualBitIndex(tokenSize)) } -func emptyKey(bf BranchFactor) Key { - return Key{ - tokenConfig: branchFactorToTokenConfig[bf], +// iteratedHasPrefix checks if the provided prefix key is a prefix of the current key starting after the [bitsOffset]th bit +// this has better performance than constructing the actual key via Skip() then calling HasPrefix because it avoids an allocation +func (k Key) iteratedHasPrefix(prefix Key, bitsOffset int, tokenSize int) bool { + if k.length-bitsOffset < prefix.length { + return false } + for i := 0; i < prefix.length; i += tokenSize { + if k.Token(bitsOffset+i, tokenSize) != prefix.Token(i, tokenSize) { + return false + } + } + return true } -// ToKey returns [keyBytes] as a new key with the given [branchFactor]. -// Assumes [branchFactor] is valid. -func ToKey(keyBytes []byte, branchFactor BranchFactor) Key { - tc := branchFactorToTokenConfig[branchFactor] - return Key{ - value: byteSliceToString(keyBytes), - tokenConfig: tc, - tokenLength: len(keyBytes) * tc.tokensPerByte, - } +type Key struct { + // The number of bits in the key. + length int + // The string representation of the key + value string } -// TokensLength returns the number of tokens in [k]. -func (k Key) TokensLength() int { - return k.tokenLength +// ToKey returns [keyBytes] as a new key +// Assumes all bits of the keyBytes are part of the Key, call Key.Take if that is not the case +// Creates a copy of [keyBytes], so keyBytes are safe to edit after the call +func ToKey(keyBytes []byte) Key { + return toKey(slices.Clone(keyBytes)) +} + +// toKey returns [keyBytes] as a new key +// Assumes all bits of the keyBytes are part of the Key, call Key.Take if that is not the case +// Caller must not modify [keyBytes] after this call. 
+func toKey(keyBytes []byte) Key { + return Key{ + value: byteSliceToString(keyBytes), + length: len(keyBytes) * 8, + } } // hasPartialByte returns true iff the key fits into a non-whole number of bytes func (k Key) hasPartialByte() bool { - return k.tokenLength%k.tokensPerByte > 0 + return k.length%8 > 0 } // HasPrefix returns true iff [prefix] is a prefix of [k] or equal to it. func (k Key) HasPrefix(prefix Key) bool { // [prefix] must be shorter than [k] to be a prefix. - if k.tokenLength < prefix.tokenLength { + if k.length < prefix.length { return false } // The number of tokens in the last byte of [prefix], or zero // if [prefix] fits into a whole number of bytes. - remainderTokensCount := prefix.tokenLength % k.tokensPerByte - if remainderTokensCount == 0 { + remainderBitCount := prefix.length % 8 + if remainderBitCount == 0 { return strings.HasPrefix(k.value, prefix.value) } // check that the tokens in the partially filled final byte of [prefix] are // equal to the tokens in the final byte of [k]. - remainderBitsMask := byte(0xFF >> (remainderTokensCount * int(k.tokenBitSize))) + remainderBitsMask := byte(0xFF >> remainderBitCount) prefixRemainderTokens := prefix.value[len(prefix.value)-1] | remainderBitsMask remainderTokens := k.value[len(prefix.value)-1] | remainderBitsMask @@ -122,7 +145,7 @@ func (k Key) HasPrefix(prefix Key) bool { } // Note that this will never be an index OOB because len(prefix.value) > 0. - // If len(prefix.value) == 0 were true, [remainderTokens] would be 0 so we + // If len(prefix.value) == 0 were true, [remainderTokens] would be 0, so we // would have returned above. 
prefixWithoutPartialByte := prefix.value[:len(prefix.value)-1] return strings.HasPrefix(k.value, prefixWithoutPartialByte) @@ -134,130 +157,64 @@ func (k Key) HasStrictPrefix(prefix Key) bool { return k != prefix && k.HasPrefix(prefix) } -// Token returns the token at the specified index, -func (k Key) Token(index int) byte { - // Find the index in [k.value] of the byte containing the token at [index]. - storageByteIndex := index / k.tokensPerByte - storageByte := k.value[storageByteIndex] - // Shift the byte right to get the token to the rightmost position. - storageByte >>= k.bitsToShift(index) - // Apply a mask to remove any other tokens in the byte. - return storageByte & k.singleTokenMask -} - -// Append returns a new Path that equals the current -// Path with [token] appended to the end. -func (k Key) Append(token byte) Key { - buffer := make([]byte, k.bytesNeeded(k.tokenLength+1)) - k.appendIntoBuffer(buffer, token) - return Key{ - value: byteSliceToString(buffer), - tokenLength: k.tokenLength + 1, - tokenConfig: k.tokenConfig, - } +// Length returns the number of bits in the Key +func (k Key) Length() int { + return k.length } // Greater returns true if current Key is greater than other Key func (k Key) Greater(other Key) bool { - return k.value > other.value || (k.value == other.value && k.tokenLength > other.tokenLength) + return k.value > other.value || (k.value == other.value && k.length > other.length) } -// Less returns true if current Key is less than other Key +// Less will return true if current Key is less than other Key func (k Key) Less(other Key) bool { - return k.value < other.value || (k.value == other.value && k.tokenLength < other.tokenLength) + return k.value < other.value || (k.value == other.value && k.length < other.length) } -// bitsToShift returns the number of bits to right shift a token -// within its storage byte to get it to the rightmost -// position in the byte. 
Equivalently, this is the number of bits -// to left shift a raw token value to get it to the correct position -// within its storage byte. -// Example with branch factor 16: -// Suppose the token array is -// [0x01, 0x02, 0x03, 0x04] -// The byte representation of this array is -// [0b0001_0010, 0b0011_0100] -// To get the token at index 0 (0b0001) to the rightmost position -// in its storage byte (i.e. to make 0b0001_0010 into 0b0000_0001), -// we need to shift 0b0001_0010 to the right by 4 bits. -// Similarly: -// * Token at index 1 (0b0010) needs to be shifted by 0 bits -// * Token at index 2 (0b0011) needs to be shifted by 4 bits -// * Token at index 3 (0b0100) needs to be shifted by 0 bits -func (k Key) bitsToShift(index int) byte { - // [tokenIndex] is the index of the token in the byte. - // For example, if the branch factor is 16, then each byte contains 2 tokens. - // The first is at index 0, and the second is at index 1, by this definition. - tokenIndex := index % k.tokensPerByte - // The bit within the byte that the token starts at. - startBitIndex := k.tokenBitSize * byte(tokenIndex) - // The bit within the byte that the token ends at. - endBitIndex := startBitIndex + k.tokenBitSize - 1 - // We want to right shift until [endBitIndex] is at the last index, so return - // the distance from the end of the byte to the end of the token. - // Note that 7 is the index of the last bit in a byte. - return 7 - endBitIndex -} - -// bytesNeeded returns the number of bytes needed to store the passed number of -// tokens. -// -// Invariant: [tokens] is a non-negative, but otherwise untrusted, input and -// this method must never overflow. 
-func (k Key) bytesNeeded(tokens int) int { - size := tokens / k.tokensPerByte - if tokens%k.tokensPerByte != 0 { - size++ +// Extend returns a new Key that is the in-order aggregation of Key [k] with [keys] +func (k Key) Extend(keys ...Key) Key { + totalBitLength := k.length + for _, key := range keys { + totalBitLength += key.length } - return size -} - -func (k Key) AppendExtend(token byte, extensionKey Key) Key { - appendBytes := k.bytesNeeded(k.tokenLength + 1) - totalLength := k.tokenLength + 1 + extensionKey.tokenLength - buffer := make([]byte, k.bytesNeeded(totalLength)) - k.appendIntoBuffer(buffer[:appendBytes], token) - - // the extension path will be shifted based on the number of tokens in the partial byte - tokenRemainder := (k.tokenLength + 1) % k.tokensPerByte - result := Key{ - value: byteSliceToString(buffer), - tokenLength: totalLength, - tokenConfig: k.tokenConfig, + buffer := make([]byte, bytesNeeded(totalBitLength)) + copy(buffer, k.value) + currentTotal := k.length + for _, key := range keys { + extendIntoBuffer(buffer, key, currentTotal) + currentTotal += key.length } - extensionBuffer := buffer[appendBytes-1:] - if extensionKey.tokenLength == 0 { - return result + return Key{ + value: byteSliceToString(buffer), + length: totalBitLength, } +} - // If the existing value fits into a whole number of bytes, - // the extension path can be copied directly into the buffer. - if tokenRemainder == 0 { - copy(extensionBuffer[1:], extensionKey.value) - return result +func extendIntoBuffer(buffer []byte, val Key, bitsOffset int) { + if val.length == 0 { + return + } + bytesOffset := bytesNeeded(bitsOffset) + bitsRemainder := bitsOffset % 8 + if bitsRemainder == 0 { + copy(buffer[bytesOffset:], val.value) + return } - // The existing path doesn't fit into a whole number of bytes. - // Figure out how many bits to shift. 
- shift := extensionKey.bitsToShift(tokenRemainder - 1) // Fill the partial byte with the first [shift] bits of the extension path - extensionBuffer[0] |= extensionKey.value[0] >> (8 - shift) + buffer[bytesOffset-1] |= val.value[0] >> bitsRemainder // copy the rest of the extension path bytes into the buffer, // shifted byte shift bits - shiftCopy(extensionBuffer[1:], extensionKey.value, shift) - - return result + shiftCopy(buffer[bytesOffset:], val.value, dualBitIndex(bitsRemainder)) } -func (k Key) appendIntoBuffer(buffer []byte, token byte) { - copy(buffer, k.value) - - // Shift [token] to the left such that it's at the correct - // index within its storage byte, then OR it with its storage - // byte to write the token into the byte. - buffer[len(buffer)-1] |= token << k.bitsToShift(k.tokenLength) +// dualBitIndex gets the dual of the bit index +// ex: in a byte, the bit 5 from the right is the same as the bit 3 from the left +func dualBitIndex(shift int) int { + return (8 - shift) % 8 } // Treats [src] as a bit array and copies it into [dst] shifted by [shift] bits. @@ -266,10 +223,11 @@ func (k Key) appendIntoBuffer(buffer []byte, token byte) { // Assumes len(dst) >= len(src)-1. // If len(dst) == len(src)-1 the last byte of [src] is only partially copied // (i.e. the rightmost bits are not copied). -func shiftCopy(dst []byte, src string, shift byte) { +func shiftCopy(dst []byte, src string, shift int) { i := 0 + dualShift := dualBitIndex(shift) for ; i < len(src)-1; i++ { - dst[i] = src[i]<>(8-shift) + dst[i] = src[i]<>dualShift } if i < len(dst) { @@ -279,59 +237,56 @@ func shiftCopy(dst []byte, src string, shift byte) { } // Skip returns a new Key that contains the last -// k.length-tokensToSkip tokens of [k]. -func (k Key) Skip(tokensToSkip int) Key { - if k.tokenLength == tokensToSkip { - return emptyKey(k.branchFactor) +// k.length-bitsToSkip bits of [k]. 
+func (k Key) Skip(bitsToSkip int) Key { + if k.length <= bitsToSkip { + return Key{} } result := Key{ - value: k.value[tokensToSkip/k.tokensPerByte:], - tokenLength: k.tokenLength - tokensToSkip, - tokenConfig: k.tokenConfig, + value: k.value[bitsToSkip/8:], + length: k.length - bitsToSkip, } // if the tokens to skip is a whole number of bytes, // the remaining bytes exactly equals the new key. - if tokensToSkip%k.tokensPerByte == 0 { + if bitsToSkip%8 == 0 { return result } - // tokensToSkip does not remove a whole number of bytes. + // bitsToSkip does not remove a whole number of bytes. // copy the remaining shifted bytes into a new buffer. - buffer := make([]byte, k.bytesNeeded(result.tokenLength)) - bitsSkipped := tokensToSkip * int(k.tokenBitSize) - bitsRemovedFromFirstRemainingByte := byte(bitsSkipped % 8) + buffer := make([]byte, bytesNeeded(result.length)) + bitsRemovedFromFirstRemainingByte := bitsToSkip % 8 shiftCopy(buffer, result.value, bitsRemovedFromFirstRemainingByte) result.value = byteSliceToString(buffer) return result } -// Take returns a new Key that contains the first tokensToTake tokens of the current Key -func (k Key) Take(tokensToTake int) Key { - if k.tokenLength <= tokensToTake { +// Take returns a new Key that contains the first bitsToTake bits of the current Key +func (k Key) Take(bitsToTake int) Key { + if k.length <= bitsToTake { return k } result := Key{ - tokenLength: tokensToTake, - tokenConfig: k.tokenConfig, + length: bitsToTake, } - if !result.hasPartialByte() { - result.value = k.value[:tokensToTake/k.tokensPerByte] + remainderBits := result.length % 8 + if remainderBits == 0 { + result.value = k.value[:bitsToTake/8] return result } // We need to zero out some bits of the last byte so a simple slice will not work // Create a new []byte to store the altered value - buffer := make([]byte, k.bytesNeeded(tokensToTake)) + buffer := make([]byte, bytesNeeded(bitsToTake)) copy(buffer, k.value) - // We want to zero out everything to the 
right of the last token, which is at index [tokensToTake] - 1 - // Mask will be (8-bitsToShift) number of 1's followed by (bitsToShift) number of 0's - mask := byte(0xFF << k.bitsToShift(tokensToTake-1)) - buffer[len(buffer)-1] &= mask + // We want to zero out everything to the right of the last token, which is at index bitsToTake-1 + // Mask will be (8-remainderBits) number of 1's followed by (remainderBits) number of 0's + buffer[len(buffer)-1] &= byte(0xFF << dualBitIndex(remainderBits)) result.value = byteSliceToString(buffer) return result @@ -345,20 +300,6 @@ func (k Key) Bytes() []byte { return stringToByteSlice(k.value) } -// iteratedHasPrefix checks if the provided prefix path is a prefix of the current path after having skipped [skipTokens] tokens first -// this has better performance than constructing the actual path via Skip() then calling HasPrefix because it avoids the []byte allocation -func (k Key) iteratedHasPrefix(skipTokens int, prefix Key) bool { - if k.tokenLength-skipTokens < prefix.tokenLength { - return false - } - for i := 0; i < prefix.tokenLength; i++ { - if k.Token(skipTokens+i) != prefix.Token(i) { - return false - } - } - return true -} - // byteSliceToString converts the []byte to a string // Invariant: The input []byte must not be modified. func byteSliceToString(bs []byte) string { @@ -374,3 +315,12 @@ func stringToByteSlice(value string) []byte { // "safe" because we never edit the []byte return unsafe.Slice(unsafe.StringData(value), len(value)) } + +// Returns the number of bytes needed to store [bits] bits. 
+func bytesNeeded(bits int) int { + size := bits / 8 + if bits%8 != 0 { + size++ + } + return size +} diff --git a/x/merkledb/key_test.go b/x/merkledb/key_test.go index e56ee1a98050..f0819483b1a8 100644 --- a/x/merkledb/key_test.go +++ b/x/merkledb/key_test.go @@ -5,48 +5,52 @@ package merkledb import ( "fmt" + "strconv" "testing" "github.com/stretchr/testify/require" ) -var branchFactors = []BranchFactor{ - BranchFactor2, - BranchFactor4, - BranchFactor16, - BranchFactor256, +func TestBranchFactor_Valid(t *testing.T) { + require := require.New(t) + for _, bf := range validBranchFactors { + require.NoError(bf.Valid()) + } + var empty BranchFactor + err := empty.Valid() + require.ErrorIs(err, ErrInvalidBranchFactor) } func TestHasPartialByte(t *testing.T) { - for _, branchFactor := range branchFactors { - t.Run(fmt.Sprint(branchFactor), func(t *testing.T) { + for _, ts := range validTokenSizes { + t.Run(strconv.Itoa(ts), func(t *testing.T) { require := require.New(t) - key := emptyKey(branchFactor) + key := Key{} require.False(key.hasPartialByte()) - if branchFactor == BranchFactor256 { + if ts == 8 { // Tokens are an entire byte so // there is never a partial byte. - key = key.Append(0) + key = key.Extend(ToToken(1, ts)) require.False(key.hasPartialByte()) - key = key.Append(0) + key = key.Extend(ToToken(0, ts)) require.False(key.hasPartialByte()) return } // Fill all but the last token of the first byte. - for i := 0; i < key.tokensPerByte-1; i++ { - key = key.Append(0) + for i := 0; i < 8-ts; i += ts { + key = key.Extend(ToToken(1, ts)) require.True(key.hasPartialByte()) } // Fill the last token of the first byte. - key = key.Append(0) + key = key.Extend(ToToken(0, ts)) require.False(key.hasPartialByte()) // Fill the first token of the second byte. 
- key = key.Append(0) + key = key.Extend(ToToken(0, ts)) require.True(key.hasPartialByte()) }) } @@ -55,66 +59,71 @@ func TestHasPartialByte(t *testing.T) { func Test_Key_Has_Prefix(t *testing.T) { type test struct { name string - keyA func(bf BranchFactor) Key - keyB func(bf BranchFactor) Key + keyA func(ts int) Key + keyB func(ts int) Key isStrictPrefix bool isPrefix bool } key := "Key" - keyLength := map[BranchFactor]int{} - for _, branchFactor := range branchFactors { - config := branchFactorToTokenConfig[branchFactor] - keyLength[branchFactor] = len(key) * config.tokensPerByte - } tests := []test{ { name: "equal keys", - keyA: func(bf BranchFactor) Key { return ToKey([]byte(key), bf) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte(key), bf) }, + keyA: func(ts int) Key { return ToKey([]byte(key)) }, + keyB: func(ts int) Key { return ToKey([]byte(key)) }, isPrefix: true, isStrictPrefix: false, }, { - name: "one key has one fewer token", - keyA: func(bf BranchFactor) Key { return ToKey([]byte(key), bf) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte(key), bf).Take(keyLength[bf] - 1) }, + name: "one key has one fewer token", + keyA: func(ts int) Key { return ToKey([]byte(key)) }, + keyB: func(ts int) Key { + return ToKey([]byte(key)).Take(len(key)*8 - ts) + }, isPrefix: true, isStrictPrefix: true, }, { - name: "equal keys, both have one fewer token", - keyA: func(bf BranchFactor) Key { return ToKey([]byte(key), bf).Take(keyLength[bf] - 1) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte(key), bf).Take(keyLength[bf] - 1) }, + name: "equal keys, both have one fewer token", + keyA: func(ts int) Key { + return ToKey([]byte(key)).Take(len(key)*8 - ts) + }, + keyB: func(ts int) Key { + return ToKey([]byte(key)).Take(len(key)*8 - ts) + }, isPrefix: true, isStrictPrefix: false, }, { name: "different keys", - keyA: func(bf BranchFactor) Key { return ToKey([]byte{0xF7}, bf) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte{0xF0}, bf) 
}, + keyA: func(ts int) Key { return ToKey([]byte{0xF7}) }, + keyB: func(ts int) Key { return ToKey([]byte{0xF0}) }, isPrefix: false, isStrictPrefix: false, }, { - name: "same bytes, different lengths", - keyA: func(bf BranchFactor) Key { return ToKey([]byte{0x10, 0x00}, bf).Take(1) }, - keyB: func(bf BranchFactor) Key { return ToKey([]byte{0x10, 0x00}, bf).Take(2) }, + name: "same bytes, different lengths", + keyA: func(ts int) Key { + return ToKey([]byte{0x10, 0x00}).Take(ts) + }, + keyB: func(ts int) Key { + return ToKey([]byte{0x10, 0x00}).Take(ts * 2) + }, isPrefix: false, isStrictPrefix: false, }, } for _, tt := range tests { - for _, bf := range branchFactors { - t.Run(tt.name+" bf "+fmt.Sprint(bf), func(t *testing.T) { + for _, ts := range validTokenSizes { + t.Run(tt.name+" ts "+strconv.Itoa(ts), func(t *testing.T) { require := require.New(t) - keyA := tt.keyA(bf) - keyB := tt.keyB(bf) + keyA := tt.keyA(ts) + keyB := tt.keyB(ts) require.Equal(tt.isPrefix, keyA.HasPrefix(keyB)) - require.Equal(tt.isPrefix, keyA.iteratedHasPrefix(0, keyB)) + require.Equal(tt.isPrefix, keyA.iteratedHasPrefix(keyB, 0, ts)) require.Equal(tt.isStrictPrefix, keyA.HasStrictPrefix(keyB)) }) } @@ -124,30 +133,29 @@ func Test_Key_Has_Prefix(t *testing.T) { func Test_Key_Skip(t *testing.T) { require := require.New(t) - for _, bf := range branchFactors { - empty := emptyKey(bf) - require.Equal(ToKey([]byte{0}, bf).Skip(empty.tokensPerByte), empty) - if bf == BranchFactor256 { + empty := Key{} + require.Equal(ToKey([]byte{0}).Skip(8), empty) + for _, ts := range validTokenSizes { + if ts == 8 { continue } - shortKey := ToKey([]byte{0b0101_0101}, bf) - longKey := ToKey([]byte{0b0101_0101, 0b0101_0101}, bf) - for i := 0; i < shortKey.tokensPerByte; i++ { - shift := byte(i) * shortKey.tokenBitSize - skipKey := shortKey.Skip(i) + shortKey := ToKey([]byte{0b0101_0101}) + longKey := ToKey([]byte{0b0101_0101, 0b0101_0101}) + for shift := 0; shift < 8; shift += ts { + skipKey := 
shortKey.Skip(shift) require.Equal(byte(0b0101_0101<>(8-shift)), skipKey.value[0]) require.Equal(byte(0b0101_0101<>shift)< ts { + key1 = key1.Take(key1.length - ts) + } + key2 := ToKey(second) + if forceSecondOdd && key2.length > ts { + key2 = key2.Take(key2.length - ts) + } + token := byte(int(tokenByte) % int(tokenSizeToBranchFactor[ts])) + extendedP := key1.Extend(ToToken(token, ts), key2) + require.Equal(key1.length+key2.length+ts, extendedP.length) + firstIndex := 0 + for ; firstIndex < key1.length; firstIndex += ts { + require.Equal(key1.Token(firstIndex, ts), extendedP.Token(firstIndex, ts)) + } + require.Equal(token, extendedP.Token(firstIndex, ts)) + firstIndex += ts + for secondIndex := 0; secondIndex < key2.length; secondIndex += ts { + require.Equal(key2.Token(secondIndex, ts), extendedP.Token(firstIndex+secondIndex, ts)) + } + } + }) +} + +func FuzzKeyDoubleExtend_Any(f *testing.F) { + f.Fuzz(func( + t *testing.T, + baseKeyBytes []byte, + firstKeyBytes []byte, + secondKeyBytes []byte, + forceBaseOdd bool, forceFirstOdd bool, forceSecondOdd bool, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - key1 := ToKey(first, branchFactor) - if forceFirstOdd && key1.tokenLength > 0 { - key1 = key1.Take(key1.tokenLength - 1) + for _, ts := range validTokenSizes { + baseKey := ToKey(baseKeyBytes) + if forceBaseOdd && baseKey.length > ts { + baseKey = baseKey.Take(baseKey.length - ts) + } + firstKey := ToKey(firstKeyBytes) + if forceFirstOdd && firstKey.length > ts { + firstKey = firstKey.Take(firstKey.length - ts) } - key2 := ToKey(second, branchFactor) - if forceSecondOdd && key2.tokenLength > 0 { - key2 = key2.Take(key2.tokenLength - 1) + + secondKey := ToKey(secondKeyBytes) + if forceSecondOdd && secondKey.length > ts { + secondKey = secondKey.Take(secondKey.length - ts) } - token = byte(int(token) % int(branchFactor)) - extendedP := key1.AppendExtend(token, key2) - require.Equal(key1.tokenLength+key2.tokenLength+1, 
extendedP.tokenLength) - for i := 0; i < key1.tokenLength; i++ { - require.Equal(key1.Token(i), extendedP.Token(i)) + + extendedP := baseKey.Extend(firstKey, secondKey) + require.Equal(baseKey.length+firstKey.length+secondKey.length, extendedP.length) + totalIndex := 0 + for baseIndex := 0; baseIndex < baseKey.length; baseIndex += ts { + require.Equal(baseKey.Token(baseIndex, ts), extendedP.Token(baseIndex, ts)) } - require.Equal(token, extendedP.Token(key1.tokenLength)) - for i := 0; i < key2.tokenLength; i++ { - require.Equal(key2.Token(i), extendedP.Token(i+1+key1.tokenLength)) + totalIndex += baseKey.length + for firstIndex := 0; firstIndex < firstKey.length; firstIndex += ts { + require.Equal(firstKey.Token(firstIndex, ts), extendedP.Token(totalIndex+firstIndex, ts)) + } + totalIndex += firstKey.length + for secondIndex := 0; secondIndex < secondKey.length; secondIndex += ts { + require.Equal(secondKey.Token(secondIndex, ts), extendedP.Token(totalIndex+secondIndex, ts)) } } }) @@ -509,15 +478,18 @@ func FuzzKeySkip(f *testing.F) { tokensToSkip uint, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - key1 := ToKey(first, branchFactor) - if int(tokensToSkip) >= key1.tokenLength { + key1 := ToKey(first) + for _, ts := range validTokenSizes { + // need bits to be a multiple of token size + ubitsToSkip := tokensToSkip * uint(ts) + if ubitsToSkip >= uint(key1.length) { t.SkipNow() } - key2 := key1.Skip(int(tokensToSkip)) - require.Equal(key1.tokenLength-int(tokensToSkip), key2.tokenLength) - for i := 0; i < key2.tokenLength; i++ { - require.Equal(key1.Token(int(tokensToSkip)+i), key2.Token(i)) + bitsToSkip := int(ubitsToSkip) + key2 := key1.Skip(bitsToSkip) + require.Equal(key1.length-bitsToSkip, key2.length) + for i := 0; i < key2.length; i += ts { + require.Equal(key1.Token(bitsToSkip+i, ts), key2.Token(i, ts)) } } }) @@ -527,19 +499,24 @@ func FuzzKeyTake(f *testing.F) { f.Fuzz(func( t *testing.T, first []byte, - tokensToTake uint, + 
uTokensToTake uint, ) { require := require.New(t) - for _, branchFactor := range branchFactors { - key1 := ToKey(first, branchFactor) - if int(tokensToTake) >= key1.tokenLength { + for _, ts := range validTokenSizes { + key1 := ToKey(first) + uBitsToTake := uTokensToTake * uint(ts) + if uBitsToTake >= uint(key1.length) { t.SkipNow() } - key2 := key1.Take(int(tokensToTake)) - require.Equal(int(tokensToTake), key2.tokenLength) - - for i := 0; i < key2.tokenLength; i++ { - require.Equal(key1.Token(i), key2.Token(i)) + bitsToTake := int(uBitsToTake) + key2 := key1.Take(bitsToTake) + require.Equal(bitsToTake, key2.length) + if key2.hasPartialByte() { + paddingMask := byte(0xFF >> (key2.length % 8)) + require.Zero(key2.value[len(key2.value)-1] & paddingMask) + } + for i := 0; i < bitsToTake; i += ts { + require.Equal(key1.Token(i, ts), key2.Token(i, ts)) } } }) @@ -550,7 +527,7 @@ func TestShiftCopy(t *testing.T) { dst []byte src []byte expected []byte - shift byte + shift int } tests := []test{ diff --git a/x/merkledb/metrics.go b/x/merkledb/metrics.go index a633b9d1bee1..d8a80a02db5a 100644 --- a/x/merkledb/metrics.go +++ b/x/merkledb/metrics.go @@ -8,7 +8,7 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) var ( @@ -198,8 +198,7 @@ func newMetrics(namespace string, reg prometheus.Registerer) (merkleMetrics, err Help: "cumulative amount of misses on the view value cache", }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.ioKeyWrite), reg.Register(m.ioKeyRead), reg.Register(m.hashCount), @@ -212,7 +211,7 @@ func newMetrics(namespace string, reg prometheus.Registerer) (merkleMetrics, err reg.Register(m.viewValueCacheHit), reg.Register(m.viewValueCacheMiss), ) - return &m, errs.Err + return &m, err } func (m *metrics) DatabaseNodeRead() { diff --git a/x/merkledb/mock_db.go b/x/merkledb/mock_db.go index 
f7e35883c177..a4d1d6b6d6f3 100644 --- a/x/merkledb/mock_db.go +++ b/x/merkledb/mock_db.go @@ -40,6 +40,20 @@ func (m *MockMerkleDB) EXPECT() *MockMerkleDBMockRecorder { return m.recorder } +// Clear mocks base method. +func (m *MockMerkleDB) Clear() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Clear") + ret0, _ := ret[0].(error) + return ret0 +} + +// Clear indicates an expected call of Clear. +func (mr *MockMerkleDBMockRecorder) Clear() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockMerkleDB)(nil).Clear)) +} + // Close mocks base method. func (m *MockMerkleDB) Close() error { m.ctrl.T.Helper() diff --git a/x/merkledb/node.go b/x/merkledb/node.go index 259e048c1793..3fd38021a0c8 100644 --- a/x/merkledb/node.go +++ b/x/merkledb/node.go @@ -14,13 +14,6 @@ import ( const HashLength = 32 -// the values that go into the node's id -type hashValues struct { - Children map[byte]child - Value maybe.Maybe[[]byte] - Key Key -} - // Representation of a node stored in the database. type dbNode struct { value maybe.Maybe[[]byte] @@ -43,24 +36,19 @@ type node struct { } // Returns a new node with the given [key] and no value. -// If [parent] isn't nil, the new node is added as a child of [parent]. -func newNode(parent *node, key Key) *node { - newNode := &node{ +func newNode(key Key) *node { + return &node{ dbNode: dbNode{ - children: make(map[byte]child, key.branchFactor), + children: make(map[byte]child, 2), }, key: key, } - if parent != nil { - parent.addChild(newNode) - } - return newNode } // Parse [nodeBytes] to a node and set its key to [key]. 
func parseNode(key Key, nodeBytes []byte) (*node, error) { n := dbNode{} - if err := codec.decodeDBNode(nodeBytes, &n, key.branchFactor); err != nil { + if err := codec.decodeDBNode(nodeBytes, &n); err != nil { return nil, err } result := &node{ @@ -101,11 +89,7 @@ func (n *node) calculateID(metrics merkleMetrics) { } metrics.HashCalculated() - bytes := codec.encodeHashValues(&hashValues{ - Children: n.children, - Value: n.valueDigest, - Key: n.key, - }) + bytes := codec.encodeHashValues(n) n.id = hashing.ComputeHash256Array(bytes) } @@ -127,11 +111,11 @@ func (n *node) setValueDigest() { // Adds [child] as a child of [n]. // Assumes [child]'s key is valid as a child of [n]. // That is, [n.key] is a prefix of [child.key]. -func (n *node) addChild(childNode *node) { +func (n *node) addChild(childNode *node, tokenSize int) { n.setChildEntry( - childNode.key.Token(n.key.tokenLength), + childNode.key.Token(n.key.length, tokenSize), child{ - compressedKey: childNode.key.Skip(n.key.tokenLength + 1), + compressedKey: childNode.key.Skip(n.key.length + tokenSize), id: childNode.id, hasValue: childNode.hasValue(), }, @@ -145,9 +129,9 @@ func (n *node) setChildEntry(index byte, childEntry child) { } // Removes [child] from [n]'s children. -func (n *node) removeChild(child *node) { +func (n *node) removeChild(child *node, tokenSize int) { n.onNodeChanged() - delete(n.children, child.key.Token(n.key.tokenLength)) + delete(n.children, child.key.Token(n.key.length, tokenSize)) } // clone Returns a copy of [n]. 
diff --git a/x/merkledb/node_test.go b/x/merkledb/node_test.go index 9632b7c7dacb..e0cb4dd04b06 100644 --- a/x/merkledb/node_test.go +++ b/x/merkledb/node_test.go @@ -13,54 +13,57 @@ import ( ) func Test_Node_Marshal(t *testing.T) { - root := newNode(nil, emptyKey(BranchFactor16)) + root := newNode(Key{}) require.NotNil(t, root) - fullKey := ToKey([]byte("key"), BranchFactor16) - childNode := newNode(root, fullKey) + fullKey := ToKey([]byte("key")) + childNode := newNode(fullKey) + root.addChild(childNode, 4) childNode.setValue(maybe.Some([]byte("value"))) require.NotNil(t, childNode) childNode.calculateID(&mockMetrics{}) - root.addChild(childNode) + root.addChild(childNode, 4) data := root.bytes() - rootParsed, err := parseNode(ToKey([]byte(""), BranchFactor16), data) + rootParsed, err := parseNode(ToKey([]byte("")), data) require.NoError(t, err) require.Len(t, rootParsed.children, 1) - rootIndex := getSingleChildKey(root).Token(root.key.tokenLength) - parsedIndex := getSingleChildKey(rootParsed).Token(rootParsed.key.tokenLength) + rootIndex := getSingleChildKey(root, 4).Token(0, 4) + parsedIndex := getSingleChildKey(rootParsed, 4).Token(0, 4) rootChildEntry := root.children[rootIndex] parseChildEntry := rootParsed.children[parsedIndex] require.Equal(t, rootChildEntry.id, parseChildEntry.id) } func Test_Node_Marshal_Errors(t *testing.T) { - root := newNode(nil, emptyKey(BranchFactor16)) + root := newNode(Key{}) require.NotNil(t, root) - fullKey := ToKey([]byte{255}, BranchFactor16) - childNode1 := newNode(root, fullKey) + fullKey := ToKey([]byte{255}) + childNode1 := newNode(fullKey) + root.addChild(childNode1, 4) childNode1.setValue(maybe.Some([]byte("value1"))) require.NotNil(t, childNode1) childNode1.calculateID(&mockMetrics{}) - root.addChild(childNode1) + root.addChild(childNode1, 4) - fullKey = ToKey([]byte{237}, BranchFactor16) - childNode2 := newNode(root, fullKey) + fullKey = ToKey([]byte{237}) + childNode2 := newNode(fullKey) + root.addChild(childNode2, 
4) childNode2.setValue(maybe.Some([]byte("value2"))) require.NotNil(t, childNode2) childNode2.calculateID(&mockMetrics{}) - root.addChild(childNode2) + root.addChild(childNode2, 4) data := root.bytes() for i := 1; i < len(data); i++ { broken := data[:i] - _, err := parseNode(ToKey([]byte(""), BranchFactor16), broken) + _, err := parseNode(ToKey([]byte("")), broken) require.ErrorIs(t, err, io.ErrUnexpectedEOF) } } diff --git a/x/merkledb/proof.go b/x/merkledb/proof.go index 63ea34542c9b..e348a83f0f13 100644 --- a/x/merkledb/proof.go +++ b/x/merkledb/proof.go @@ -31,8 +31,6 @@ var ( ErrNonIncreasingValues = errors.New("keys sent are not in increasing order") ErrStateFromOutsideOfRange = errors.New("state key falls outside of the start->end range") ErrNonIncreasingProofNodes = errors.New("each proof node key must be a strict prefix of the next") - ErrExtraProofNodes = errors.New("extra proof nodes in path") - ErrDataInMissingRootProof = errors.New("there should be no state or deleted keys in a change proof that had a missing root") ErrNoMerkleProof = errors.New("empty key response must include merkle proof") ErrShouldJustBeRoot = errors.New("end proof should only contain root") ErrNoStartProof = errors.New("no start proof") @@ -42,7 +40,6 @@ var ( ErrProofValueDoesntMatch = errors.New("the provided value does not match the proof node for the provided key's value") ErrProofNodeHasUnincludedValue = errors.New("the provided proof has a value for a key within the range that is not present in the provided key/values") ErrInvalidMaybe = errors.New("maybe is nothing but has value") - ErrInvalidChildIndex = errors.New("child index must be less than branch factor") ErrNilProofNode = errors.New("proof node is nil") ErrNilValueOrHash = errors.New("proof node's valueOrHash field is nil") ErrNilKey = errors.New("key is nil") @@ -53,7 +50,6 @@ var ( ErrNilProof = errors.New("proof is nil") ErrNilValue = errors.New("value is nil") ErrUnexpectedEndProof = errors.New("end proof should 
be empty") - ErrInconsistentBranchFactor = errors.New("all keys in proof nodes should have the same branch factor") ) type ProofNode struct { @@ -65,11 +61,12 @@ type ProofNode struct { Children map[byte]ids.ID } +// ToProto converts the ProofNode into the protobuf version of a proof node // Assumes [node.Key.Key.length] <= math.MaxUint64. func (node *ProofNode) ToProto() *pb.ProofNode { pbNode := &pb.ProofNode{ Key: &pb.Key{ - Length: uint64(node.Key.tokenLength), + Length: uint64(node.Key.length), Value: node.Key.Bytes(), }, ValueOrHash: &pb.MaybeBytes{ @@ -87,7 +84,7 @@ func (node *ProofNode) ToProto() *pb.ProofNode { return pbNode } -func (node *ProofNode) UnmarshalProto(pbNode *pb.ProofNode, bf BranchFactor) error { +func (node *ProofNode) UnmarshalProto(pbNode *pb.ProofNode) error { switch { case pbNode == nil: return ErrNilProofNode @@ -97,17 +94,14 @@ func (node *ProofNode) UnmarshalProto(pbNode *pb.ProofNode, bf BranchFactor) err return ErrInvalidMaybe case pbNode.Key == nil: return ErrNilKey - } - node.Key = ToKey(pbNode.Key.Value, bf).Take(int(pbNode.Key.Length)) - - if len(pbNode.Key.Value) != node.Key.bytesNeeded(node.Key.tokenLength) { + case len(pbNode.Key.Value) != bytesNeeded(int(pbNode.Key.Length)): return ErrInvalidKeyLength } - + node.Key = ToKey(pbNode.Key.Value).Take(int(pbNode.Key.Length)) node.Children = make(map[byte]ids.ID, len(pbNode.Children)) for childIndex, childIDBytes := range pbNode.Children { - if childIndex >= uint32(bf) { - return ErrInvalidChildIndex + if childIndex > math.MaxUint8 { + return errChildIndexTooLarge } childID, err := ids.ToID(childIDBytes) if err != nil { @@ -123,7 +117,7 @@ func (node *ProofNode) UnmarshalProto(pbNode *pb.ProofNode, bf BranchFactor) err return nil } -// An inclusion/exclustion proof of a key. +// Proof represents an inclusion/exclusion proof of a key. type Proof struct { // Nodes in the proof path from root --> target key // (or node that would be where key is if it doesn't exist). 
@@ -133,14 +127,14 @@ type Proof struct { Key Key // Nothing if [Key] isn't in the trie. - // Otherwise the value corresponding to [Key]. + // Otherwise, the value corresponding to [Key]. Value maybe.Maybe[[]byte] } -// Returns nil if the trie given in [proof] has root [expectedRootID]. +// Verify returns nil if the trie given in [proof] has root [expectedRootID]. // That is, this is a valid proof that [proof.Key] exists/doesn't exist // in the trie with root [expectedRootID]. -func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID) error { +func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID, tokenSize int) error { // Make sure the proof is well-formed. if len(proof.Path) == 0 { return ErrNoProof @@ -172,17 +166,17 @@ func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID) error { } // Don't bother locking [view] -- nobody else has a reference to it. - view, err := getStandaloneTrieView(ctx, nil, proof.Key.branchFactor) + view, err := getStandaloneTrieView(ctx, nil, tokenSize) if err != nil { return err } // Insert all proof nodes. - // [provenPath] is the path that we are proving exists, or the path - // that is where the path we are proving doesn't exist should be. - provenPath := maybe.Some(proof.Path[len(proof.Path)-1].Key) + // [provenKey] is the key that we are proving exists, or the key + // that is the next key along the node path, proving that [proof.Key] doesn't exist in the trie. 
+ provenKey := maybe.Some(proof.Path[len(proof.Path)-1].Key) - if err = addPathInfo(view, proof.Path, provenPath, provenPath); err != nil { + if err = addPathInfo(view, proof.Path, provenKey, provenKey); err != nil { return err } @@ -215,7 +209,7 @@ func (proof *Proof) ToProto() *pb.Proof { return pbProof } -func (proof *Proof) UnmarshalProto(pbProof *pb.Proof, bf BranchFactor) error { +func (proof *Proof) UnmarshalProto(pbProof *pb.Proof) error { switch { case pbProof == nil: return ErrNilProof @@ -225,7 +219,7 @@ func (proof *Proof) UnmarshalProto(pbProof *pb.Proof, bf BranchFactor) error { return ErrInvalidMaybe } - proof.Key = ToKey(pbProof.Key, bf) + proof.Key = ToKey(pbProof.Key) if !pbProof.Value.IsNothing { proof.Value = maybe.Some(pbProof.Value.Value) @@ -233,7 +227,7 @@ func (proof *Proof) UnmarshalProto(pbProof *pb.Proof, bf BranchFactor) error { proof.Path = make([]ProofNode, len(pbProof.Proof)) for i, pbNode := range pbProof.Proof { - if err := proof.Path[i].UnmarshalProto(pbNode, bf); err != nil { + if err := proof.Path[i].UnmarshalProto(pbNode); err != nil { return err } } @@ -246,7 +240,7 @@ type KeyValue struct { Value []byte } -// A proof that a given set of key-value pairs are in a trie. +// RangeProof is a proof that a given set of key-value pairs are in a trie. type RangeProof struct { // Invariant: At least one of [StartProof], [EndProof], [KeyValues] is non-empty. 
@@ -287,6 +281,7 @@ func (proof *RangeProof) Verify( start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], expectedRootID ids.ID, + tokenSize int, ) error { switch { case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) > 0: @@ -301,56 +296,44 @@ func (proof *RangeProof) Verify( return ErrNoEndProof } - // determine branch factor based on proof paths - var branchFactor BranchFactor - if len(proof.StartProof) > 0 { - branchFactor = proof.StartProof[0].Key.branchFactor - } else { - // safe because invariants prevent both start proof and end proof from being empty at the same time - branchFactor = proof.EndProof[0].Key.branchFactor - } - // Make sure the key-value pairs are sorted and in [start, end]. if err := verifyKeyValues(proof.KeyValues, start, end); err != nil { return err } // [proof] allegedly provides and proves all key-value - // pairs in [smallestProvenPath, largestProvenPath]. - // If [smallestProvenPath] is Nothing, [proof] should - // provide and prove all keys < [largestProvenPath]. - // If [largestProvenPath] is Nothing, [proof] should - // provide and prove all keys > [smallestProvenPath]. + // pairs in [smallestProvenKey, largestProvenKey]. + // If [smallestProvenKey] is Nothing, [proof] should + // provide and prove all keys < [largestProvenKey]. + // If [largestProvenKey] is Nothing, [proof] should + // provide and prove all keys > [smallestProvenKey]. // If both are Nothing, [proof] should prove the entire trie. 
- smallestProvenPath := maybe.Bind(start, func(b []byte) Key { - return ToKey(b, branchFactor) - }) + smallestProvenKey := maybe.Bind(start, ToKey) + + largestProvenKey := maybe.Bind(end, ToKey) - largestProvenPath := maybe.Bind(end, func(b []byte) Key { - return ToKey(b, branchFactor) - }) if len(proof.KeyValues) > 0 { // If [proof] has key-value pairs, we should insert children - // greater than [largestProvenPath] to ancestors of the node containing - // [largestProvenPath] so that we get the expected root ID. - largestProvenPath = maybe.Some(ToKey(proof.KeyValues[len(proof.KeyValues)-1].Key, branchFactor)) + // greater than [largestProvenKey] to ancestors of the node containing + // [largestProvenKey] so that we get the expected root ID. + largestProvenKey = maybe.Some(ToKey(proof.KeyValues[len(proof.KeyValues)-1].Key)) } // The key-value pairs (allegedly) proven by [proof]. keyValues := make(map[Key][]byte, len(proof.KeyValues)) for _, keyValue := range proof.KeyValues { - keyValues[ToKey(keyValue.Key, branchFactor)] = keyValue.Value + keyValues[ToKey(keyValue.Key)] = keyValue.Value } // Ensure that the start proof is valid and contains values that // match the key/values that were sent. - if err := verifyProofPath(proof.StartProof, smallestProvenPath); err != nil { + if err := verifyProofPath(proof.StartProof, smallestProvenKey); err != nil { return err } if err := verifyAllRangeProofKeyValuesPresent( proof.StartProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, keyValues, ); err != nil { return err @@ -358,13 +341,13 @@ func (proof *RangeProof) Verify( // Ensure that the end proof is valid and contains values that // match the key/values that were sent. 
- if err := verifyProofPath(proof.EndProof, largestProvenPath); err != nil { + if err := verifyProofPath(proof.EndProof, largestProvenKey); err != nil { return err } if err := verifyAllRangeProofKeyValuesPresent( proof.EndProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, keyValues, ); err != nil { return err @@ -380,30 +363,30 @@ func (proof *RangeProof) Verify( } // Don't need to lock [view] because nobody else has a reference to it. - view, err := getStandaloneTrieView(ctx, ops, branchFactor) + view, err := getStandaloneTrieView(ctx, ops, tokenSize) if err != nil { return err } // For all the nodes along the edges of the proof, insert children - // < [smallestProvenPath] and > [largestProvenPath] + // < [smallestProvenKey] and > [largestProvenKey] // into the trie so that we get the expected root ID (if this proof is valid). - // By inserting all children < [smallestProvenPath], we prove that there are no keys - // > [smallestProvenPath] but less than the first key given. + // By inserting all children < [smallestProvenKey], we prove that there are no keys + // > [smallestProvenKey] but less than the first key given. // That is, the peer who gave us this proof is not omitting nodes. 
if err := addPathInfo( view, proof.StartProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, ); err != nil { return err } if err := addPathInfo( view, proof.EndProof, - smallestProvenPath, - largestProvenPath, + smallestProvenKey, + largestProvenKey, ); err != nil { return err } @@ -444,21 +427,21 @@ func (proof *RangeProof) ToProto() *pb.RangeProof { } } -func (proof *RangeProof) UnmarshalProto(pbProof *pb.RangeProof, bf BranchFactor) error { +func (proof *RangeProof) UnmarshalProto(pbProof *pb.RangeProof) error { if pbProof == nil { return ErrNilRangeProof } proof.StartProof = make([]ProofNode, len(pbProof.StartProof)) for i, protoNode := range pbProof.StartProof { - if err := proof.StartProof[i].UnmarshalProto(protoNode, bf); err != nil { + if err := proof.StartProof[i].UnmarshalProto(protoNode); err != nil { return err } } proof.EndProof = make([]ProofNode, len(pbProof.EndProof)) for i, protoNode := range pbProof.EndProof { - if err := proof.EndProof[i].UnmarshalProto(protoNode, bf); err != nil { + if err := proof.EndProof[i].UnmarshalProto(protoNode); err != nil { return err } } @@ -479,13 +462,13 @@ func (proof *RangeProof) UnmarshalProto(pbProof *pb.RangeProof, bf BranchFactor) func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start maybe.Maybe[Key], end maybe.Maybe[Key], keysValues map[Key][]byte) error { for i := 0; i < len(proof); i++ { var ( - node = proof[i] - nodePath = node.Key + node = proof[i] + nodeKey = node.Key ) // Skip keys that cannot have a value (enforced by [verifyProofPath]). 
- if !nodePath.hasPartialByte() && (start.IsNothing() || !nodePath.Less(start.Value())) && (end.IsNothing() || !nodePath.Greater(end.Value())) { - value, ok := keysValues[nodePath] + if !nodeKey.hasPartialByte() && (start.IsNothing() || !nodeKey.Less(start.Value())) && (end.IsNothing() || !nodeKey.Greater(end.Value())) { + value, ok := keysValues[nodeKey] if !ok && node.ValueOrHash.HasValue() { // We didn't get a key-value pair for this key, but the proof node has a value. return ErrProofNodeHasUnincludedValue @@ -505,7 +488,7 @@ type KeyChange struct { Value maybe.Maybe[[]byte] } -// A change proof proves that a set of key-value changes occurred +// ChangeProof proves that a set of key-value changes occurred // between two trie roots, where each key-value pair's key is // between some lower and upper bound (inclusive). type ChangeProof struct { @@ -596,21 +579,21 @@ func (proof *ChangeProof) ToProto() *pb.ChangeProof { } } -func (proof *ChangeProof) UnmarshalProto(pbProof *pb.ChangeProof, bf BranchFactor) error { +func (proof *ChangeProof) UnmarshalProto(pbProof *pb.ChangeProof) error { if pbProof == nil { return ErrNilChangeProof } proof.StartProof = make([]ProofNode, len(pbProof.StartProof)) for i, protoNode := range pbProof.StartProof { - if err := proof.StartProof[i].UnmarshalProto(protoNode, bf); err != nil { + if err := proof.StartProof[i].UnmarshalProto(protoNode); err != nil { return err } } proof.EndProof = make([]ProofNode, len(pbProof.EndProof)) for i, protoNode := range pbProof.EndProof { - if err := proof.EndProof[i].UnmarshalProto(protoNode, bf); err != nil { + if err := proof.EndProof[i].UnmarshalProto(protoNode); err != nil { return err } } @@ -639,8 +622,8 @@ func (proof *ChangeProof) UnmarshalProto(pbProof *pb.ChangeProof, bf BranchFacto } // Verifies that all values present in the [proof]: -// - Are nothing when deleted, not in the db, or the node has path partial byte length -// - if the node's path is within the key range, that has a value 
that matches the value passed in the change list or in the db +// - Are nothing when deleted, not in the db, or the node has key partial byte length +// - if the node's key is within the key range, that has a value that matches the value passed in the change list or in the db func verifyAllChangeProofKeyValuesPresent( ctx context.Context, db MerkleDB, @@ -651,19 +634,19 @@ func verifyAllChangeProofKeyValuesPresent( ) error { for i := 0; i < len(proof); i++ { var ( - node = proof[i] - nodePath = node.Key + node = proof[i] + nodeKey = node.Key ) // Check the value of any node with a key that is within the range. // Skip keys that cannot have a value (enforced by [verifyProofPath]). - if !nodePath.hasPartialByte() && (start.IsNothing() || !nodePath.Less(start.Value())) && (end.IsNothing() || !nodePath.Greater(end.Value())) { - value, ok := keysValues[nodePath] + if !nodeKey.hasPartialByte() && (start.IsNothing() || !nodeKey.Less(start.Value())) && (end.IsNothing() || !nodeKey.Greater(end.Value())) { + value, ok := keysValues[nodeKey] if !ok { // This value isn't in the list of key-value pairs we got. - dbValue, err := db.GetValue(ctx, nodePath.Bytes()) + dbValue, err := db.GetValue(ctx, nodeKey.Bytes()) if err != nil { - if err != database.ErrNotFound { + if !errors.Is(err, database.ErrNotFound) { return err } // This key isn't in the database so proof node should have Nothing. @@ -686,7 +669,7 @@ func (proof *ChangeProof) Empty() bool { len(proof.StartProof) == 0 && len(proof.EndProof) == 0 } -// Exactly one of [ChangeProof] or [RangeProof] is non-nil. +// ChangeOrRangeProof has exactly one of [ChangeProof] or [RangeProof] is non-nil. 
type ChangeOrRangeProof struct { ChangeProof *ChangeProof RangeProof *RangeProof @@ -754,10 +737,8 @@ func verifyProofPath(proof []ProofNode, key maybe.Maybe[Key]) error { // loop over all but the last node since it will not have the prefix in exclusion proofs for i := 0; i < len(proof)-1; i++ { - nodeKey := proof[i].Key - if key.HasValue() && nodeKey.branchFactor != key.Value().branchFactor { - return ErrInconsistentBranchFactor - } + currentProofNode := proof[i] + nodeKey := currentProofNode.Key // Because the interface only support []byte keys, // a key with a partial byte should store a value @@ -770,11 +751,8 @@ func verifyProofPath(proof []ProofNode, key maybe.Maybe[Key]) error { return ErrProofNodeNotForKey } - // each node should have a key that has a matching BranchFactor and is a prefix of the next node's key + // each node should have a key that has a matching TokenConfig and is a prefix of the next node's key nextKey := proof[i+1].Key - if nextKey.branchFactor != nodeKey.branchFactor { - return ErrInconsistentBranchFactor - } if !nextKey.HasStrictPrefix(nodeKey) { return ErrNonIncreasingProofNodes } @@ -857,21 +835,21 @@ func addPathInfo( // Add [proofNode]'s children which are outside the range // [insertChildrenLessThan, insertChildrenGreaterThan]. 
- compressedPath := emptyKey(key.branchFactor) + compressedKey := Key{} for index, childID := range proofNode.Children { if existingChild, ok := n.children[index]; ok { - compressedPath = existingChild.compressedKey + compressedKey = existingChild.compressedKey } - childPath := key.AppendExtend(index, compressedPath) - if (shouldInsertLeftChildren && childPath.Less(insertChildrenLessThan.Value())) || - (shouldInsertRightChildren && childPath.Greater(insertChildrenGreaterThan.Value())) { + childKey := key.Extend(ToToken(index, t.tokenSize), compressedKey) + if (shouldInsertLeftChildren && childKey.Less(insertChildrenLessThan.Value())) || + (shouldInsertRightChildren && childKey.Greater(insertChildrenGreaterThan.Value())) { // We didn't set the other values on the child entry, but it doesn't matter. // We only need the IDs to be correct so that the calculated hash is correct. n.setChildEntry( index, child{ id: childID, - compressedKey: compressedPath, + compressedKey: compressedKey, }) } } @@ -881,7 +859,7 @@ func addPathInfo( } // getStandaloneTrieView returns a new view that has nothing in it besides the changes due to [ops] -func getStandaloneTrieView(ctx context.Context, ops []database.BatchOp, factor BranchFactor) (*trieView, error) { +func getStandaloneTrieView(ctx context.Context, ops []database.BatchOp, size int) (*trieView, error) { db, err := newDatabase( ctx, memdb.New(), @@ -890,7 +868,7 @@ func getStandaloneTrieView(ctx context.Context, ops []database.BatchOp, factor B Tracer: trace.Noop, ValueNodeCacheSize: verificationCacheSize, IntermediateNodeCacheSize: verificationCacheSize, - BranchFactor: factor, + BranchFactor: tokenSizeToBranchFactor[size], }, &mockMetrics{}, ) diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index bf9d9da18996..b22b80ffd09d 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -23,7 +23,7 @@ import ( func Test_Proof_Empty(t *testing.T) { proof := &Proof{} - err := 
proof.Verify(context.Background(), ids.Empty) + err := proof.Verify(context.Background(), ids.Empty, 4) require.ErrorIs(t, err, ErrNoProof) } @@ -43,7 +43,7 @@ func Test_Proof_Simple(t *testing.T) { proof, err := db.GetProof(ctx, []byte{}) require.NoError(err) - require.NoError(proof.Verify(ctx, expectedRoot)) + require.NoError(proof.Verify(ctx, expectedRoot, 4)) } func Test_Proof_Verify_Bad_Data(t *testing.T) { @@ -60,9 +60,9 @@ func Test_Proof_Verify_Bad_Data(t *testing.T) { expectedErr: nil, }, { - name: "odd length key with value", + name: "odd length key path with value", malform: func(proof *Proof) { - proof.Path[1].ValueOrHash = maybe.Some([]byte{1, 2}) + proof.Path[0].ValueOrHash = maybe.Some([]byte{1, 2}) }, expectedErr: ErrPartialByteLengthWithValue, }, @@ -112,7 +112,7 @@ func Test_Proof_Verify_Bad_Data(t *testing.T) { tt.malform(proof) - err = proof.Verify(context.Background(), db.getMerkleRoot()) + err = proof.Verify(context.Background(), db.getMerkleRoot(), 4) require.ErrorIs(err, tt.expectedErr) }) } @@ -150,7 +150,8 @@ func Test_RangeProof_Extra_Value(t *testing.T) { context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{5, 5}), - db.root.id, + db.getMerkleRoot(), + db.tokenSize, )) proof.KeyValues = append(proof.KeyValues, KeyValue{Key: []byte{5}, Value: []byte{5}}) @@ -159,7 +160,8 @@ func Test_RangeProof_Extra_Value(t *testing.T) { context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{5, 5}), - db.root.id, + db.getMerkleRoot(), + db.tokenSize, ) require.ErrorIs(err, ErrInvalidProof) } @@ -185,9 +187,9 @@ func Test_RangeProof_Verify_Bad_Data(t *testing.T) { expectedErr: ErrProofValueDoesntMatch, }, { - name: "EndProof: odd length key with value", + name: "EndProof: odd length key path with value", malform: func(proof *RangeProof) { - proof.EndProof[1].ValueOrHash = maybe.Some([]byte{1, 2}) + proof.EndProof[0].ValueOrHash = maybe.Some([]byte{1, 2}) }, expectedErr: ErrPartialByteLengthWithValue, }, @@ -221,7 +223,7 @@ func 
Test_RangeProof_Verify_Bad_Data(t *testing.T) { tt.malform(proof) - err = proof.Verify(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot()) + err = proof.Verify(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot(), db.tokenSize) require.ErrorIs(err, tt.expectedErr) }) } @@ -253,6 +255,7 @@ func Test_Proof(t *testing.T) { context.Background(), ViewChanges{ BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value")}, {Key: []byte("key0"), Value: []byte("value0")}, {Key: []byte("key1"), Value: []byte("value1")}, {Key: []byte("key2"), Value: []byte("value2")}, @@ -271,19 +274,18 @@ func Test_Proof(t *testing.T) { require.Len(proof.Path, 3) - require.Equal(ToKey([]byte("key1"), BranchFactor16), proof.Path[2].Key) + require.Equal(ToKey([]byte("key")), proof.Path[0].Key) + require.Equal(maybe.Some([]byte("value")), proof.Path[0].ValueOrHash) + require.Equal(ToKey([]byte("key1")), proof.Path[2].Key) require.Equal(maybe.Some([]byte("value1")), proof.Path[2].ValueOrHash) - require.Equal(ToKey([]byte{}, BranchFactor16), proof.Path[0].Key) - require.True(proof.Path[0].ValueOrHash.IsNothing()) - expectedRootID, err := trie.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), expectedRootID)) + require.NoError(proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize)) proof.Path[0].ValueOrHash = maybe.Some([]byte("value2")) - err = proof.Verify(context.Background(), expectedRootID) + err = proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize) require.ErrorIs(err, ErrInvalidProof) } @@ -357,7 +359,7 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { {Key: []byte{1}, Value: []byte{1}}, {Key: []byte{0}, Value: []byte{0}}, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrNonIncreasingValues, }, @@ -369,7 +371,7 @@ func 
Test_RangeProof_Syntactic_Verify(t *testing.T) { KeyValues: []KeyValue{ {Key: []byte{0}, Value: []byte{0}}, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrStateFromOutsideOfRange, }, @@ -381,7 +383,7 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { KeyValues: []KeyValue{ {Key: []byte{2}, Value: []byte{0}}, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrStateFromOutsideOfRange, }, @@ -395,13 +397,13 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, StartProof: []ProofNode{ { - Key: ToKey([]byte{2}, BranchFactor16), + Key: ToKey([]byte{2}), }, { - Key: ToKey([]byte{1}, BranchFactor16), + Key: ToKey([]byte{1}), }, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrProofNodeNotForKey, }, @@ -415,16 +417,16 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, StartProof: []ProofNode{ { - Key: ToKey([]byte{1}, BranchFactor16), + Key: ToKey([]byte{1}), }, { - Key: ToKey([]byte{1, 2, 3}, BranchFactor16), // Not a prefix of [1, 2] + Key: ToKey([]byte{1, 2, 3}), // Not a prefix of [1, 2] }, { - Key: ToKey([]byte{1, 2, 3, 4}, BranchFactor16), + Key: ToKey([]byte{1, 2, 3, 4}), }, }, - EndProof: []ProofNode{{Key: emptyKey(BranchFactor16)}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrProofNodeNotForKey, }, @@ -438,39 +440,15 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, EndProof: []ProofNode{ { - Key: ToKey([]byte{2}, BranchFactor16), + Key: ToKey([]byte{2}), }, { - Key: ToKey([]byte{1}, BranchFactor16), + Key: ToKey([]byte{1}), }, }, }, expectedErr: ErrProofNodeNotForKey, }, - { - name: "inconsistent branching factor", - start: maybe.Some([]byte{1, 2}), - end: maybe.Some([]byte{1, 2}), - proof: &RangeProof{ - StartProof: []ProofNode{ - { - Key: ToKey([]byte{1}, BranchFactor16), - }, - { - Key: ToKey([]byte{1, 2}, 
BranchFactor16), - }, - }, - EndProof: []ProofNode{ - { - Key: ToKey([]byte{1}, BranchFactor4), - }, - { - Key: ToKey([]byte{1, 2}, BranchFactor4), - }, - }, - }, - expectedErr: ErrInconsistentBranchFactor, - }, { name: "end proof has node for wrong key", start: maybe.Nothing[[]byte](), @@ -481,13 +459,13 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, EndProof: []ProofNode{ { - Key: ToKey([]byte{1}, BranchFactor16), + Key: ToKey([]byte{1}), }, { - Key: ToKey([]byte{1, 2, 3}, BranchFactor16), // Not a prefix of [1, 2] + Key: ToKey([]byte{1, 2, 3}), // Not a prefix of [1, 2] }, { - Key: ToKey([]byte{1, 2, 3, 4}, BranchFactor16), + Key: ToKey([]byte{1, 2, 3, 4}), }, }, }, @@ -497,7 +475,7 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty) + err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty, 4) require.ErrorIs(t, err, tt.expectedErr) }) } @@ -523,9 +501,8 @@ func Test_RangeProof(t *testing.T) { require.Equal([]byte{2}, proof.KeyValues[1].Value) require.Equal([]byte{3}, proof.KeyValues[2].Value) - require.Nil(proof.EndProof[0].Key.Bytes()) - require.Equal([]byte{0}, proof.EndProof[1].Key.Bytes()) - require.Equal([]byte{3}, proof.EndProof[2].Key.Bytes()) + require.Equal([]byte{0}, proof.EndProof[0].Key.Bytes()) + require.Equal([]byte{3}, proof.EndProof[1].Key.Bytes()) // only a single node here since others are duplicates in endproof require.Equal([]byte{1}, proof.StartProof[0].Key.Bytes()) @@ -534,7 +511,8 @@ func Test_RangeProof(t *testing.T) { context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{3, 5}), - db.root.id, + db.getMerkleRoot(), + db.tokenSize, )) } @@ -578,15 +556,15 @@ func Test_RangeProof_NilStart(t *testing.T) { require.Equal([]byte("value1"), proof.KeyValues[0].Value) require.Equal([]byte("value2"), proof.KeyValues[1].Value) - require.Equal(ToKey([]byte("key2"), 
BranchFactor16), proof.EndProof[2].Key, BranchFactor16) - require.Equal(ToKey([]byte("key2"), BranchFactor16).Take(7), proof.EndProof[1].Key) - require.Equal(ToKey([]byte(""), BranchFactor16), proof.EndProof[0].Key, BranchFactor16) + require.Equal(ToKey([]byte("key2")), proof.EndProof[1].Key) + require.Equal(ToKey([]byte("key2")).Take(28), proof.EndProof[0].Key) require.NoError(proof.Verify( context.Background(), maybe.Nothing[[]byte](), maybe.Some([]byte("key35")), - db.root.id, + db.getMerkleRoot(), + db.tokenSize, )) } @@ -612,15 +590,15 @@ func Test_RangeProof_NilEnd(t *testing.T) { require.Equal([]byte{1}, proof.StartProof[0].Key.Bytes()) - require.Nil(proof.EndProof[0].Key.Bytes()) - require.Equal([]byte{0}, proof.EndProof[1].Key.Bytes()) - require.Equal([]byte{2}, proof.EndProof[2].Key.Bytes()) + require.Equal([]byte{0}, proof.EndProof[0].Key.Bytes()) + require.Equal([]byte{2}, proof.EndProof[1].Key.Bytes()) require.NoError(proof.Verify( context.Background(), maybe.Some([]byte{1}), maybe.Nothing[[]byte](), - db.root.id, + db.getMerkleRoot(), + db.tokenSize, )) } @@ -652,17 +630,18 @@ func Test_RangeProof_EmptyValues(t *testing.T) { require.Empty(proof.KeyValues[2].Value) require.Len(proof.StartProof, 1) - require.Equal(ToKey([]byte("key1"), BranchFactor16), proof.StartProof[0].Key, BranchFactor16) + require.Equal(ToKey([]byte("key1")), proof.StartProof[0].Key) - require.Len(proof.EndProof, 3) - require.Equal(ToKey([]byte("key2"), BranchFactor16), proof.EndProof[2].Key, BranchFactor16) - require.Equal(ToKey([]byte{}, BranchFactor16), proof.EndProof[0].Key, BranchFactor16) + require.Len(proof.EndProof, 2) + require.Equal(ToKey([]byte("key2")), proof.EndProof[1].Key) + require.Equal(ToKey([]byte("key2")).Take(28), proof.EndProof[0].Key) require.NoError(proof.Verify( context.Background(), maybe.Some([]byte("key1")), maybe.Some([]byte("key2")), - db.root.id, + db.getMerkleRoot(), + db.tokenSize, )) } @@ -797,7 +776,7 @@ func Test_ChangeProof_Verify_Bad_Data(t 
*testing.T) { { name: "odd length key path with value", malform: func(proof *ChangeProof) { - proof.EndProof[1].ValueOrHash = maybe.Some([]byte{1, 2}) + proof.EndProof[0].ValueOrHash = maybe.Some([]byte{1, 2}) }, expectedErr: ErrPartialByteLengthWithValue, }, @@ -942,8 +921,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { name: "start proof node has wrong prefix", proof: &ChangeProof{ StartProof: []ProofNode{ - {Key: ToKey([]byte{2}, BranchFactor16)}, - {Key: ToKey([]byte{2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{2})}, + {Key: ToKey([]byte{2, 3})}, }, }, start: maybe.Some([]byte{1, 2, 3}), @@ -954,8 +933,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { name: "start proof non-increasing", proof: &ChangeProof{ StartProof: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{2, 3})}, }, }, start: maybe.Some([]byte{1, 2, 3}), @@ -969,8 +948,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { {Key: []byte{1, 2}, Value: maybe.Some([]byte{0})}, }, EndProof: []ProofNode{ - {Key: ToKey([]byte{2}, BranchFactor16)}, - {Key: ToKey([]byte{2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{2})}, + {Key: ToKey([]byte{2, 3})}, }, }, start: maybe.Nothing[[]byte](), @@ -984,8 +963,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { {Key: []byte{1, 2, 3}}, }, EndProof: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{2, 3})}, }, }, start: maybe.Nothing[[]byte](), @@ -1100,119 +1079,118 @@ func TestVerifyProofPath(t *testing.T) { }, { name: "1 element", - path: []ProofNode{{Key: ToKey([]byte{1}, BranchFactor16)}}, + path: []ProofNode{{Key: ToKey([]byte{1})}}, proofKey: maybe.Nothing[Key](), expectedErr: nil, }, { name: "non-increasing keys", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - 
{Key: ToKey([]byte{1, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrNonIncreasingProofNodes, }, { name: "invalid key", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 4}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 4})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrProofNodeNotForKey, }, { name: "extra node inclusion proof", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2})), expectedErr: ErrProofNodeNotForKey, }, { name: "extra node exclusion proof", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 3}, BranchFactor16)}, - {Key: ToKey([]byte{1, 3, 4}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 3})}, + {Key: ToKey([]byte{1, 3, 4})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2})), expectedErr: ErrProofNodeNotForKey, }, { name: "happy path exclusion proof", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 4}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 4})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, 
BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: nil, }, { name: "happy path inclusion proof", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: nil, }, { name: "repeat nodes", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrNonIncreasingProofNodes, }, { name: "repeat nodes 2", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrNonIncreasingProofNodes, }, { name: "repeat nodes 3", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2, 3}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: 
ErrProofNodeNotForKey, }, { name: "oddLength key with value", path: []ProofNode{ - {Key: ToKey([]byte{1}, BranchFactor16)}, - {Key: ToKey([]byte{1, 2}, BranchFactor16)}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, { Key: Key{ - value: string([]byte{1, 2, 240}), - tokenLength: 5, - tokenConfig: branchFactorToTokenConfig[BranchFactor16], + value: string([]byte{1, 2, 240}), + length: 20, }, ValueOrHash: maybe.Some([]byte{1}), }, }, - proofKey: maybe.Some(ToKey([]byte{1, 2, 3}, BranchFactor16)), + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrPartialByteLengthWithValue, }, } @@ -1240,7 +1218,7 @@ func TestProofNodeUnmarshalProtoInvalidMaybe(t *testing.T) { } var unmarshaledNode ProofNode - err := unmarshaledNode.UnmarshalProto(protoNode, BranchFactor16) + err := unmarshaledNode.UnmarshalProto(protoNode) require.ErrorIs(t, err, ErrInvalidMaybe) } @@ -1257,7 +1235,7 @@ func TestProofNodeUnmarshalProtoInvalidChildBytes(t *testing.T) { } var unmarshaledNode ProofNode - err := unmarshaledNode.UnmarshalProto(protoNode, BranchFactor16) + err := unmarshaledNode.UnmarshalProto(protoNode) require.ErrorIs(t, err, hashing.ErrInvalidHashLen) } @@ -1270,11 +1248,11 @@ func TestProofNodeUnmarshalProtoInvalidChildIndex(t *testing.T) { protoNode := node.ToProto() childID := ids.GenerateTestID() - protoNode.Children[uint32(BranchFactor16)] = childID[:] + protoNode.Children[256] = childID[:] var unmarshaledNode ProofNode - err := unmarshaledNode.UnmarshalProto(protoNode, BranchFactor16) - require.ErrorIs(t, err, ErrInvalidChildIndex) + err := unmarshaledNode.UnmarshalProto(protoNode) + require.ErrorIs(t, err, errChildIndexTooLarge) } func TestProofNodeUnmarshalProtoMissingFields(t *testing.T) { @@ -1321,7 +1299,7 @@ func TestProofNodeUnmarshalProtoMissingFields(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var node ProofNode - err := node.UnmarshalProto(tt.nodeFunc(), BranchFactor16) + err := node.UnmarshalProto(tt.nodeFunc()) 
require.ErrorIs(t, err, tt.expectedErr) }) } @@ -1340,7 +1318,7 @@ func FuzzProofNodeProtoMarshalUnmarshal(f *testing.F) { // Assert the unmarshaled one is the same as the original. protoNode := node.ToProto() var unmarshaledNode ProofNode - require.NoError(unmarshaledNode.UnmarshalProto(protoNode, BranchFactor16)) + require.NoError(unmarshaledNode.UnmarshalProto(protoNode)) require.Equal(node, unmarshaledNode) // Marshaling again should yield same result. @@ -1397,7 +1375,7 @@ func FuzzRangeProofProtoMarshalUnmarshal(f *testing.F) { // Assert the unmarshaled one is the same as the original. var unmarshaledProof RangeProof protoProof := proof.ToProto() - require.NoError(unmarshaledProof.UnmarshalProto(protoProof, BranchFactor16)) + require.NoError(unmarshaledProof.UnmarshalProto(protoProof)) require.Equal(proof, unmarshaledProof) // Marshaling again should yield same result. @@ -1459,7 +1437,7 @@ func FuzzChangeProofProtoMarshalUnmarshal(f *testing.F) { // Assert the unmarshaled one is the same as the original. var unmarshaledProof ChangeProof protoProof := proof.ToProto() - require.NoError(unmarshaledProof.UnmarshalProto(protoProof, BranchFactor16)) + require.NoError(unmarshaledProof.UnmarshalProto(protoProof)) require.Equal(proof, unmarshaledProof) // Marshaling again should yield same result. 
@@ -1470,7 +1448,7 @@ func FuzzChangeProofProtoMarshalUnmarshal(f *testing.F) { func TestChangeProofUnmarshalProtoNil(t *testing.T) { var proof ChangeProof - err := proof.UnmarshalProto(nil, BranchFactor16) + err := proof.UnmarshalProto(nil) require.ErrorIs(t, err, ErrNilChangeProof) } @@ -1524,7 +1502,7 @@ func TestChangeProofUnmarshalProtoNilValue(t *testing.T) { protoProof.KeyChanges[0].Value = nil var unmarshaledProof ChangeProof - err := unmarshaledProof.UnmarshalProto(protoProof, BranchFactor16) + err := unmarshaledProof.UnmarshalProto(protoProof) require.ErrorIs(t, err, ErrNilMaybeBytes) } @@ -1542,7 +1520,7 @@ func TestChangeProofUnmarshalProtoInvalidMaybe(t *testing.T) { } var proof ChangeProof - err := proof.UnmarshalProto(protoProof, BranchFactor16) + err := proof.UnmarshalProto(protoProof) require.ErrorIs(t, err, ErrInvalidMaybe) } @@ -1575,7 +1553,7 @@ func FuzzProofProtoMarshalUnmarshal(f *testing.F) { } proof := Proof{ - Key: ToKey(key, BranchFactor16), + Key: ToKey(key), Value: value, Path: proofPath, } @@ -1584,7 +1562,7 @@ func FuzzProofProtoMarshalUnmarshal(f *testing.F) { // Assert the unmarshaled one is the same as the original. var unmarshaledProof Proof protoProof := proof.ToProto() - require.NoError(unmarshaledProof.UnmarshalProto(protoProof, BranchFactor16)) + require.NoError(unmarshaledProof.UnmarshalProto(protoProof)) require.Equal(proof, unmarshaledProof) // Marshaling again should yield same result. 
@@ -1626,7 +1604,7 @@ func TestProofProtoUnmarshal(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var proof Proof - err := proof.UnmarshalProto(tt.proof, BranchFactor16) + err := proof.UnmarshalProto(tt.proof) require.ErrorIs(t, err, tt.expectedErr) }) } @@ -1694,6 +1672,7 @@ func FuzzRangeProofInvariants(f *testing.F) { start, end, rootID, + db.tokenSize, )) // Make sure the start proof doesn't contain any nodes @@ -1732,14 +1711,14 @@ func FuzzRangeProofInvariants(f *testing.F) { proof := Proof{ Path: rangeProof.EndProof, - Key: ToKey(endBytes, BranchFactor16), + Key: ToKey(endBytes), Value: value, } rootID, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID)) + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) default: require.NotEmpty(rangeProof.EndProof) @@ -1747,14 +1726,14 @@ func FuzzRangeProofInvariants(f *testing.F) { // EndProof should be a proof for largest key-value. 
proof := Proof{ Path: rangeProof.EndProof, - Key: ToKey(greatestKV.Key, BranchFactor16), + Key: ToKey(greatestKV.Key), Value: maybe.Some(greatestKV.Value), } rootID, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID)) + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) } }) } @@ -1790,7 +1769,7 @@ func FuzzProofVerification(f *testing.F) { rootID, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID)) + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) // Insert a new key-value pair newKey := make([]byte, 32) diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index 7908c1266af7..a431dd6b254d 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -19,10 +19,6 @@ import ( ) func getNodeValue(t ReadOnlyTrie, key string) ([]byte, error) { - return getNodeValueWithBranchFactor(t, key, BranchFactor16) -} - -func getNodeValueWithBranchFactor(t ReadOnlyTrie, key string, bf BranchFactor) ([]byte, error) { var view *trieView if asTrieView, ok := t.(*trieView); ok { if err := asTrieView.calculateNodeIDs(context.Background()); err != nil { @@ -38,7 +34,7 @@ func getNodeValueWithBranchFactor(t ReadOnlyTrie, key string, bf BranchFactor) ( view = dbView.(*trieView) } - path := ToKey([]byte(key), bf) + path := ToKey([]byte(key)) var result *node err := view.visitPathToKey(path, func(n *node) error { result = n @@ -47,7 +43,7 @@ func getNodeValueWithBranchFactor(t ReadOnlyTrie, key string, bf BranchFactor) ( if err != nil { return nil, err } - if result.key != path || result == nil { + if result == nil || result.key != path { return nil, database.ErrNotFound } @@ -123,14 +119,14 @@ func TestTrieViewVisitPathToKey(t *testing.T) { trie := trieIntf.(*trieView) var nodePath []*node - require.NoError(trie.visitPathToKey(ToKey(nil, BranchFactor16), func(n *node) 
error { + require.NoError(trie.visitPathToKey(ToKey(nil), func(n *node) error { nodePath = append(nodePath, n) return nil })) // Just the root require.Len(nodePath, 1) - require.Equal(trie.root, nodePath[0]) + require.Equal(trie.sentinelNode, nodePath[0]) // Insert a key key1 := []byte{0} @@ -148,15 +144,16 @@ func TestTrieViewVisitPathToKey(t *testing.T) { require.NoError(trie.calculateNodeIDs(context.Background())) nodePath = make([]*node, 0, 2) - require.NoError(trie.visitPathToKey(ToKey(key1, BranchFactor16), func(n *node) error { + require.NoError(trie.visitPathToKey(ToKey(key1), func(n *node) error { nodePath = append(nodePath, n) return nil })) // Root and 1 value require.Len(nodePath, 2) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key1, BranchFactor16), nodePath[1].key) + + require.Equal(trie.sentinelNode, nodePath[0]) + require.Equal(ToKey(key1), nodePath[1].key) // Insert another key which is a child of the first key2 := []byte{0, 1} @@ -174,14 +171,15 @@ func TestTrieViewVisitPathToKey(t *testing.T) { require.NoError(trie.calculateNodeIDs(context.Background())) nodePath = make([]*node, 0, 3) - require.NoError(trie.visitPathToKey(ToKey(key2, BranchFactor16), func(n *node) error { + require.NoError(trie.visitPathToKey(ToKey(key2), func(n *node) error { nodePath = append(nodePath, n) return nil })) require.Len(nodePath, 3) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key1, BranchFactor16), nodePath[1].key) - require.Equal(ToKey(key2, BranchFactor16), nodePath[2].key) + + require.Equal(trie.sentinelNode, nodePath[0]) + require.Equal(ToKey(key1), nodePath[1].key) + require.Equal(ToKey(key2), nodePath[2].key) // Insert a key which shares no prefix with the others key3 := []byte{255} @@ -199,46 +197,50 @@ func TestTrieViewVisitPathToKey(t *testing.T) { require.NoError(trie.calculateNodeIDs(context.Background())) nodePath = make([]*node, 0, 2) - require.NoError(trie.visitPathToKey(ToKey(key3, BranchFactor16), func(n *node) 
error { + require.NoError(trie.visitPathToKey(ToKey(key3), func(n *node) error { nodePath = append(nodePath, n) return nil })) + require.Len(nodePath, 2) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key3, BranchFactor16), nodePath[1].key) + + require.Equal(trie.sentinelNode, nodePath[0]) + require.Equal(ToKey(key3), nodePath[1].key) // Other key path not affected nodePath = make([]*node, 0, 3) - require.NoError(trie.visitPathToKey(ToKey(key2, BranchFactor16), func(n *node) error { + require.NoError(trie.visitPathToKey(ToKey(key2), func(n *node) error { nodePath = append(nodePath, n) return nil })) require.Len(nodePath, 3) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key1, BranchFactor16), nodePath[1].key) - require.Equal(ToKey(key2, BranchFactor16), nodePath[2].key) + + require.Equal(trie.sentinelNode, nodePath[0]) + require.Equal(ToKey(key1), nodePath[1].key) + require.Equal(ToKey(key2), nodePath[2].key) // Gets closest node when key doesn't exist key4 := []byte{0, 1, 2} nodePath = make([]*node, 0, 3) - require.NoError(trie.visitPathToKey(ToKey(key4, BranchFactor16), func(n *node) error { + require.NoError(trie.visitPathToKey(ToKey(key4), func(n *node) error { nodePath = append(nodePath, n) return nil })) + require.Len(nodePath, 3) - require.Equal(trie.root, nodePath[0]) - require.Equal(ToKey(key1, BranchFactor16), nodePath[1].key) - require.Equal(ToKey(key2, BranchFactor16), nodePath[2].key) + require.Equal(trie.sentinelNode, nodePath[0]) + require.Equal(ToKey(key1), nodePath[1].key) + require.Equal(ToKey(key2), nodePath[2].key) // Gets just root when key doesn't exist and no key shares a prefix key5 := []byte{128} nodePath = make([]*node, 0, 1) - require.NoError(trie.visitPathToKey(ToKey(key5, BranchFactor16), func(n *node) error { + require.NoError(trie.visitPathToKey(ToKey(key5), func(n *node) error { nodePath = append(nodePath, n) return nil })) require.Len(nodePath, 1) - require.Equal(trie.root, nodePath[0]) + 
require.Equal(trie.sentinelNode, nodePath[0]) } func Test_Trie_ViewOnCommitedView(t *testing.T) { @@ -320,7 +322,7 @@ func Test_Trie_WriteToDB(t *testing.T) { rawBytes, err := dbTrie.baseDB.Get(prefixedKey) require.NoError(err) - node, err := parseNode(ToKey(key, BranchFactor16), rawBytes) + node, err := parseNode(ToKey(key), rawBytes) require.NoError(err) require.Equal([]byte("value"), node.value.Value()) } @@ -488,7 +490,7 @@ func Test_Trie_ExpandOnKeyPath(t *testing.T) { require.Equal([]byte("value12"), value) } -func Test_Trie_CompressedPaths(t *testing.T) { +func Test_Trie_compressedKeys(t *testing.T) { require := require.New(t) dbTrie, err := getBasicDB() @@ -619,7 +621,7 @@ func Test_Trie_HashCountOnBranch(t *testing.T) { // Make sure the branch node with the common prefix was created. // Note it's only created on call to GetMerkleRoot, not in NewView. - _, err = view2.getEditableNode(ToKey(keyPrefix, BranchFactor16), false) + _, err = view2.getEditableNode(ToKey(keyPrefix), false) require.NoError(err) // only hashes the new branch node, the new child node, and root @@ -760,7 +762,7 @@ func Test_Trie_ChainDeletion(t *testing.T) { require.NoError(err) require.NoError(newTrie.(*trieView).calculateNodeIDs(context.Background())) - root, err := newTrie.getEditableNode(emptyKey(BranchFactor16), false) + root, err := newTrie.getEditableNode(Key{}, false) require.NoError(err) require.Len(root.children, 1) @@ -777,7 +779,7 @@ func Test_Trie_ChainDeletion(t *testing.T) { ) require.NoError(err) require.NoError(newTrie.(*trieView).calculateNodeIDs(context.Background())) - root, err = newTrie.getEditableNode(emptyKey(BranchFactor16), false) + root, err = newTrie.getEditableNode(Key{}, false) require.NoError(err) // since all values have been deleted, the nodes should have been cleaned up require.Empty(root.children) @@ -842,15 +844,15 @@ func Test_Trie_NodeCollapse(t *testing.T) { require.NoError(err) 
require.NoError(trie.(*trieView).calculateNodeIDs(context.Background())) - root, err := trie.getEditableNode(emptyKey(BranchFactor16), false) + root, err := trie.getEditableNode(Key{}, false) require.NoError(err) require.Len(root.children, 1) - root, err = trie.getEditableNode(emptyKey(BranchFactor16), false) + root, err = trie.getEditableNode(Key{}, false) require.NoError(err) require.Len(root.children, 1) - firstNode, err := trie.getEditableNode(getSingleChildKey(root), true) + firstNode, err := trie.getEditableNode(getSingleChildKey(root, dbTrie.tokenSize), true) require.NoError(err) require.Len(firstNode.children, 1) @@ -868,11 +870,11 @@ func Test_Trie_NodeCollapse(t *testing.T) { require.NoError(err) require.NoError(trie.(*trieView).calculateNodeIDs(context.Background())) - root, err = trie.getEditableNode(emptyKey(BranchFactor16), false) + root, err = trie.getEditableNode(Key{}, false) require.NoError(err) require.Len(root.children, 1) - firstNode, err = trie.getEditableNode(getSingleChildKey(root), true) + firstNode, err = trie.getEditableNode(getSingleChildKey(root, dbTrie.tokenSize), true) require.NoError(err) require.Len(firstNode.children, 2) } @@ -1215,9 +1217,9 @@ func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { // Returns the path of the only child of this node. // Assumes this node has exactly one child. 
-func getSingleChildKey(n *node) Key { +func getSingleChildKey(n *node, tokenSize int) Key { for index, entry := range n.children { - return n.key.AppendExtend(index, entry.compressedKey) + return n.key.Extend(ToToken(index, tokenSize), entry.compressedKey) } return Key{} } diff --git a/x/merkledb/trieview.go b/x/merkledb/trieview.go index 3422379a20cc..622bfcb11207 100644 --- a/x/merkledb/trieview.go +++ b/x/merkledb/trieview.go @@ -35,7 +35,7 @@ var ( ErrPartialByteLengthWithValue = errors.New( "the underlying db only supports whole number of byte keys, so cannot record changes with partial byte lengths", ) - ErrGetPathToFailure = errors.New("GetPathTo failed to return the closest node") + ErrVisitPathToKey = errors.New("failed to visit expected node during insertion") ErrStartAfterEnd = errors.New("start key > end key") ErrNoValidRoot = errors.New("a valid root was not provided to the trieView constructor") ErrParentNotDatabase = errors.New("parent trie is not database") @@ -96,8 +96,11 @@ type trieView struct { db *merkleDB - // The root of the trie represented by this view. 
- root *node + // The nil key node + // It is either the root of the trie or the root of the trie is its single child node + sentinelNode *node + + tokenSize int } // NewView returns a new view on top of this Trie where the passed changes @@ -145,19 +148,20 @@ func newTrieView( parentTrie TrieView, changes ViewChanges, ) (*trieView, error) { - root, err := parentTrie.getEditableNode(db.rootKey, false /* hasValue */) + sentinelNode, err := parentTrie.getEditableNode(Key{}, false /* hasValue */) if err != nil { - if err == database.ErrNotFound { + if errors.Is(err, database.ErrNotFound) { return nil, ErrNoValidRoot } return nil, err } newView := &trieView{ - root: root, - db: db, - parentTrie: parentTrie, - changes: newChangeSummary(len(changes.BatchOps) + len(changes.MapOps)), + sentinelNode: sentinelNode, + db: db, + parentTrie: parentTrie, + changes: newChangeSummary(len(changes.BatchOps) + len(changes.MapOps)), + tokenSize: db.tokenSize, } for _, op := range changes.BatchOps { @@ -173,7 +177,7 @@ func newTrieView( newVal = maybe.Some(slices.Clone(op.Value)) } } - if err := newView.recordValueChange(db.toKey(key), newVal); err != nil { + if err := newView.recordValueChange(toKey(key), newVal); err != nil { return nil, err } } @@ -181,7 +185,7 @@ func newTrieView( if !changes.ConsumeBytes { val = maybe.Bind(val, slices.Clone[[]byte]) } - if err := newView.recordValueChange(db.toKey(stringToByteSlice(key)), val); err != nil { + if err := newView.recordValueChange(toKey(stringToByteSlice(key)), val); err != nil { return nil, err } } @@ -197,16 +201,17 @@ func newHistoricalTrieView( return nil, ErrNoValidRoot } - passedRootChange, ok := changes.nodes[db.rootKey] + passedSentinelChange, ok := changes.nodes[Key{}] if !ok { return nil, ErrNoValidRoot } newView := &trieView{ - root: passedRootChange.after, - db: db, - parentTrie: db, - changes: changes, + sentinelNode: passedSentinelChange.after, + db: db, + parentTrie: db, + changes: changes, + tokenSize: db.tokenSize, } 
// since this is a set of historical changes, all nodes have already been calculated // since no new changes have occurred, no new calculations need to be done @@ -246,9 +251,9 @@ func (t *trieView) calculateNodeIDs(ctx context.Context) error { } _ = t.db.calculateNodeIDsSema.Acquire(context.Background(), 1) - t.calculateNodeIDsHelper(t.root) + t.calculateNodeIDsHelper(t.sentinelNode) t.db.calculateNodeIDsSema.Release(1) - t.changes.rootID = t.root.id + t.changes.rootID = t.getMerkleRoot() // ensure no ancestor changes occurred during execution if t.isInvalid() { @@ -269,8 +274,8 @@ func (t *trieView) calculateNodeIDsHelper(n *node) { ) for childIndex, child := range n.children { - childPath := n.key.AppendExtend(childIndex, child.compressedKey) - childNodeChange, ok := t.changes.nodes[childPath] + childKey := n.key.Extend(ToToken(childIndex, t.tokenSize), child.compressedKey) + childNodeChange, ok := t.changes.nodes[childKey] if !ok { // This child wasn't changed. continue @@ -302,9 +307,8 @@ func (t *trieView) calculateNodeIDsHelper(n *node) { wg.Wait() close(updatedChildren) - keyLength := n.key.tokenLength for updatedChild := range updatedChildren { - index := updatedChild.key.Token(keyLength) + index := updatedChild.key.Token(n.key.length, t.tokenSize) n.setChildEntry(index, child{ compressedKey: n.children[index].compressedKey, id: updatedChild.id, @@ -334,7 +338,7 @@ func (t *trieView) getProof(ctx context.Context, key []byte) (*Proof, error) { defer span.End() proof := &Proof{ - Key: t.db.toKey(key), + Key: ToKey(key), } var closestNode *node @@ -345,6 +349,22 @@ func (t *trieView) getProof(ctx context.Context, key []byte) (*Proof, error) { }); err != nil { return nil, err } + root, err := t.getRoot() + if err != nil { + return nil, err + } + + // The sentinel node is always the first node in the path. + // If the sentinel node is not the root, remove it from the proofPath. 
+ if root != t.sentinelNode { + proof.Path = proof.Path[1:] + + // if there are no nodes in the proof path, add the root to serve as an exclusion proof + if len(proof.Path) == 0 { + proof.Path = []ProofNode{root.asProofNode()} + return proof, nil + } + } if closestNode.key == proof.Key { // There is a node with the given [key]. @@ -355,7 +375,7 @@ func (t *trieView) getProof(ctx context.Context, key []byte) (*Proof, error) { // There is no node with the given [key]. // If there is a child at the index where the node would be // if it existed, include that child in the proof. - nextIndex := proof.Key.Token(closestNode.key.tokenLength) + nextIndex := proof.Key.Token(closestNode.key.length, t.tokenSize) child, ok := closestNode.children[nextIndex] if !ok { return proof, nil @@ -363,7 +383,7 @@ func (t *trieView) getProof(ctx context.Context, key []byte) (*Proof, error) { childNode, err := t.getNodeWithID( child.id, - closestNode.key.AppendExtend(nextIndex, child.compressedKey), + closestNode.key.Extend(ToToken(nextIndex, t.tokenSize), child.compressedKey), child.hasValue, ) if err != nil { @@ -457,7 +477,11 @@ func (t *trieView) GetRangeProof( if len(result.StartProof) == 0 && len(result.EndProof) == 0 && len(result.KeyValues) == 0 { // If the range is empty, return the root proof. 
- rootProof, err := t.getProof(ctx, rootKey) + root, err := t.getRoot() + if err != nil { + return nil, err + } + rootProof, err := t.getProof(ctx, root.key.Bytes()) if err != nil { return nil, err } @@ -544,7 +568,17 @@ func (t *trieView) GetMerkleRoot(ctx context.Context) (ids.ID, error) { if err := t.calculateNodeIDs(ctx); err != nil { return ids.Empty, err } - return t.root.id, nil + return t.getMerkleRoot(), nil +} + +func (t *trieView) getMerkleRoot() ids.ID { + if !isSentinelNodeTheRoot(t.sentinelNode) { + for _, childEntry := range t.sentinelNode.children { + return childEntry.id + } + } + + return t.sentinelNode.id } func (t *trieView) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { @@ -557,7 +591,7 @@ func (t *trieView) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []er valueErrors := make([]error, len(keys)) for i, key := range keys { - results[i], valueErrors[i] = t.getValueCopy(t.db.toKey(key)) + results[i], valueErrors[i] = t.getValueCopy(ToKey(key)) } return results, valueErrors } @@ -568,7 +602,7 @@ func (t *trieView) GetValue(ctx context.Context, key []byte) ([]byte, error) { _, span := t.db.debugTracer.Start(ctx, "MerkleDB.trieview.GetValue") defer span.End() - return t.getValueCopy(t.db.toKey(key)) + return t.getValueCopy(ToKey(key)) } // getValueCopy returns a copy of the value for the given [key]. @@ -654,7 +688,7 @@ func (t *trieView) remove(key Key) error { return err } if parent != nil { - parent.removeChild(nodeToDelete) + parent.removeChild(nodeToDelete, t.tokenSize) // merge the parent node and its child into a single node if possible return t.compressNodePath(grandParent, parent) @@ -692,15 +726,15 @@ func (t *trieView) compressNodePath(parent, node *node) error { // "Cycle" over the key/values to find the only child. // Note this iteration once because len(node.children) == 1. 
for index, entry := range node.children { - childKey = node.key.AppendExtend(index, entry.compressedKey) + childKey = node.key.Extend(ToToken(index, t.tokenSize), entry.compressedKey) childEntry = entry } // [node] is the first node with multiple children. // combine it with the [node] passed in. - parent.setChildEntry(childKey.Token(parent.key.tokenLength), + parent.setChildEntry(childKey.Token(parent.key.length, t.tokenSize), child{ - compressedKey: childKey.Skip(parent.key.tokenLength + 1), + compressedKey: childKey.Skip(parent.key.length + t.tokenSize), id: childEntry.id, hasValue: childEntry.hasValue, }) @@ -714,25 +748,24 @@ func (t *trieView) compressNodePath(parent, node *node) error { // Always returns at least the root node. func (t *trieView) visitPathToKey(key Key, visitNode func(*node) error) error { var ( - // all node paths start at the root - currentNode = t.root + // all node paths start at the sentinelNode since its nil key is a prefix of all keys + currentNode = t.sentinelNode err error ) if err := visitNode(currentNode); err != nil { return err } // while the entire path hasn't been matched - for currentNode.key.tokenLength < key.tokenLength { + for currentNode.key.length < key.length { // confirm that a child exists and grab its ID before attempting to load it - nextChildEntry, hasChild := currentNode.children[key.Token(currentNode.key.tokenLength)] + nextChildEntry, hasChild := currentNode.children[key.Token(currentNode.key.length, t.tokenSize)] - if !hasChild || !key.iteratedHasPrefix(currentNode.key.tokenLength+1, nextChildEntry.compressedKey) { + if !hasChild || !key.iteratedHasPrefix(nextChildEntry.compressedKey, currentNode.key.length+t.tokenSize, t.tokenSize) { // there was no child along the path or the child that was there doesn't match the remaining path return nil } - // grab the next node along the path - currentNode, err = t.getNodeWithID(nextChildEntry.id, 
key.Take(currentNode.key.tokenLength+1+nextChildEntry.compressedKey.tokenLength), nextChildEntry.hasValue) + currentNode, err = t.getNodeWithID(nextChildEntry.id, key.Take(currentNode.key.length+t.tokenSize+nextChildEntry.compressedKey.length), nextChildEntry.hasValue) if err != nil { return err } @@ -743,14 +776,6 @@ func (t *trieView) visitPathToKey(key Key, visitNode func(*node) error) error { return nil } -func getLengthOfCommonPrefix(first, second Key, secondOffset int) int { - commonIndex := 0 - for first.tokenLength > commonIndex && second.tokenLength > (commonIndex+secondOffset) && first.Token(commonIndex) == second.Token(commonIndex+secondOffset) { - commonIndex++ - } - return commonIndex -} - // Get a copy of the node matching the passed key from the trie. // Used by views to get nodes from their ancestors. func (t *trieView) getEditableNode(key Key, hadValue bool) (*node, error) { @@ -791,64 +816,61 @@ func (t *trieView) insert( return nil, err } - // a node with that exact path already exists so update its value + // a node with that exact key already exists so update its value if closestNode.key == key { closestNode.setValue(value) // closestNode was already marked as changed in the ancestry loop above return closestNode, nil } - closestNodeKeyLength := closestNode.key.tokenLength - // A node with the exact key doesn't exist so determine the portion of the // key that hasn't been matched yet - // Note that [key] has prefix [closestNodeFullPath] but exactMatch was false, - // so [key] must be longer than [closestNodeFullPath] and the following index and slice won't OOB. - existingChildEntry, hasChild := closestNode.children[key.Token(closestNodeKeyLength)] + // Note that [key] has prefix [closestNode.key], so [key] must be longer + // and the following index won't OOB. 
+ existingChildEntry, hasChild := closestNode.children[key.Token(closestNode.key.length, t.tokenSize)] if !hasChild { - // there are no existing nodes along the path [fullPath], so create a new node to insert [value] - newNode := newNode( - closestNode, - key, - ) + // there are no existing nodes along the key [key], so create a new node to insert [value] + newNode := newNode(key) newNode.setValue(value) + closestNode.addChild(newNode, t.tokenSize) return newNode, t.recordNewNode(newNode) } - // if we have reached this point, then the [fullpath] we are trying to insert and + // if we have reached this point, then the [key] we are trying to insert and // the existing path node have some common prefix. // a new branching node will be created that will represent this common prefix and // have the existing path node and the value being inserted as children. // generate the new branch node - // find how many tokens are common between the existing child's compressed path and + // find how many tokens are common between the existing child's compressed key and // the current key(offset by the closest node's key), // then move all the common tokens into the branch node - commonPrefixLength := getLengthOfCommonPrefix(existingChildEntry.compressedKey, key, closestNodeKeyLength+1) + commonPrefixLength := getLengthOfCommonPrefix( + existingChildEntry.compressedKey, + key, + closestNode.key.length+t.tokenSize, + t.tokenSize, + ) - // If the length of the existing child's compressed path is less than or equal to the branch node's key that implies that the existing child's key matched the key to be inserted. 
- // Since it matched the key to be inserted, it should have been the last node returned by GetPathTo - if existingChildEntry.compressedKey.tokenLength <= commonPrefixLength { - return nil, ErrGetPathToFailure + if existingChildEntry.compressedKey.length <= commonPrefixLength { + // Since the compressed key is shorter than the common prefix, + // we should have visited [existingChildEntry] in [visitPathToKey]. + return nil, ErrVisitPathToKey } - branchNode := newNode( - closestNode, - key.Take(closestNodeKeyLength+1+commonPrefixLength), - ) + branchNode := newNode(key.Take(closestNode.key.length + t.tokenSize + commonPrefixLength)) + closestNode.addChild(branchNode, t.tokenSize) nodeWithValue := branchNode - if key.tokenLength == branchNode.key.tokenLength { + if key.length == branchNode.key.length { // the branch node has exactly the key to be inserted as its key, so set the value on the branch node branchNode.setValue(value) } else { // the key to be inserted is a child of the branch node // create a new node and add the value to it - newNode := newNode( - branchNode, - key, - ) + newNode := newNode(key) newNode.setValue(value) + branchNode.addChild(newNode, t.tokenSize) if err := t.recordNewNode(newNode); err != nil { return nil, err } @@ -857,9 +879,9 @@ func (t *trieView) insert( // add the existing child onto the branch node branchNode.setChildEntry( - existingChildEntry.compressedKey.Token(commonPrefixLength), + existingChildEntry.compressedKey.Token(commonPrefixLength, t.tokenSize), child{ - compressedKey: existingChildEntry.compressedKey.Skip(commonPrefixLength + 1), + compressedKey: existingChildEntry.compressedKey.Skip(commonPrefixLength + t.tokenSize), id: existingChildEntry.id, hasValue: existingChildEntry.hasValue, }) @@ -867,6 +889,15 @@ func (t *trieView) insert( return nodeWithValue, t.recordNewNode(branchNode) } +func getLengthOfCommonPrefix(first, second Key, secondOffset int, tokenSize int) int { + commonIndex := 0 + for first.length > 
commonIndex && second.length > commonIndex+secondOffset && + first.Token(commonIndex, tokenSize) == second.Token(commonIndex+secondOffset, tokenSize) { + commonIndex += tokenSize + } + return commonIndex +} + // Records that a node has been created. // Must not be called after [calculateNodeIDs] has returned. func (t *trieView) recordNewNode(after *node) error { @@ -883,12 +914,26 @@ func (t *trieView) recordNodeChange(after *node) error { // Must not be called after [calculateNodeIDs] has returned. func (t *trieView) recordNodeDeleted(after *node) error { // don't delete the root. - if after.key.tokenLength == 0 { + if after.key.length == 0 { return t.recordKeyChange(after.key, after, after.hasValue(), false /* newNode */) } return t.recordKeyChange(after.key, nil, after.hasValue(), false /* newNode */) } +func (t *trieView) getRoot() (*node, error) { + if !isSentinelNodeTheRoot(t.sentinelNode) { + // sentinelNode has one child, which is the root + for index, childEntry := range t.sentinelNode.children { + return t.getNodeWithID( + childEntry.id, + t.sentinelNode.key.Extend(ToToken(index, t.tokenSize), childEntry.compressedKey), + childEntry.hasValue) + } + } + + return t.sentinelNode, nil +} + // Records that the node associated with the given key has been changed. // If it is an existing node, record what its value was before it was changed. // Must not be called after [calculateNodeIDs] has returned. 
@@ -910,7 +955,7 @@ func (t *trieView) recordKeyChange(key Key, after *node, hadValue bool, newNode } before, err := t.getParentTrie().getEditableNode(key, hadValue) - if err != nil && err != database.ErrNotFound { + if err != nil && !errors.Is(err, database.ErrNotFound) { return err } t.changes.nodes[key] = &change[*node]{ diff --git a/x/merkledb/value_node_db.go b/x/merkledb/value_node_db.go index 8f168560d7fa..406c9a986dba 100644 --- a/x/merkledb/value_node_db.go +++ b/x/merkledb/value_node_db.go @@ -27,8 +27,7 @@ type valueNodeDB struct { nodeCache cache.Cacher[Key, *node] metrics merkleMetrics - closed utils.Atomic[bool] - branchFactor BranchFactor + closed utils.Atomic[bool] } func newValueNodeDB( @@ -36,14 +35,12 @@ func newValueNodeDB( bufferPool *sync.Pool, metrics merkleMetrics, cacheSize int, - branchFactor BranchFactor, ) *valueNodeDB { return &valueNodeDB{ - metrics: metrics, - baseDB: db, - bufferPool: bufferPool, - nodeCache: cache.NewSizedLRU(cacheSize, cacheEntrySize), - branchFactor: branchFactor, + metrics: metrics, + baseDB: db, + bufferPool: bufferPool, + nodeCache: cache.NewSizedLRU(cacheSize, cacheEntrySize), } } @@ -92,6 +89,11 @@ func (db *valueNodeDB) Get(key Key) (*node, error) { return parseNode(key, nodeBytes) } +func (db *valueNodeDB) Clear() error { + db.nodeCache.Flush() + return database.AtomicClearPrefix(db.baseDB, db.baseDB, valueNodePrefix) +} + // Batch of database operations type valueNodeBatch struct { db *valueNodeDB @@ -170,7 +172,7 @@ func (i *iterator) Next() bool { i.db.metrics.DatabaseNodeRead() key := i.nodeIter.Key() key = key[valueNodePrefixLen:] - n, err := parseNode(ToKey(key, i.db.branchFactor), i.nodeIter.Value()) + n, err := parseNode(ToKey(key), i.nodeIter.Value()) if err != nil { i.err = err return false diff --git a/x/merkledb/value_node_db_test.go b/x/merkledb/value_node_db_test.go index 910c6e1e9d6b..c87f0ab5ebf8 100644 --- a/x/merkledb/value_node_db_test.go +++ b/x/merkledb/value_node_db_test.go @@ -28,11 
+28,10 @@ func TestValueNodeDB(t *testing.T) { }, &mockMetrics{}, size, - BranchFactor16, ) // Getting a key that doesn't exist should return an error. - key := ToKey([]byte{0x01}, BranchFactor16) + key := ToKey([]byte{0x01}) _, err := db.Get(key) require.ErrorIs(err, database.ErrNotFound) @@ -124,12 +123,11 @@ func TestValueNodeDBIterator(t *testing.T) { }, &mockMetrics{}, cacheSize, - BranchFactor16, ) // Put key-node pairs. for i := 0; i < cacheSize; i++ { - key := ToKey([]byte{byte(i)}, BranchFactor16) + key := ToKey([]byte{byte(i)}) node := &node{ dbNode: dbNode{ value: maybe.Some([]byte{byte(i)}), @@ -167,7 +165,7 @@ func TestValueNodeDBIterator(t *testing.T) { it.Release() // Put key-node pairs with a common prefix. - key := ToKey([]byte{0xFF, 0x00}, BranchFactor16) + key := ToKey([]byte{0xFF, 0x00}) n := &node{ dbNode: dbNode{ value: maybe.Some([]byte{0xFF, 0x00}), @@ -178,7 +176,7 @@ func TestValueNodeDBIterator(t *testing.T) { batch.Put(key, n) require.NoError(batch.Write()) - key = ToKey([]byte{0xFF, 0x01}, BranchFactor16) + key = ToKey([]byte{0xFF, 0x01}) n = &node{ dbNode: dbNode{ value: maybe.Some([]byte{0xFF, 0x01}), @@ -220,3 +218,34 @@ func TestValueNodeDBIterator(t *testing.T) { err := it.Error() require.ErrorIs(err, database.ErrClosed) } + +func TestValueNodeDBClear(t *testing.T) { + require := require.New(t) + cacheSize := 200 + baseDB := memdb.New() + db := newValueNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + ) + + batch := db.NewBatch() + for _, b := range [][]byte{{1}, {2}, {3}} { + batch.Put(ToKey(b), newNode(ToKey(b))) + } + require.NoError(batch.Write()) + + // Assert the db is not empty + iter := baseDB.NewIteratorWithPrefix(valueNodePrefix) + require.True(iter.Next()) + iter.Release() + + require.NoError(db.Clear()) + + iter = baseDB.NewIteratorWithPrefix(valueNodePrefix) + defer iter.Release() + require.False(iter.Next()) +} diff --git 
a/x/merkledb/view_iterator.go b/x/merkledb/view_iterator.go index 263aa409e882..fac213bf350b 100644 --- a/x/merkledb/view_iterator.go +++ b/x/merkledb/view_iterator.go @@ -26,8 +26,8 @@ func (t *trieView) NewIteratorWithPrefix(prefix []byte) database.Iterator { func (t *trieView) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { var ( changes = make([]KeyChange, 0, len(t.changes.values)) - startKey = t.db.toKey(start) - prefixKey = t.db.toKey(prefix) + startKey = ToKey(start) + prefixKey = ToKey(prefix) ) for key, change := range t.changes.values { diff --git a/x/sync/client.go b/x/sync/client.go index 095f515d41fb..6605a5089935 100644 --- a/x/sync/client.go +++ b/x/sync/client.go @@ -73,7 +73,7 @@ type client struct { stateSyncMinVersion *version.Application log logging.Logger metrics SyncMetrics - branchFactor merkledb.BranchFactor + tokenSize int } type ClientConfig struct { @@ -95,7 +95,7 @@ func NewClient(config *ClientConfig) (Client, error) { stateSyncMinVersion: config.StateSyncMinVersion, log: config.Log, metrics: config.Metrics, - branchFactor: config.BranchFactor, + tokenSize: merkledb.BranchFactorToTokenSize[config.BranchFactor], }, nil } @@ -124,7 +124,7 @@ func (c *client) GetChangeProof( case *pb.SyncGetChangeProofResponse_ChangeProof: // The server had enough history to send us a change proof var changeProof merkledb.ChangeProof - if err := changeProof.UnmarshalProto(changeProofResp.ChangeProof, c.branchFactor); err != nil { + if err := changeProof.UnmarshalProto(changeProofResp.ChangeProof); err != nil { return nil, err } @@ -158,7 +158,7 @@ func (c *client) GetChangeProof( case *pb.SyncGetChangeProofResponse_RangeProof: var rangeProof merkledb.RangeProof - if err := rangeProof.UnmarshalProto(changeProofResp.RangeProof, c.branchFactor); err != nil { + if err := rangeProof.UnmarshalProto(changeProofResp.RangeProof); err != nil { return nil, err } @@ -171,6 +171,7 @@ func (c *client) GetChangeProof( startKey, endKey, 
req.EndRootHash, + c.tokenSize, ) if err != nil { return nil, err @@ -208,6 +209,7 @@ func verifyRangeProof( start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], rootBytes []byte, + tokenSize int, ) error { root, err := ids.ToID(rootBytes) if err != nil { @@ -227,6 +229,7 @@ func verifyRangeProof( start, end, root, + tokenSize, ); err != nil { return fmt.Errorf("%w due to %w", errInvalidRangeProof, err) } @@ -253,11 +256,8 @@ func (c *client) GetRangeProof( return nil, err } - startKey := maybeBytesToMaybe(req.StartKey) - endKey := maybeBytesToMaybe(req.EndKey) - var rangeProof merkledb.RangeProof - if err := rangeProof.UnmarshalProto(&rangeProofProto, c.branchFactor); err != nil { + if err := rangeProof.UnmarshalProto(&rangeProofProto); err != nil { return nil, err } @@ -265,9 +265,10 @@ func (c *client) GetRangeProof( ctx, &rangeProof, int(req.KeyLimit), - startKey, - endKey, + maybeBytesToMaybe(req.StartKey), + maybeBytesToMaybe(req.EndKey), req.RootHash, + c.tokenSize, ); err != nil { return nil, err } diff --git a/x/sync/client_test.go b/x/sync/client_test.go index 08c1a787b474..f6c67debe5ee 100644 --- a/x/sync/client_test.go +++ b/x/sync/client_test.go @@ -138,7 +138,7 @@ func sendRangeProofRequest( require.NoError(proto.Unmarshal(responseBytes, &responseProto)) var response merkledb.RangeProof - require.NoError(response.UnmarshalProto(&responseProto, merkledb.BranchFactor16)) + require.NoError(response.UnmarshalProto(&responseProto)) // modify if needed if modifyResponse != nil { @@ -456,7 +456,7 @@ func sendChangeProofRequest( if responseProto.GetChangeProof() != nil { // Server responded with a change proof var changeProof merkledb.ChangeProof - require.NoError(changeProof.UnmarshalProto(responseProto.GetChangeProof(), merkledb.BranchFactor16)) + require.NoError(changeProof.UnmarshalProto(responseProto.GetChangeProof())) // modify if needed if modifyChangeProof != nil { @@ -478,7 +478,7 @@ func sendChangeProofRequest( // Server responded with a range proof 
var rangeProof merkledb.RangeProof - require.NoError(rangeProof.UnmarshalProto(responseProto.GetRangeProof(), merkledb.BranchFactor16)) + require.NoError(rangeProof.UnmarshalProto(responseProto.GetRangeProof())) // modify if needed if modifyRangeProof != nil { @@ -812,7 +812,7 @@ func TestAppRequestSendFailed(t *testing.T) { gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(ids.NodeID{}, nil, errAppSendFailed).Times(2) + ).Return(ids.EmptyNodeID, nil, errAppSendFailed).Times(2) _, err = client.GetChangeProof( context.Background(), diff --git a/x/sync/db.go b/x/sync/db.go index 94b5542e34c1..5a0a5164c6a6 100644 --- a/x/sync/db.go +++ b/x/sync/db.go @@ -6,6 +6,7 @@ package sync import "github.com/ava-labs/avalanchego/x/merkledb" type DB interface { + merkledb.Clearer merkledb.MerkleRootGetter merkledb.ProofGetter merkledb.ChangeProofer diff --git a/x/sync/g_db/db_client.go b/x/sync/g_db/db_client.go index 8bd936a53975..64e63bb76652 100644 --- a/x/sync/g_db/db_client.go +++ b/x/sync/g_db/db_client.go @@ -19,16 +19,14 @@ import ( var _ sync.DB = (*DBClient)(nil) -func NewDBClient(client pb.DBClient, branchFactor merkledb.BranchFactor) *DBClient { +func NewDBClient(client pb.DBClient) *DBClient { return &DBClient{ - client: client, - branchFactor: branchFactor, + client: client, } } type DBClient struct { - client pb.DBClient - branchFactor merkledb.BranchFactor + client pb.DBClient } func (c *DBClient) GetMerkleRoot(ctx context.Context) (ids.ID, error) { @@ -70,7 +68,7 @@ func (c *DBClient) GetChangeProof( } var proof merkledb.ChangeProof - if err := proof.UnmarshalProto(resp.GetChangeProof(), c.branchFactor); err != nil { + if err := proof.UnmarshalProto(resp.GetChangeProof()); err != nil { return nil, err } return &proof, nil @@ -122,7 +120,7 @@ func (c *DBClient) GetProof(ctx context.Context, key []byte) (*merkledb.Proof, e } var proof merkledb.Proof - if err := proof.UnmarshalProto(resp.Proof, c.branchFactor); err != nil { + if err := 
proof.UnmarshalProto(resp.Proof); err != nil { return nil, err } return &proof, nil @@ -152,7 +150,7 @@ func (c *DBClient) GetRangeProofAtRoot( } var proof merkledb.RangeProof - if err := proof.UnmarshalProto(resp.Proof, c.branchFactor); err != nil { + if err := proof.UnmarshalProto(resp.Proof); err != nil { return nil, err } return &proof, nil @@ -177,3 +175,8 @@ func (c *DBClient) CommitRangeProof( }) return err } + +func (c *DBClient) Clear() error { + _, err := c.client.Clear(context.Background(), &emptypb.Empty{}) + return err +} diff --git a/x/sync/g_db/db_server.go b/x/sync/g_db/db_server.go index b6471542dcca..820a130bb496 100644 --- a/x/sync/g_db/db_server.go +++ b/x/sync/g_db/db_server.go @@ -19,18 +19,16 @@ import ( var _ pb.DBServer = (*DBServer)(nil) -func NewDBServer(db sync.DB, branchFactor merkledb.BranchFactor) *DBServer { +func NewDBServer(db sync.DB) *DBServer { return &DBServer{ - db: db, - branchFactor: branchFactor, + db: db, } } type DBServer struct { pb.UnsafeDBServer - db sync.DB - branchFactor merkledb.BranchFactor + db sync.DB } func (s *DBServer) GetMerkleRoot( @@ -98,7 +96,7 @@ func (s *DBServer) VerifyChangeProof( req *pb.VerifyChangeProofRequest, ) (*pb.VerifyChangeProofResponse, error) { var proof merkledb.ChangeProof - if err := proof.UnmarshalProto(req.Proof, s.branchFactor); err != nil { + if err := proof.UnmarshalProto(req.Proof); err != nil { return nil, err } @@ -130,7 +128,7 @@ func (s *DBServer) CommitChangeProof( req *pb.CommitChangeProofRequest, ) (*emptypb.Empty, error) { var proof merkledb.ChangeProof - if err := proof.UnmarshalProto(req.Proof, s.branchFactor); err != nil { + if err := proof.UnmarshalProto(req.Proof); err != nil { return nil, err } @@ -201,7 +199,7 @@ func (s *DBServer) CommitRangeProof( req *pb.CommitRangeProofRequest, ) (*emptypb.Empty, error) { var proof merkledb.RangeProof - if err := proof.UnmarshalProto(req.RangeProof, s.branchFactor); err != nil { + if err := proof.UnmarshalProto(req.RangeProof); 
err != nil { return nil, err } @@ -218,3 +216,7 @@ func (s *DBServer) CommitRangeProof( err := s.db.CommitRangeProof(ctx, start, end, &proof) return &emptypb.Empty{}, err } + +func (s *DBServer) Clear(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { + return &emptypb.Empty{}, s.db.Clear() +} diff --git a/x/sync/manager.go b/x/sync/manager.go index 0a13a89eb32b..a7a6858d5122 100644 --- a/x/sync/manager.go +++ b/x/sync/manager.go @@ -10,12 +10,15 @@ import ( "fmt" "sync" + "golang.org/x/exp/maps" + "go.uber.org/zap" "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/x/merkledb" pb "github.com/ava-labs/avalanchego/proto/pb/sync" @@ -102,9 +105,9 @@ type Manager struct { cancelCtx context.CancelFunc // Set to true when StartSyncing is called. - syncing bool - closeOnce sync.Once - branchFactor merkledb.BranchFactor + syncing bool + closeOnce sync.Once + tokenSize int } type ManagerConfig struct { @@ -136,7 +139,7 @@ func NewManager(config ManagerConfig) (*Manager, error) { doneChan: make(chan struct{}), unprocessedWork: newWorkHeap(), processedWork: newWorkHeap(), - branchFactor: config.BranchFactor, + tokenSize: merkledb.BranchFactorToTokenSize[config.BranchFactor], } m.unprocessedWorkCond.L = &m.workLock @@ -404,7 +407,7 @@ func (m *Manager) findNextKey( // and traversing them from the longest key to the shortest key. // For each node in these proofs, compare if the children of that node exist // or have the same ID in the other proof. - proofKeyPath := merkledb.ToKey(lastReceivedKey, m.branchFactor) + proofKeyPath := merkledb.ToKey(lastReceivedKey) // If the received proof is an exclusion proof, the last node may be for a // key that is after the [lastReceivedKey]. 
@@ -431,10 +434,32 @@ func (m *Manager) findNextKey( nextKey := maybe.Nothing[[]byte]() + // Add sentinel node back into the localProofNodes, if it is missing. + // Required to ensure that a common node exists in both proofs + if len(localProofNodes) > 0 && localProofNodes[0].Key.Length() != 0 { + sentinel := merkledb.ProofNode{ + Children: map[byte]ids.ID{ + localProofNodes[0].Key.Token(0, m.tokenSize): ids.Empty, + }, + } + localProofNodes = append([]merkledb.ProofNode{sentinel}, localProofNodes...) + } + + // Add sentinel node back into the endProof, if it is missing. + // Required to ensure that a common node exists in both proofs + if len(endProof) > 0 && endProof[0].Key.Length() != 0 { + sentinel := merkledb.ProofNode{ + Children: map[byte]ids.ID{ + endProof[0].Key.Token(0, m.tokenSize): ids.Empty, + }, + } + endProof = append([]merkledb.ProofNode{sentinel}, endProof...) + } + localProofNodeIndex := len(localProofNodes) - 1 receivedProofNodeIndex := len(endProof) - 1 - // traverse the two proofs from the deepest nodes up to the root until a difference is found + // traverse the two proofs from the deepest nodes up to the sentinel node until a difference is found for localProofNodeIndex >= 0 && receivedProofNodeIndex >= 0 && nextKey.IsNothing() { localProofNode := localProofNodes[localProofNodeIndex] receivedProofNode := endProof[receivedProofNodeIndex] @@ -447,7 +472,7 @@ func (m *Manager) findNextKey( // select the deepest proof node from the two proofs switch { - case receivedProofNode.Key.TokensLength() > localProofNode.Key.TokensLength(): + case receivedProofNode.Key.Length() > localProofNode.Key.Length(): // there was a branch node in the received proof that isn't in the local proof // see if the received proof node has children not present in the local proof deepestNode = &receivedProofNode @@ -455,7 +480,7 @@ func (m *Manager) findNextKey( // we have dealt with this received node, so move on to the next received node receivedProofNodeIndex-- - case 
localProofNode.Key.TokensLength() > receivedProofNode.Key.TokensLength(): + case localProofNode.Key.Length() > receivedProofNode.Key.Length(): // there was a branch node in the local proof that isn't in the received proof // see if the local proof node has children not present in the received proof deepestNode = &localProofNode @@ -482,20 +507,20 @@ func (m *Manager) findNextKey( // If the deepest node has the same key as [proofKeyPath], // then all of its children have keys greater than the proof key, // so we can start at the 0 token. - startingChildToken := byte(0) + startingChildToken := 0 // If the deepest node has a key shorter than the key being proven, // we can look at the next token index of the proof key to determine which of that // node's children have keys larger than [proofKeyPath]. // Any child with a token greater than the [proofKeyPath]'s token at that // index will have a larger key. - if deepestNode.Key.TokensLength() < proofKeyPath.TokensLength() { - startingChildToken = proofKeyPath.Token(deepestNode.Key.TokensLength()) + 1 + if deepestNode.Key.Length() < proofKeyPath.Length() { + startingChildToken = int(proofKeyPath.Token(deepestNode.Key.Length(), m.tokenSize)) + 1 } // determine if there are any differences in the children for the deepest unhandled node of the two proofs - if childIndex, hasDifference := findChildDifference(deepestNode, deepestNodeFromOtherProof, startingChildToken, m.branchFactor); hasDifference { - nextKey = maybe.Some(deepestNode.Key.Append(childIndex).Bytes()) + if childIndex, hasDifference := findChildDifference(deepestNode, deepestNodeFromOtherProof, startingChildToken); hasDifference { + nextKey = maybe.Some(deepestNode.Key.Extend(merkledb.ToToken(childIndex, m.tokenSize)).Bytes()) break } } @@ -794,12 +819,27 @@ func midPoint(startMaybe, endMaybe maybe.Maybe[[]byte]) maybe.Maybe[[]byte] { // findChildDifference returns the first child index that is different between node 1 and node 2 if one exists and // a bool 
indicating if any difference was found -func findChildDifference(node1, node2 *merkledb.ProofNode, startIndex byte, branchFactor merkledb.BranchFactor) (byte, bool) { +func findChildDifference(node1, node2 *merkledb.ProofNode, startIndex int) (byte, bool) { + // Children indices >= [startIndex] present in at least one of the nodes. + childIndices := set.Set[byte]{} + for _, node := range []*merkledb.ProofNode{node1, node2} { + if node == nil { + continue + } + for key := range node.Children { + if int(key) >= startIndex { + childIndices.Add(key) + } + } + } + + sortedChildIndices := maps.Keys(childIndices) + slices.Sort(sortedChildIndices) var ( child1, child2 ids.ID ok1, ok2 bool ) - for childIndex := startIndex; merkledb.BranchFactor(childIndex) < branchFactor; childIndex++ { + for _, childIndex := range sortedChildIndices { if node1 != nil { child1, ok1 = node1.Children[childIndex] } diff --git a/x/sync/metrics.go b/x/sync/metrics.go index fc62d7d11212..881ca37282ef 100644 --- a/x/sync/metrics.go +++ b/x/sync/metrics.go @@ -8,7 +8,7 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) var ( @@ -74,13 +74,12 @@ func NewMetrics(namespace string, reg prometheus.Registerer) (SyncMetrics, error Help: "cumulative amount of proof requests that were successful", }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.requestsFailed), reg.Register(m.requestsMade), reg.Register(m.requestsSucceeded), ) - return &m, errs.Err + return &m, err } func (m *metrics) RequestFailed() { diff --git a/x/sync/network_server.go b/x/sync/network_server.go index 6f21702ce397..c213bee6a739 100644 --- a/x/sync/network_server.go +++ b/x/sync/network_server.go @@ -39,7 +39,6 @@ const ( // TODO: refine this estimate. This is almost certainly a large overestimate. 
estimatedMessageOverhead = 4 * units.KiB maxByteSizeLimit = constants.DefaultMaxMessageSize - estimatedMessageOverhead - endProofSizeBufferAmount = 2 * units.KiB ) var ( diff --git a/x/sync/network_server_test.go b/x/sync/network_server_test.go index 60555498457f..d79b27a14c44 100644 --- a/x/sync/network_server_test.go +++ b/x/sync/network_server_test.go @@ -114,7 +114,7 @@ func Test_Server_GetRangeProof(t *testing.T) { require.NoError(proto.Unmarshal(responseBytes, &proofProto)) var p merkledb.RangeProof - require.NoError(p.UnmarshalProto(&proofProto, merkledb.BranchFactor16)) + require.NoError(p.UnmarshalProto(&proofProto)) proof = &p } return nil diff --git a/x/sync/peer_tracker.go b/x/sync/peer_tracker.go index 7c105f3363af..a1f8a66ae711 100644 --- a/x/sync/peer_tracker.go +++ b/x/sync/peer_tracker.go @@ -14,10 +14,10 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" safemath "github.com/ava-labs/avalanchego/utils/math" @@ -101,13 +101,12 @@ func newPeerTracker( ), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(t.numTrackedPeers), registerer.Register(t.numResponsivePeers), registerer.Register(t.averageBandwidthMetric), ) - return t, errs.Err + return t, err } // Returns true if we're not connected to enough peers. 
diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index 9a6a5de2dba9..af908c9d941c 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -586,10 +586,11 @@ func TestFindNextKeyRandom(t *testing.T) { ) require.NoError(err) + config := newDefaultDBConfig() localDB, err := merkledb.New( context.Background(), memdb.New(), - newDefaultDBConfig(), + config, ) require.NoError(err) @@ -677,7 +678,7 @@ func TestFindNextKeyRandom(t *testing.T) { for _, node := range remoteProof.EndProof { for childIdx, childID := range node.Children { remoteKeyIDs = append(remoteKeyIDs, keyAndID{ - key: node.Key.Append(childIdx), + key: node.Key.Extend(merkledb.ToToken(childIdx, merkledb.BranchFactorToTokenSize[config.BranchFactor])), id: childID, }) } @@ -688,7 +689,7 @@ func TestFindNextKeyRandom(t *testing.T) { for _, node := range localProof.Path { for childIdx, childID := range node.Children { localKeyIDs = append(localKeyIDs, keyAndID{ - key: node.Key.Append(childIdx), + key: node.Key.Extend(merkledb.ToToken(childIdx, merkledb.BranchFactorToTokenSize[config.BranchFactor])), id: childID, }) }