diff --git a/.gitattributes b/.gitattributes index adc4144ffa3..13825940056 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3,3 +3,7 @@ go.sum linguist-generated text gnovm/stdlibs/generated.go linguist-generated gnovm/tests/stdlibs/generated.go linguist-generated +*.gen.gno linguist-generated +*.gen_test.gno linguist-generated +*.gen.go linguist-generated +*.gen_test.go linguist-generated \ No newline at end of file diff --git a/.github/golangci.yml b/.github/golangci.yml index ca85620b7e6..afc581d2ec5 100644 --- a/.github/golangci.yml +++ b/.github/golangci.yml @@ -85,6 +85,7 @@ issues: - gosec # Disabled linting of weak number generators - makezero # Disabled linting of intentional slice appends - goconst # Disabled linting of common mnemonics and test case strings + - unused # Disabled linting of unused mock methods - path: _\.gno linters: - errorlint # Disabled linting of error comparisons, because of lacking std lib support diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 6a6d6e02653..5d606a2a663 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -71,12 +71,20 @@ jobs: - run: make lint -C ./examples # TODO: consider running lint on every other directories, maybe in "warning" mode? # TODO: track coverage + fmt: name: Run gno fmt on examples uses: ./.github/workflows/gnofmt_template.yml with: path: "examples/..." + generate: + name: Check generated files are up to date + uses: ./.github/workflows/build_template.yml + with: + modulepath: "examples" + go-version: "1.22.x" + mod-tidy: strategy: fail-fast: false diff --git a/contribs/gnodev/go.mod b/contribs/gnodev/go.mod index 6ca47408a75..92d8494fa40 100644 --- a/contribs/gnodev/go.mod +++ b/contribs/gnodev/go.mod @@ -77,10 +77,12 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/cors v1.11.1 // indirect github.com/rs/xid v1.6.0 // indirect + github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/yuin/goldmark v1.7.2 // indirect github.com/yuin/goldmark-emoji v1.0.2 // indirect + github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect go.etcd.io/bbolt v1.3.11 // indirect diff --git a/contribs/gnodev/go.sum b/contribs/gnodev/go.sum index 912345d61a8..3f22e4f2f00 100644 --- a/contribs/gnodev/go.sum +++ b/contribs/gnodev/go.sum @@ -3,8 +3,10 @@ dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE= github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/chroma/v2 v2.2.0/go.mod h1:vf4zrexSH54oEjJ7EdB65tGNHmH3pGZmVkgTP5RHvAs= github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E= github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I= +github.com/alecthomas/repr v0.0.0-20220113201626-b1b626ac65ae/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= 
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= @@ -91,6 +93,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeC github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= @@ -209,6 +213,8 @@ github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA= github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b h1:oV47z+jotrLVvhiLRNzACVe7/qZ8DcRlMlDucR/FARo= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b/go.mod h1:JprPCeMgYyLKJoAy9nxpVScm7NwFSwpibdrUKm4kcw0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -223,10 +229,13 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/yuin/goldmark v1.3.7/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.7.2 h1:NjGd7lO7zrUn/A7eKwn5PEOt4ONYGqpxSEeZuduvgxc= github.com/yuin/goldmark v1.7.2/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= github.com/yuin/goldmark-emoji v1.0.2 h1:c/RgTShNgHTtc6xdz2KKI74jJr6rWi7FPgnP9GAsO5s= github.com/yuin/goldmark-emoji v1.0.2/go.mod h1:RhP/RWpexdp+KHs7ghKnifRoIs/Bq4nDS7tRbCkOwKY= +github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ= +github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go 
v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= diff --git a/contribs/gnodev/pkg/dev/node.go b/contribs/gnodev/pkg/dev/node.go index 1c91c34b027..1b8414d0daa 100644 --- a/contribs/gnodev/pkg/dev/node.go +++ b/contribs/gnodev/pkg/dev/node.go @@ -518,6 +518,8 @@ func (n *Node) rebuildNode(ctx context.Context, genesis gnoland.GnoGenesisState) // Speed up stdlib loading after first start (saves about 2-3 seconds on each reload). nodeConfig.CacheStdlibLoad = true nodeConfig.Genesis.ConsensusParams.Block.MaxGas = n.config.MaxGasPerBlock + // Genesis verification is always false with Gnodev + nodeConfig.SkipGenesisVerification = true // recoverFromError handles panics and converts them to errors. recoverFromError := func() { diff --git a/contribs/gnofaucet/go.mod b/contribs/gnofaucet/go.mod index 3abc189b86a..3d1e5f54c54 100644 --- a/contribs/gnofaucet/go.mod +++ b/contribs/gnofaucet/go.mod @@ -31,6 +31,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.11.1 // indirect github.com/rs/xid v1.6.0 // indirect + github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b // indirect go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 // indirect diff --git a/contribs/gnofaucet/go.sum b/contribs/gnofaucet/go.sum index fafbc4d1060..10e2c19b408 100644 --- a/contribs/gnofaucet/go.sum +++ b/contribs/gnofaucet/go.sum @@ -111,6 +111,8 @@ github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b h1:oV47z+jotrLVvhiLRNzACVe7/qZ8DcRlMlDucR/FARo= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b/go.mod h1:JprPCeMgYyLKJoAy9nxpVScm7NwFSwpibdrUKm4kcw0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/contribs/gnogenesis/go.mod b/contribs/gnogenesis/go.mod index f1b316c2bee..3056af1d4cc 100644 --- a/contribs/gnogenesis/go.mod +++ b/contribs/gnogenesis/go.mod @@ -32,6 +32,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.11.1 // indirect github.com/rs/xid v1.6.0 // indirect + github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect @@ -49,6 +50,7 @@ require ( golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect diff --git a/contribs/gnogenesis/go.sum b/contribs/gnogenesis/go.sum index 7ba3aede534..7e4a683cad1 100644 --- a/contribs/gnogenesis/go.sum +++ 
b/contribs/gnogenesis/go.sum @@ -120,6 +120,8 @@ github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b h1:oV47z+jotrLVvhiLRNzACVe7/qZ8DcRlMlDucR/FARo= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b/go.mod h1:JprPCeMgYyLKJoAy9nxpVScm7NwFSwpibdrUKm4kcw0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/contribs/gnogenesis/internal/txs/txs_add_packages.go b/contribs/gnogenesis/internal/txs/txs_add_packages.go index cf863c72116..0ab5724154e 100644 --- a/contribs/gnogenesis/internal/txs/txs_add_packages.go +++ b/contribs/gnogenesis/internal/txs/txs_add_packages.go @@ -5,8 +5,9 @@ import ( "errors" "flag" "fmt" + "os" - "github.com/gnolang/gno/tm2/pkg/crypto" + "github.com/gnolang/gno/tm2/pkg/crypto/keys" "github.com/gnolang/gno/gno.land/pkg/gnoland" "github.com/gnolang/gno/gno.land/pkg/gnoland/ugnot" @@ -15,28 +16,45 @@ import ( "github.com/gnolang/gno/tm2/pkg/std" ) -var ( - errInvalidPackageDir = errors.New("invalid package directory") - errInvalidDeployerAddr = errors.New("invalid deployer address") +const ( + defaultAccount_Name = "test1" + defaultAccount_Address = "g1jg8mtutu9khhfwc4nxmuhcpftf0pajdhfvsqf5" + defaultAccount_Seed = "source bonus chronic canvas draft south burst lottery vacant surface solve popular case indicate oppose farm nothing bullet exhibit title speed wink action roast" + defaultAccount_publicKey = "gpub1pgfj7ard9eg82cjtv4u4xetrwqer2dntxyfzxz3pq0skzdkmzu0r9h6gny6eg8c9dc303xrrudee6z4he4y7cs5rnjwmyf40yaj" ) +var errInvalidPackageDir = errors.New("invalid package directory") + // Keep in sync with gno.land/cmd/start.go -var ( - defaultCreator = crypto.MustAddressFromString("g1jg8mtutu9khhfwc4nxmuhcpftf0pajdhfvsqf5") // test1 - genesisDeployFee = std.NewFee(50000, std.MustParseCoin(ugnot.ValueString(1000000))) -) +var genesisDeployFee = std.NewFee(50000, std.MustParseCoin(ugnot.ValueString(1000000))) type addPkgCfg struct { - txsCfg *txsCfg - deployerAddress string + txsCfg *txsCfg + keyName string + gnoHome string // default GNOHOME env var, just here to ease testing with parallel tests + insecurePasswordStdin bool } func (c *addPkgCfg) RegisterFlags(fs *flag.FlagSet) { fs.StringVar( - &c.deployerAddress, - "deployer-address", - defaultCreator.String(), - "the address that will be used to deploy the package", + &c.keyName, + "key-name", + "", + "The package deployer key name or address contained on gnokey", + ) + + fs.StringVar( + &c.gnoHome, + "gno-home", + os.Getenv("GNOHOME"), + "the gno home directory", + ) + + fs.BoolVar( + &c.insecurePasswordStdin, + "insecure-password-stdin", + false, + "the gno home directory", ) } @@ -65,10 +83,15 @@ func execTxsAddPackages( io commands.IO, args []string, ) error { + var ( + keyname = defaultAccount_Name + keybase keys.Keybase + pass string + ) // Load the genesis - genesis, loadErr := types.GenesisDocFromFile(cfg.txsCfg.GenesisPath) - if loadErr != nil { - 
return fmt.Errorf("unable to load genesis, %w", loadErr) + genesis, err := types.GenesisDocFromFile(cfg.txsCfg.GenesisPath) + if err != nil { + return fmt.Errorf("unable to load genesis, %w", err) } // Make sure the package dir is set @@ -76,19 +99,30 @@ func execTxsAddPackages( return errInvalidPackageDir } - var ( - creator = defaultCreator - err error - ) - - // Check if the deployer address is set - if cfg.deployerAddress != defaultCreator.String() { - creator, err = crypto.AddressFromString(cfg.deployerAddress) + if cfg.keyName != "" { + keyname = cfg.keyName + keybase, err = keys.NewKeyBaseFromDir(cfg.gnoHome) + if err != nil { + return fmt.Errorf("unable to load keybase: %w", err) + } + pass, err = io.GetPassword("Enter password.", cfg.insecurePasswordStdin) + if err != nil { + return fmt.Errorf("cannot read password: %w", err) + } + } else { + keybase = keys.NewInMemory() + _, err := keybase.CreateAccount(defaultAccount_Name, defaultAccount_Seed, "", "", 0, 0) if err != nil { - return fmt.Errorf("%w, %w", errInvalidDeployerAddr, err) + return fmt.Errorf("unable to create account: %w", err) } } + info, err := keybase.GetByNameOrAddress(keyname) + if err != nil { + return fmt.Errorf("unable to find key in keybase: %w", err) + } + + creator := info.GetAddress() parsedTxs := make([]gnoland.TxWithMetadata, 0) for _, path := range args { // Generate transactions from the packages (recursively) @@ -97,6 +131,10 @@ func execTxsAddPackages( return fmt.Errorf("unable to load txs from directory, %w", err) } + if err := signTxs(txs, keybase, genesis.ChainID, keyname, pass); err != nil { + return fmt.Errorf("unable to sign txs, %w", err) + } + parsedTxs = append(parsedTxs, txs...) } @@ -117,3 +155,25 @@ func execTxsAddPackages( return nil } + +func signTxs(txs []gnoland.TxWithMetadata, keybase keys.Keybase, chainID, keyname string, password string) error { + for index, tx := range txs { + // Here accountNumber and sequenceNumber are set to 0 because they are considered as 0 on genesis transactions. 
+ signBytes, err := tx.Tx.GetSignBytes(chainID, 0, 0) + if err != nil { + return fmt.Errorf("unable to load txs from directory, %w", err) + } + signature, publicKey, err := keybase.Sign(keyname, password, signBytes) + if err != nil { + return fmt.Errorf("unable sign tx %w", err) + } + txs[index].Tx.Signatures = []std.Signature{ + { + PubKey: publicKey, + Signature: signature, + }, + } + } + + return nil +} diff --git a/contribs/gnogenesis/internal/txs/txs_add_packages_test.go b/contribs/gnogenesis/internal/txs/txs_add_packages_test.go index c3405d6ff8d..38d930401e8 100644 --- a/contribs/gnogenesis/internal/txs/txs_add_packages_test.go +++ b/contribs/gnogenesis/internal/txs/txs_add_packages_test.go @@ -2,9 +2,11 @@ package txs import ( "context" + "encoding/hex" "fmt" "os" "path/filepath" + "strings" "testing" "github.com/gnolang/contribs/gnogenesis/internal/common" @@ -12,6 +14,8 @@ import ( vmm "github.com/gnolang/gno/gno.land/pkg/sdk/vm" "github.com/gnolang/gno/tm2/pkg/bft/types" "github.com/gnolang/gno/tm2/pkg/commands" + "github.com/gnolang/gno/tm2/pkg/crypto/keys" + "github.com/gnolang/gno/tm2/pkg/crypto/keys/client" "github.com/gnolang/gno/tm2/pkg/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -19,6 +23,7 @@ import ( func TestGenesis_Txs_Add_Packages(t *testing.T) { t.Parallel() + const addPkgExpectedSignature = "cfe5a15d8def04cbdaf9d08e2511db7928152b26419c4577cbfa282c83118852411f3de5d045ce934555572c21bda8042ce5c64b793a01748e49cf2cff7c2983" t.Run("invalid genesis file", func(t *testing.T) { t.Parallel() @@ -60,8 +65,10 @@ func TestGenesis_Txs_Add_Packages(t *testing.T) { assert.ErrorContains(t, cmdErr, errInvalidPackageDir.Error()) }) - t.Run("invalid deployer address", func(t *testing.T) { + t.Run("non existent key", func(t *testing.T) { t.Parallel() + keybaseDir := t.TempDir() + keyname := "beep-boop" tempGenesis, cleanup := testutils.NewTestFile(t) t.Cleanup(cleanup) @@ -69,24 +76,36 @@ func TestGenesis_Txs_Add_Packages(t *testing.T) { genesis := common.GetDefaultGenesis() require.NoError(t, genesis.SaveAs(tempGenesis.Name())) + io := commands.NewTestIO() + io.SetIn( + strings.NewReader( + fmt.Sprintf( + "%s\n", + "password", + ), + ), + ) // Create the command - cmd := NewTxsCmd(commands.NewTestIO()) + cmd := NewTxsCmd(io) args := []string{ "add", "packages", "--genesis-path", tempGenesis.Name(), t.TempDir(), // package dir - "--deployer-address", - "beep-boop", // invalid address + "--key-name", + keyname, // non-existent key name + "--gno-home", + keybaseDir, // temporaryDir for keybase + "--insecure-password-stdin", } // Run the command cmdErr := cmd.ParseAndRun(context.Background(), args) - assert.ErrorIs(t, cmdErr, errInvalidDeployerAddr) + assert.ErrorContains(t, cmdErr, "Key "+keyname+" not found") }) - t.Run("valid package", func(t *testing.T) { + t.Run("existent key wrong password", func(t *testing.T) { t.Parallel() tempGenesis, cleanup := testutils.NewTestFile(t) @@ -94,32 +113,189 @@ func TestGenesis_Txs_Add_Packages(t *testing.T) { genesis := common.GetDefaultGenesis() require.NoError(t, genesis.SaveAs(tempGenesis.Name())) + // Prepare the package + var ( + packagePath = "gno.land/p/demo/cuttlas" + dir = t.TempDir() + keybaseDir = t.TempDir() + keyname = "beep-boop" + password = "somepass" + ) + createValidFile(t, dir, packagePath) + // Create key + kb, err := keys.NewKeyBaseFromDir(keybaseDir) + require.NoError(t, err) + mnemonic, err := 
client.GenerateMnemonic(256) + require.NoError(t, err) + _, err = kb.CreateAccount(keyname, mnemonic, "", password+"wrong", 0, 0) + require.NoError(t, err) + + io := commands.NewTestIO() + io.SetIn( + strings.NewReader( + fmt.Sprintf( + "%s\n", + password, + ), + ), + ) + + // Create the command + cmd := NewTxsCmd(io) + args := []string{ + "add", + "packages", + "--genesis-path", + tempGenesis.Name(), + "--key-name", + keyname, // non-existent key name + "--gno-home", + keybaseDir, // temporaryDir for keybase + "--insecure-password-stdin", + dir, + } + + // Run the command + cmdErr := cmd.ParseAndRun(context.Background(), args) + assert.ErrorContains(t, cmdErr, "unable to sign txs") + }) + + t.Run("existent key correct password", func(t *testing.T) { + t.Parallel() + tempGenesis, cleanup := testutils.NewTestFile(t) + t.Cleanup(cleanup) + + genesis := common.GetDefaultGenesis() + require.NoError(t, genesis.SaveAs(tempGenesis.Name())) // Prepare the package var ( packagePath = "gno.land/p/demo/cuttlas" dir = t.TempDir() + keybaseDir = t.TempDir() + keyname = "beep-boop" + password = "somepass" ) + createValidFile(t, dir, packagePath) + // Create key + kb, err := keys.NewKeyBaseFromDir(keybaseDir) + require.NoError(t, err) + info, err := kb.CreateAccount(keyname, defaultAccount_Seed, "", password, 0, 0) + require.NoError(t, err) - createFile := func(path, data string) { - file, err := os.Create(path) - require.NoError(t, err) + io := commands.NewTestIO() + io.SetIn( + strings.NewReader( + fmt.Sprintf( + "%s\n", + password, + ), + ), + ) - _, err = file.WriteString(data) - require.NoError(t, err) + // Create the command + cmd := NewTxsCmd(io) + args := []string{ + "add", + "packages", + "--genesis-path", + tempGenesis.Name(), + "--key-name", + keyname, // non-existent key name + "--gno-home", + keybaseDir, // temporaryDir for keybase + "--insecure-password-stdin", + dir, } - // Create the gno.mod file - createFile( - filepath.Join(dir, "gno.mod"), - fmt.Sprintf("module %s\n", packagePath), + // Run the command + cmdErr := cmd.ParseAndRun(context.Background(), args) + require.NoError(t, cmdErr) + + // Validate the transactions were written down + updatedGenesis, err := types.GenesisDocFromFile(tempGenesis.Name()) + require.NoError(t, err) + require.NotNil(t, updatedGenesis.AppState) + + // Fetch the state + state := updatedGenesis.AppState.(gnoland.GnoGenesisState) + + require.Equal(t, 1, len(state.Txs)) + require.Equal(t, 1, len(state.Txs[0].Tx.Msgs)) + + msgAddPkg, ok := state.Txs[0].Tx.Msgs[0].(vmm.MsgAddPackage) + require.True(t, ok) + require.Equal(t, info.GetPubKey(), state.Txs[0].Tx.Signatures[0].PubKey) + require.Equal(t, addPkgExpectedSignature, hex.EncodeToString(state.Txs[0].Tx.Signatures[0].Signature)) + + assert.Equal(t, packagePath, msgAddPkg.Package.Path) + }) + + t.Run("ok default key", func(t *testing.T) { + t.Parallel() + + tempGenesis, cleanup := testutils.NewTestFile(t) + t.Cleanup(cleanup) + + genesis := common.GetDefaultGenesis() + require.NoError(t, genesis.SaveAs(tempGenesis.Name())) + // Prepare the package + var ( + packagePath = "gno.land/p/demo/cuttlas" + dir = t.TempDir() + keybaseDir = t.TempDir() ) + createValidFile(t, dir, packagePath) + + // Create the command + cmd := NewTxsCmd(commands.NewTestIO()) + args := []string{ + "add", + "packages", + "--genesis-path", + tempGenesis.Name(), + "--gno-home", + keybaseDir, // temporaryDir for keybase + dir, + } + + // Run the command + cmdErr := cmd.ParseAndRun(context.Background(), args) + require.NoError(t, cmdErr) + + 
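+		// No --key-name flag is passed here, so the command falls back to the
+		// built-in in-memory test1 account and produces a deterministic signature.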
// Validate the transactions were written down + updatedGenesis, err := types.GenesisDocFromFile(tempGenesis.Name()) + require.NoError(t, err) + require.NotNil(t, updatedGenesis.AppState) - // Create a simple main.gno - createFile( - filepath.Join(dir, "main.gno"), - "package cuttlas\n\nfunc Example() string {\nreturn \"Manos arriba!\"\n}", + // Fetch the state + state := updatedGenesis.AppState.(gnoland.GnoGenesisState) + + require.Equal(t, 1, len(state.Txs)) + require.Equal(t, 1, len(state.Txs[0].Tx.Msgs)) + + msgAddPkg, ok := state.Txs[0].Tx.Msgs[0].(vmm.MsgAddPackage) + require.True(t, ok) + require.Equal(t, defaultAccount_publicKey, state.Txs[0].Tx.Signatures[0].PubKey.String()) + require.Equal(t, addPkgExpectedSignature, hex.EncodeToString(state.Txs[0].Tx.Signatures[0].Signature)) + + assert.Equal(t, packagePath, msgAddPkg.Package.Path) + }) + + t.Run("valid package", func(t *testing.T) { + t.Parallel() + + tempGenesis, cleanup := testutils.NewTestFile(t) + t.Cleanup(cleanup) + + genesis := common.GetDefaultGenesis() + require.NoError(t, genesis.SaveAs(tempGenesis.Name())) + // Prepare the package + var ( + packagePath = "gno.land/p/demo/cuttlas" + dir = t.TempDir() ) + createValidFile(t, dir, packagePath) // Create the command cmd := NewTxsCmd(commands.NewTestIO()) @@ -148,7 +324,32 @@ func TestGenesis_Txs_Add_Packages(t *testing.T) { msgAddPkg, ok := state.Txs[0].Tx.Msgs[0].(vmm.MsgAddPackage) require.True(t, ok) + require.Equal(t, defaultAccount_publicKey, state.Txs[0].Tx.Signatures[0].PubKey.String()) + require.Equal(t, addPkgExpectedSignature, hex.EncodeToString(state.Txs[0].Tx.Signatures[0].Signature)) assert.Equal(t, packagePath, msgAddPkg.Package.Path) }) } + +func createValidFile(t *testing.T, dir string, packagePath string) { + t.Helper() + createFile := func(path, data string) { + file, err := os.Create(path) + require.NoError(t, err) + + _, err = file.WriteString(data) + require.NoError(t, err) + } + + // Create the gno.mod file + createFile( + filepath.Join(dir, "gno.mod"), + fmt.Sprintf("module %s\n", packagePath), + ) + + // Create a simple main.gno + createFile( + filepath.Join(dir, "main.gno"), + "package cuttlas\n\nfunc Example() string {\nreturn \"Manos arriba!\"\n}", + ) +} diff --git a/contribs/gnohealth/go.mod b/contribs/gnohealth/go.mod index 4f5862a0d2e..4a3f6392804 100644 --- a/contribs/gnohealth/go.mod +++ b/contribs/gnohealth/go.mod @@ -21,6 +21,7 @@ require ( github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/xid v1.6.0 // indirect + github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b // indirect github.com/stretchr/testify v1.9.0 // indirect go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 // indirect @@ -34,6 +35,7 @@ require ( golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect diff --git a/contribs/gnohealth/go.sum b/contribs/gnohealth/go.sum index dd287d9ca84..02e8893406a 100644 --- a/contribs/gnohealth/go.sum +++ b/contribs/gnohealth/go.sum @@ -103,6 +103,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod 
h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b h1:oV47z+jotrLVvhiLRNzACVe7/qZ8DcRlMlDucR/FARo= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b/go.mod h1:JprPCeMgYyLKJoAy9nxpVScm7NwFSwpibdrUKm4kcw0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -149,6 +151,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/contribs/gnokeykc/go.mod b/contribs/gnokeykc/go.mod index 479daed22f6..157b5585828 100644 --- a/contribs/gnokeykc/go.mod +++ b/contribs/gnokeykc/go.mod @@ -35,6 +35,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/xid v1.6.0 // indirect + github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b // indirect github.com/stretchr/testify v1.9.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/zondax/hid v0.9.2 // indirect @@ -52,6 +53,7 @@ require ( golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect diff --git a/contribs/gnokeykc/go.sum b/contribs/gnokeykc/go.sum index cacf6788d45..7aac05b84a0 100644 --- a/contribs/gnokeykc/go.sum +++ b/contribs/gnokeykc/go.sum @@ -124,6 +124,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b h1:oV47z+jotrLVvhiLRNzACVe7/qZ8DcRlMlDucR/FARo= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b/go.mod h1:JprPCeMgYyLKJoAy9nxpVScm7NwFSwpibdrUKm4kcw0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -180,6 +182,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/contribs/gnomigrate/go.mod b/contribs/gnomigrate/go.mod index cd31adc4f6f..49f40eb79af 100644 --- a/contribs/gnomigrate/go.mod +++ b/contribs/gnomigrate/go.mod @@ -29,6 +29,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.11.1 // indirect github.com/rs/xid v1.6.0 // indirect + github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect go.etcd.io/bbolt v1.3.11 // indirect go.opentelemetry.io/otel v1.29.0 // indirect @@ -44,6 +45,7 @@ require ( golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect diff --git a/contribs/gnomigrate/go.sum b/contribs/gnomigrate/go.sum index 7ba3aede534..7e4a683cad1 100644 --- a/contribs/gnomigrate/go.sum +++ b/contribs/gnomigrate/go.sum @@ -120,6 +120,8 @@ github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b h1:oV47z+jotrLVvhiLRNzACVe7/qZ8DcRlMlDucR/FARo= +github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b/go.mod h1:JprPCeMgYyLKJoAy9nxpVScm7NwFSwpibdrUKm4kcw0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/examples/gno.land/p/demo/grc/grc20/token.gno b/examples/gno.land/p/demo/grc/grc20/token.gno index 3ab3abc63a3..4986eaebf04 100644 --- a/examples/gno.land/p/demo/grc/grc20/token.gno +++ b/examples/gno.land/p/demo/grc/grc20/token.gno @@ -1,7 +1,6 @@ package grc20 import ( - "math/overflow" "std" "strconv" @@ -170,17 +169,24 @@ func (led *PrivateLedger) Approve(owner, spender std.Address, amount uint64) err } // Mint increases the total supply of the token and adds the specified amount to the specified address. 
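+// It returns ErrOverflow if adding amount to the current total supply would
+// overflow the signed 64-bit range used for the overflow check.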
-func (led *PrivateLedger) Mint(address std.Address, amount uint64) error { +func (led *PrivateLedger) Mint(address std.Address, amount uint64) (err error) { if !address.IsValid() { return ErrInvalidAddress } - // XXX: math/overflow is not supporting uint64. - // This checks prevents overflow but makes the totalSupply limited to a uint63. - sum, ok := overflow.Add64(int64(led.totalSupply), int64(amount)) - if !ok { - return ErrOverflow - } + defer func() { + if r := recover(); r != nil { + if r != "addition overflow" { + panic(r) + } + err = ErrOverflow + } + }() + + // Convert amount and totalSupply to signed integers to enable + // overflow checking (not occuring on unsigned) when computing the sum. + // The maximum value for totalSupply is therefore 1<<63. + sum := int64(led.totalSupply) + int64(amount) led.totalSupply = uint64(sum) currentBalance := led.balanceOf(address) diff --git a/examples/gno.land/p/moul/fp/fp.gno b/examples/gno.land/p/moul/fp/fp.gno new file mode 100644 index 00000000000..b2811c77d5a --- /dev/null +++ b/examples/gno.land/p/moul/fp/fp.gno @@ -0,0 +1,270 @@ +// Package fp provides functional programming utilities for Gno, enabling +// transformations, filtering, and other operations on slices of interface{}. +// +// Example of chaining operations: +// +// numbers := []interface{}{1, 2, 3, 4, 5, 6} +// +// // Define predicates, mappers and reducers +// isEven := func(v interface{}) bool { return v.(int)%2 == 0 } +// double := func(v interface{}) interface{} { return v.(int) * 2 } +// sum := func(a, b interface{}) interface{} { return a.(int) + b.(int) } +// +// // Chain operations: filter even numbers, double them, then sum +// evenNums := Filter(numbers, isEven) // [2, 4, 6] +// doubled := Map(evenNums, double) // [4, 8, 12] +// result := Reduce(doubled, sum, 0) // 24 +// +// // Alternative: group by even/odd, then get even numbers +// byMod2 := func(v interface{}) interface{} { return v.(int) % 2 } +// grouped := GroupBy(numbers, byMod2) // {0: [2,4,6], 1: [1,3,5]} +// evens := grouped[0] // [2,4,6] +package fp + +// Mapper is a function type that maps an element to another element. +type Mapper func(interface{}) interface{} + +// Predicate is a function type that evaluates a condition on an element. +type Predicate func(interface{}) bool + +// Reducer is a function type that reduces two elements to a single value. +type Reducer func(interface{}, interface{}) interface{} + +// Filter filters elements from the slice that satisfy the given predicate. +// +// Example: +// +// numbers := []interface{}{-1, 0, 1, 2} +// isPositive := func(v interface{}) bool { return v.(int) > 0 } +// result := Filter(numbers, isPositive) // [1, 2] +func Filter(values []interface{}, fn Predicate) []interface{} { + result := []interface{}{} + for _, v := range values { + if fn(v) { + result = append(result, v) + } + } + return result +} + +// Map applies a function to each element in the slice. +// +// Example: +// +// numbers := []interface{}{1, 2, 3} +// toString := func(v interface{}) interface{} { return fmt.Sprintf("%d", v) } +// result := Map(numbers, toString) // ["1", "2", "3"] +func Map(values []interface{}, fn Mapper) []interface{} { + result := make([]interface{}, len(values)) + for i, v := range values { + result[i] = fn(v) + } + return result +} + +// Reduce reduces a slice to a single value by applying a function. 
+// +// Example: +// +// numbers := []interface{}{1, 2, 3, 4} +// sum := func(a, b interface{}) interface{} { return a.(int) + b.(int) } +// result := Reduce(numbers, sum, 0) // 10 +func Reduce(values []interface{}, fn Reducer, initial interface{}) interface{} { + acc := initial + for _, v := range values { + acc = fn(acc, v) + } + return acc +} + +// FlatMap maps each element to a collection and flattens the results. +// +// Example: +// +// words := []interface{}{"hello", "world"} +// split := func(v interface{}) interface{} { +// chars := []interface{}{} +// for _, c := range v.(string) { +// chars = append(chars, string(c)) +// } +// return chars +// } +// result := FlatMap(words, split) // ["h","e","l","l","o","w","o","r","l","d"] +func FlatMap(values []interface{}, fn Mapper) []interface{} { + result := []interface{}{} + for _, v := range values { + inner := fn(v).([]interface{}) + result = append(result, inner...) + } + return result +} + +// All returns true if all elements satisfy the predicate. +// +// Example: +// +// numbers := []interface{}{2, 4, 6, 8} +// isEven := func(v interface{}) bool { return v.(int)%2 == 0 } +// result := All(numbers, isEven) // true +func All(values []interface{}, fn Predicate) bool { + for _, v := range values { + if !fn(v) { + return false + } + } + return true +} + +// Any returns true if at least one element satisfies the predicate. +// +// Example: +// +// numbers := []interface{}{1, 3, 4, 7} +// isEven := func(v interface{}) bool { return v.(int)%2 == 0 } +// result := Any(numbers, isEven) // true (4 is even) +func Any(values []interface{}, fn Predicate) bool { + for _, v := range values { + if fn(v) { + return true + } + } + return false +} + +// None returns true if no elements satisfy the predicate. +// +// Example: +// +// numbers := []interface{}{1, 3, 5, 7} +// isEven := func(v interface{}) bool { return v.(int)%2 == 0 } +// result := None(numbers, isEven) // true (no even numbers) +func None(values []interface{}, fn Predicate) bool { + for _, v := range values { + if fn(v) { + return false + } + } + return true +} + +// Chunk splits a slice into chunks of the given size. +// +// Example: +// +// numbers := []interface{}{1, 2, 3, 4, 5} +// result := Chunk(numbers, 2) // [[1,2], [3,4], [5]] +func Chunk(values []interface{}, size int) [][]interface{} { + if size <= 0 { + return nil + } + var chunks [][]interface{} + for i := 0; i < len(values); i += size { + end := i + size + if end > len(values) { + end = len(values) + } + chunks = append(chunks, values[i:end]) + } + return chunks +} + +// Find returns the first element that satisfies the predicate and a boolean indicating if an element was found. +// +// Example: +// +// numbers := []interface{}{1, 2, 3, 4} +// isEven := func(v interface{}) bool { return v.(int)%2 == 0 } +// result, found := Find(numbers, isEven) // 2, true +func Find(values []interface{}, fn Predicate) (interface{}, bool) { + for _, v := range values { + if fn(v) { + return v, true + } + } + return nil, false +} + +// Reverse reverses the order of elements in a slice. +// +// Example: +// +// numbers := []interface{}{1, 2, 3} +// result := Reverse(numbers) // [3, 2, 1] +func Reverse(values []interface{}) []interface{} { + result := make([]interface{}, len(values)) + for i, v := range values { + result[len(values)-1-i] = v + } + return result +} + +// Zip combines two slices into a slice of pairs. If the slices have different lengths, +// extra elements from the longer slice are ignored. 
+// +// Example: +// +// a := []interface{}{1, 2, 3} +// b := []interface{}{"a", "b", "c"} +// result := Zip(a, b) // [[1,"a"], [2,"b"], [3,"c"]] +func Zip(a, b []interface{}) [][2]interface{} { + length := min(len(a), len(b)) + result := make([][2]interface{}, length) + for i := 0; i < length; i++ { + result[i] = [2]interface{}{a[i], b[i]} + } + return result +} + +// Unzip splits a slice of pairs into two separate slices. +// +// Example: +// +// pairs := [][2]interface{}{{1,"a"}, {2,"b"}, {3,"c"}} +// numbers, letters := Unzip(pairs) // [1,2,3], ["a","b","c"] +func Unzip(pairs [][2]interface{}) ([]interface{}, []interface{}) { + a := make([]interface{}, len(pairs)) + b := make([]interface{}, len(pairs)) + for i, pair := range pairs { + a[i] = pair[0] + b[i] = pair[1] + } + return a, b +} + +// GroupBy groups elements based on a key returned by a Mapper. +// +// Example: +// +// numbers := []interface{}{1, 2, 3, 4, 5, 6} +// byMod3 := func(v interface{}) interface{} { return v.(int) % 3 } +// result := GroupBy(numbers, byMod3) // {0: [3,6], 1: [1,4], 2: [2,5]} +func GroupBy(values []interface{}, fn Mapper) map[interface{}][]interface{} { + result := make(map[interface{}][]interface{}) + for _, v := range values { + key := fn(v) + result[key] = append(result[key], v) + } + return result +} + +// Flatten flattens a slice of slices into a single slice. +// +// Example: +// +// nested := [][]interface{}{{1,2}, {3,4}, {5}} +// result := Flatten(nested) // [1,2,3,4,5] +func Flatten(values [][]interface{}) []interface{} { + result := []interface{}{} + for _, v := range values { + result = append(result, v...) + } + return result +} + +// Helper functions +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/examples/gno.land/p/moul/fp/fp_test.gno b/examples/gno.land/p/moul/fp/fp_test.gno new file mode 100644 index 00000000000..00957486fe9 --- /dev/null +++ b/examples/gno.land/p/moul/fp/fp_test.gno @@ -0,0 +1,666 @@ +package fp + +import ( + "fmt" + "testing" +) + +func TestMap(t *testing.T) { + tests := []struct { + name string + input []interface{} + fn func(interface{}) interface{} + expected []interface{} + }{ + { + name: "multiply numbers by 2", + input: []interface{}{1, 2, 3}, + fn: func(v interface{}) interface{} { return v.(int) * 2 }, + expected: []interface{}{2, 4, 6}, + }, + { + name: "empty slice", + input: []interface{}{}, + fn: func(v interface{}) interface{} { return v.(int) * 2 }, + expected: []interface{}{}, + }, + { + name: "convert numbers to strings", + input: []interface{}{1, 2, 3}, + fn: func(v interface{}) interface{} { return fmt.Sprintf("%d", v.(int)) }, + expected: []interface{}{"1", "2", "3"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := Map(tt.input, tt.fn) + if !equalSlices(result, tt.expected) { + t.Errorf("Map failed, expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestFilter(t *testing.T) { + tests := []struct { + name string + input []interface{} + fn func(interface{}) bool + expected []interface{} + }{ + { + name: "filter even numbers", + input: []interface{}{1, 2, 3, 4}, + fn: func(v interface{}) bool { return v.(int)%2 == 0 }, + expected: []interface{}{2, 4}, + }, + { + name: "empty slice", + input: []interface{}{}, + fn: func(v interface{}) bool { return v.(int)%2 == 0 }, + expected: []interface{}{}, + }, + { + name: "no matches", + input: []interface{}{1, 3, 5}, + fn: func(v interface{}) bool { return v.(int)%2 == 0 }, + expected: []interface{}{}, + }, + { + name: "all 
matches", + input: []interface{}{2, 4, 6}, + fn: func(v interface{}) bool { return v.(int)%2 == 0 }, + expected: []interface{}{2, 4, 6}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := Filter(tt.input, tt.fn) + if !equalSlices(result, tt.expected) { + t.Errorf("Filter failed, expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestReduce(t *testing.T) { + tests := []struct { + name string + input []interface{} + fn func(interface{}, interface{}) interface{} + initial interface{} + expected interface{} + }{ + { + name: "sum numbers", + input: []interface{}{1, 2, 3}, + fn: func(a, b interface{}) interface{} { return a.(int) + b.(int) }, + initial: 0, + expected: 6, + }, + { + name: "empty slice", + input: []interface{}{}, + fn: func(a, b interface{}) interface{} { return a.(int) + b.(int) }, + initial: 0, + expected: 0, + }, + { + name: "concatenate strings", + input: []interface{}{"a", "b", "c"}, + fn: func(a, b interface{}) interface{} { return a.(string) + b.(string) }, + initial: "", + expected: "abc", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := Reduce(tt.input, tt.fn, tt.initial) + if result != tt.expected { + t.Errorf("Reduce failed, expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestFlatMap(t *testing.T) { + tests := []struct { + name string + input []interface{} + fn func(interface{}) interface{} + expected []interface{} + }{ + { + name: "split words into chars", + input: []interface{}{"go", "fn"}, + fn: func(word interface{}) interface{} { + chars := []interface{}{} + for _, c := range word.(string) { + chars = append(chars, string(c)) + } + return chars + }, + expected: []interface{}{"g", "o", "f", "n"}, + }, + { + name: "empty string handling", + input: []interface{}{"", "a", ""}, + fn: func(word interface{}) interface{} { + chars := []interface{}{} + for _, c := range word.(string) { + chars = append(chars, string(c)) + } + return chars + }, + expected: []interface{}{"a"}, + }, + { + name: "nil handling", + input: []interface{}{nil, "a", nil}, + fn: func(word interface{}) interface{} { + if word == nil { + return []interface{}{} + } + return []interface{}{word} + }, + expected: []interface{}{"a"}, + }, + { + name: "empty slice result", + input: []interface{}{"", "", ""}, + fn: func(word interface{}) interface{} { + return []interface{}{} + }, + expected: []interface{}{}, + }, + { + name: "nested array flattening", + input: []interface{}{1, 2, 3}, + fn: func(n interface{}) interface{} { + return []interface{}{n, n} + }, + expected: []interface{}{1, 1, 2, 2, 3, 3}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := FlatMap(tt.input, tt.fn) + if !equalSlices(result, tt.expected) { + t.Errorf("FlatMap failed, expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestAllAnyNone(t *testing.T) { + tests := []struct { + name string + input []interface{} + fn func(interface{}) bool + expectedAll bool + expectedAny bool + expectedNone bool + }{ + { + name: "all even numbers", + input: []interface{}{2, 4, 6, 8}, + fn: func(x interface{}) bool { return x.(int)%2 == 0 }, + expectedAll: true, + expectedAny: true, + expectedNone: false, + }, + { + name: "no even numbers", + input: []interface{}{1, 3, 5, 7}, + fn: func(x interface{}) bool { return x.(int)%2 == 0 }, + expectedAll: false, + expectedAny: false, + expectedNone: true, + }, + { + name: "mixed even/odd numbers", + input: []interface{}{1, 2, 3, 4}, + fn: func(x 
interface{}) bool { return x.(int)%2 == 0 }, + expectedAll: false, + expectedAny: true, + expectedNone: false, + }, + { + name: "empty slice", + input: []interface{}{}, + fn: func(x interface{}) bool { return x.(int)%2 == 0 }, + expectedAll: true, // vacuously true + expectedAny: false, // vacuously false + expectedNone: true, // vacuously true + }, + { + name: "nil predicate handling", + input: []interface{}{nil, nil, nil}, + fn: func(x interface{}) bool { return x == nil }, + expectedAll: true, + expectedAny: true, + expectedNone: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resultAll := All(tt.input, tt.fn) + if resultAll != tt.expectedAll { + t.Errorf("All failed, expected %v, got %v", tt.expectedAll, resultAll) + } + + resultAny := Any(tt.input, tt.fn) + if resultAny != tt.expectedAny { + t.Errorf("Any failed, expected %v, got %v", tt.expectedAny, resultAny) + } + + resultNone := None(tt.input, tt.fn) + if resultNone != tt.expectedNone { + t.Errorf("None failed, expected %v, got %v", tt.expectedNone, resultNone) + } + }) + } +} + +func TestChunk(t *testing.T) { + tests := []struct { + name string + input []interface{} + size int + expected [][]interface{} + }{ + { + name: "normal chunks", + input: []interface{}{1, 2, 3, 4, 5}, + size: 2, + expected: [][]interface{}{{1, 2}, {3, 4}, {5}}, + }, + { + name: "empty slice", + input: []interface{}{}, + size: 2, + expected: [][]interface{}{}, + }, + { + name: "chunk size equals length", + input: []interface{}{1, 2, 3}, + size: 3, + expected: [][]interface{}{{1, 2, 3}}, + }, + { + name: "chunk size larger than length", + input: []interface{}{1, 2}, + size: 3, + expected: [][]interface{}{{1, 2}}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := Chunk(tt.input, tt.size) + if !equalNestedSlices(result, tt.expected) { + t.Errorf("Chunk failed, expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestFind(t *testing.T) { + tests := []struct { + name string + input []interface{} + fn func(interface{}) bool + expected interface{} + shouldFound bool + }{ + { + name: "find first number greater than 2", + input: []interface{}{1, 2, 3, 4}, + fn: func(v interface{}) bool { return v.(int) > 2 }, + expected: 3, + shouldFound: true, + }, + { + name: "empty slice", + input: []interface{}{}, + fn: func(v interface{}) bool { return v.(int) > 2 }, + expected: nil, + shouldFound: false, + }, + { + name: "no match", + input: []interface{}{1, 2}, + fn: func(v interface{}) bool { return v.(int) > 10 }, + expected: nil, + shouldFound: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, found := Find(tt.input, tt.fn) + if found != tt.shouldFound { + t.Errorf("Find failed, expected found=%v, got found=%v", tt.shouldFound, found) + } + if found && result != tt.expected { + t.Errorf("Find failed, expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestReverse(t *testing.T) { + tests := []struct { + name string + input []interface{} + expected []interface{} + }{ + { + name: "normal sequence", + input: []interface{}{1, 2, 3, 4}, + expected: []interface{}{4, 3, 2, 1}, + }, + { + name: "empty slice", + input: []interface{}{}, + expected: []interface{}{}, + }, + { + name: "single element", + input: []interface{}{1}, + expected: []interface{}{1}, + }, + { + name: "mixed types", + input: []interface{}{1, "a", true, 2.5}, + expected: []interface{}{2.5, true, "a", 1}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + result := Reverse(tt.input) + if !equalSlices(result, tt.expected) { + t.Errorf("Reverse failed, expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestZipUnzip(t *testing.T) { + tests := []struct { + name string + a []interface{} + b []interface{} + expectedZip [][2]interface{} + expectedA []interface{} + expectedB []interface{} + }{ + { + name: "normal case", + a: []interface{}{1, 2, 3}, + b: []interface{}{"a", "b", "c"}, + expectedZip: [][2]interface{}{{1, "a"}, {2, "b"}, {3, "c"}}, + expectedA: []interface{}{1, 2, 3}, + expectedB: []interface{}{"a", "b", "c"}, + }, + { + name: "empty slices", + a: []interface{}{}, + b: []interface{}{}, + expectedZip: [][2]interface{}{}, + expectedA: []interface{}{}, + expectedB: []interface{}{}, + }, + { + name: "different lengths - a shorter", + a: []interface{}{1, 2}, + b: []interface{}{"a", "b", "c"}, + expectedZip: [][2]interface{}{{1, "a"}, {2, "b"}}, + expectedA: []interface{}{1, 2}, + expectedB: []interface{}{"a", "b"}, + }, + { + name: "different lengths - b shorter", + a: []interface{}{1, 2, 3}, + b: []interface{}{"a"}, + expectedZip: [][2]interface{}{{1, "a"}}, + expectedA: []interface{}{1}, + expectedB: []interface{}{"a"}, + }, + { + name: "mixed types", + a: []interface{}{1, true, "x"}, + b: []interface{}{2.5, false, "y"}, + expectedZip: [][2]interface{}{{1, 2.5}, {true, false}, {"x", "y"}}, + expectedA: []interface{}{1, true, "x"}, + expectedB: []interface{}{2.5, false, "y"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + zipped := Zip(tt.a, tt.b) + if len(zipped) != len(tt.expectedZip) { + t.Errorf("Zip failed, expected length %v, got %v", len(tt.expectedZip), len(zipped)) + } + for i, pair := range zipped { + if pair[0] != tt.expectedZip[i][0] || pair[1] != tt.expectedZip[i][1] { + t.Errorf("Zip failed at index %d, expected %v, got %v", i, tt.expectedZip[i], pair) + } + } + + unzippedA, unzippedB := Unzip(zipped) + if !equalSlices(unzippedA, tt.expectedA) { + t.Errorf("Unzip failed for slice A, expected %v, got %v", tt.expectedA, unzippedA) + } + if !equalSlices(unzippedB, tt.expectedB) { + t.Errorf("Unzip failed for slice B, expected %v, got %v", tt.expectedB, unzippedB) + } + }) + } +} + +func TestGroupBy(t *testing.T) { + tests := []struct { + name string + input []interface{} + fn func(interface{}) interface{} + expected map[interface{}][]interface{} + }{ + { + name: "group by even/odd", + input: []interface{}{1, 2, 3, 4, 5, 6}, + fn: func(v interface{}) interface{} { return v.(int) % 2 }, + expected: map[interface{}][]interface{}{ + 0: {2, 4, 6}, + 1: {1, 3, 5}, + }, + }, + { + name: "empty slice", + input: []interface{}{}, + fn: func(v interface{}) interface{} { return v.(int) % 2 }, + expected: map[interface{}][]interface{}{}, + }, + { + name: "single group", + input: []interface{}{2, 4, 6}, + fn: func(v interface{}) interface{} { return v.(int) % 2 }, + expected: map[interface{}][]interface{}{ + 0: {2, 4, 6}, + }, + }, + { + name: "group by type", + input: []interface{}{1, "a", 2, "b", true}, + fn: func(v interface{}) interface{} { + switch v.(type) { + case int: + return "int" + case string: + return "string" + default: + return "other" + } + }, + expected: map[interface{}][]interface{}{ + "int": {1, 2}, + "string": {"a", "b"}, + "other": {true}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GroupBy(tt.input, tt.fn) + if len(result) != len(tt.expected) { + t.Errorf("GroupBy failed, expected %d groups, got %d", 
len(tt.expected), len(result)) + } + for k, v := range tt.expected { + if !equalSlices(result[k], v) { + t.Errorf("GroupBy failed for key %v, expected %v, got %v", k, v, result[k]) + } + } + }) + } +} + +func TestFlatten(t *testing.T) { + tests := []struct { + name string + input [][]interface{} + expected []interface{} + }{ + { + name: "normal nested slices", + input: [][]interface{}{{1, 2}, {3, 4}, {5}}, + expected: []interface{}{1, 2, 3, 4, 5}, + }, + { + name: "empty outer slice", + input: [][]interface{}{}, + expected: []interface{}{}, + }, + { + name: "empty inner slices", + input: [][]interface{}{{}, {}, {}}, + expected: []interface{}{}, + }, + { + name: "mixed types", + input: [][]interface{}{{1, "a"}, {true, 2.5}, {nil}}, + expected: []interface{}{1, "a", true, 2.5, nil}, + }, + { + name: "single element slices", + input: [][]interface{}{{1}, {2}, {3}}, + expected: []interface{}{1, 2, 3}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := Flatten(tt.input) + if !equalSlices(result, tt.expected) { + t.Errorf("Flatten failed, expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestContains(t *testing.T) { + tests := []struct { + name string + slice []interface{} + item interface{} + expected bool + }{ + { + name: "contains integer", + slice: []interface{}{1, 2, 3}, + item: 2, + expected: true, + }, + { + name: "does not contain integer", + slice: []interface{}{1, 2, 3}, + item: 4, + expected: false, + }, + { + name: "contains string", + slice: []interface{}{"a", "b", "c"}, + item: "b", + expected: true, + }, + { + name: "empty slice", + slice: []interface{}{}, + item: 1, + expected: false, + }, + { + name: "contains nil", + slice: []interface{}{1, nil, 3}, + item: nil, + expected: true, + }, + { + name: "mixed types", + slice: []interface{}{1, "a", true}, + item: true, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := contains(tt.slice, tt.item) + if result != tt.expected { + t.Errorf("contains failed, expected %v, got %v", tt.expected, result) + } + }) + } +} + +// Helper function for testing +func contains(slice []interface{}, item interface{}) bool { + for _, v := range slice { + if v == item { + return true + } + } + return false +} + +// Helper functions for comparing slices +func equalSlices(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func equalNestedSlices(a, b [][]interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !equalSlices(a[i], b[i]) { + return false + } + } + return true +} diff --git a/examples/gno.land/p/moul/fp/gno.mod b/examples/gno.land/p/moul/fp/gno.mod new file mode 100644 index 00000000000..905fa0f1c0e --- /dev/null +++ b/examples/gno.land/p/moul/fp/gno.mod @@ -0,0 +1 @@ +module gno.land/p/moul/fp diff --git a/examples/gno.land/p/moul/memo/gno.mod b/examples/gno.land/p/moul/memo/gno.mod new file mode 100644 index 00000000000..4a9948c30f7 --- /dev/null +++ b/examples/gno.land/p/moul/memo/gno.mod @@ -0,0 +1 @@ +module gno.land/p/moul/memo diff --git a/examples/gno.land/p/moul/memo/memo.gno b/examples/gno.land/p/moul/memo/memo.gno new file mode 100644 index 00000000000..e31f13aab15 --- /dev/null +++ b/examples/gno.land/p/moul/memo/memo.gno @@ -0,0 +1,134 @@ +// Package memo provides a simple memoization utility to cache function results. 
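+// Cached entries are kept in a btree and ordered by the ufmt string
+// representation of their keys.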
+// +// The package offers a Memoizer type that can cache function results based on keys, +// with optional validation of cached values. This is useful for expensive computations +// that need to be cached and potentially invalidated based on custom conditions. +// +// /!\ Important Warning for Gno Usage: +// In Gno, storage updates only persist during transactions. This means: +// - Cache entries created during queries will NOT persist +// - Creating cache entries during queries will actually decrease performance +// as it wastes resources trying to save data that won't be saved +// +// Best Practices: +// - Use this pattern in transaction-driven contexts rather than query/render scenarios +// - Consider controlled cache updates, e.g., by specific accounts (like oracles) +// - Ideal for cases where cache updates happen every N blocks or on specific events +// - Carefully evaluate if caching will actually improve performance in your use case +// +// Basic usage example: +// +// m := memo.New() +// +// // Cache expensive computation +// result := m.Memoize("key", func() interface{} { +// // expensive operation +// return "computed-value" +// }) +// +// // Subsequent calls with same key return cached result +// result = m.Memoize("key", func() interface{} { +// // function won't be called, cached value is returned +// return "computed-value" +// }) +// +// Example with validation: +// +// type TimestampedValue struct { +// Value string +// Timestamp time.Time +// } +// +// m := memo.New() +// +// // Cache value with timestamp +// result := m.MemoizeWithValidator( +// "key", +// func() interface{} { +// return TimestampedValue{ +// Value: "data", +// Timestamp: time.Now(), +// } +// }, +// func(cached interface{}) bool { +// // Validate that the cached value is not older than 1 hour +// if tv, ok := cached.(TimestampedValue); ok { +// return time.Since(tv.Timestamp) < time.Hour +// } +// return false +// }, +// ) +package memo + +import ( + "gno.land/p/demo/btree" + "gno.land/p/demo/ufmt" +) + +// Record implements the btree.Record interface for our cache entries +type cacheEntry struct { + key interface{} + value interface{} +} + +// Less implements btree.Record interface +func (e cacheEntry) Less(than btree.Record) bool { + // Convert the other record to cacheEntry + other := than.(cacheEntry) + // Compare string representations of keys for consistent ordering + return ufmt.Sprintf("%v", e.key) < ufmt.Sprintf("%v", other.key) +} + +// Memoizer is a structure to handle memoization of function results. +type Memoizer struct { + cache *btree.BTree +} + +// New creates a new Memoizer instance. +func New() *Memoizer { + return &Memoizer{ + cache: btree.New(), + } +} + +// Memoize ensures the result of the given function is cached for the specified key. +func (m *Memoizer) Memoize(key interface{}, fn func() interface{}) interface{} { + entry := cacheEntry{key: key} + if found := m.cache.Get(entry); found != nil { + return found.(cacheEntry).value + } + + value := fn() + m.cache.Insert(cacheEntry{key: key, value: value}) + return value +} + +// MemoizeWithValidator ensures the result is cached and valid according to the validator function. 
+func (m *Memoizer) MemoizeWithValidator(key interface{}, fn func() interface{}, isValid func(interface{}) bool) interface{} { + entry := cacheEntry{key: key} + if found := m.cache.Get(entry); found != nil { + cachedEntry := found.(cacheEntry) + if isValid(cachedEntry.value) { + return cachedEntry.value + } + } + + value := fn() + m.cache.Insert(cacheEntry{key: key, value: value}) + return value +} + +// Invalidate removes the cached value for the specified key. +func (m *Memoizer) Invalidate(key interface{}) { + m.cache.Delete(cacheEntry{key: key}) +} + +// Clear clears all cached values. +func (m *Memoizer) Clear() { + m.cache.Clear(true) +} + +// Size returns the number of items currently in the cache. +func (m *Memoizer) Size() int { + return m.cache.Len() +} diff --git a/examples/gno.land/p/moul/memo/memo_test.gno b/examples/gno.land/p/moul/memo/memo_test.gno new file mode 100644 index 00000000000..44dde5df640 --- /dev/null +++ b/examples/gno.land/p/moul/memo/memo_test.gno @@ -0,0 +1,449 @@ +package memo + +import ( + "std" + "testing" + "time" +) + +type timestampedValue struct { + value interface{} + timestamp time.Time +} + +// complexKey is used to test struct keys +type complexKey struct { + ID int + Name string +} + +func TestMemoize(t *testing.T) { + tests := []struct { + name string + key interface{} + value interface{} + callCount *int + }{ + { + name: "string key and value", + key: "test-key", + value: "test-value", + callCount: new(int), + }, + { + name: "int key and value", + key: 42, + value: 123, + callCount: new(int), + }, + { + name: "mixed types", + key: "number", + value: 42, + callCount: new(int), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := New() + if m.Size() != 0 { + t.Errorf("Initial size = %d, want 0", m.Size()) + } + + fn := func() interface{} { + *tt.callCount++ + return tt.value + } + + // First call should compute + result := m.Memoize(tt.key, fn) + if result != tt.value { + t.Errorf("Memoize() = %v, want %v", result, tt.value) + } + if *tt.callCount != 1 { + t.Errorf("Function called %d times, want 1", *tt.callCount) + } + if m.Size() != 1 { + t.Errorf("Size after first call = %d, want 1", m.Size()) + } + + // Second call should use cache + result = m.Memoize(tt.key, fn) + if result != tt.value { + t.Errorf("Memoize() second call = %v, want %v", result, tt.value) + } + if *tt.callCount != 1 { + t.Errorf("Function called %d times, want 1", *tt.callCount) + } + if m.Size() != 1 { + t.Errorf("Size after second call = %d, want 1", m.Size()) + } + }) + } +} + +func TestMemoizeWithValidator(t *testing.T) { + tests := []struct { + name string + key interface{} + value interface{} + validDuration time.Duration + waitDuration time.Duration + expectedCalls int + shouldRecompute bool + }{ + { + name: "valid cache", + key: "key1", + value: "value1", + validDuration: time.Hour, + waitDuration: time.Millisecond, + expectedCalls: 1, + shouldRecompute: false, + }, + { + name: "expired cache", + key: "key2", + value: "value2", + validDuration: time.Millisecond, + waitDuration: time.Millisecond * 2, + expectedCalls: 2, + shouldRecompute: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := New() + callCount := 0 + + fn := func() interface{} { + callCount++ + return timestampedValue{ + value: tt.value, + timestamp: time.Now(), + } + } + + isValid := func(cached interface{}) bool { + if tv, ok := cached.(timestampedValue); ok { + return time.Since(tv.timestamp) < tt.validDuration + } + return false 
+ } + + // First call + result := m.MemoizeWithValidator(tt.key, fn, isValid) + if tv, ok := result.(timestampedValue); !ok || tv.value != tt.value { + t.Errorf("MemoizeWithValidator() = %v, want value %v", result, tt.value) + } + + // Wait + std.TestSkipHeights(10) + + // Second call + result = m.MemoizeWithValidator(tt.key, fn, isValid) + if tv, ok := result.(timestampedValue); !ok || tv.value != tt.value { + t.Errorf("MemoizeWithValidator() second call = %v, want value %v", result, tt.value) + } + + if callCount != tt.expectedCalls { + t.Errorf("Function called %d times, want %d", callCount, tt.expectedCalls) + } + }) + } +} + +func TestInvalidate(t *testing.T) { + tests := []struct { + name string + key interface{} + value interface{} + callCount *int + }{ + { + name: "invalidate existing key", + key: "test-key", + value: "test-value", + callCount: new(int), + }, + { + name: "invalidate non-existing key", + key: "missing-key", + value: "test-value", + callCount: new(int), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := New() + fn := func() interface{} { + *tt.callCount++ + return tt.value + } + + // First call + m.Memoize(tt.key, fn) + if m.Size() != 1 { + t.Errorf("Size after first call = %d, want 1", m.Size()) + } + + // Invalidate + m.Invalidate(tt.key) + if m.Size() != 0 { + t.Errorf("Size after invalidate = %d, want 0", m.Size()) + } + + // Call again should recompute + result := m.Memoize(tt.key, fn) + if result != tt.value { + t.Errorf("Memoize() after invalidate = %v, want %v", result, tt.value) + } + if *tt.callCount != 2 { + t.Errorf("Function called %d times, want 2", *tt.callCount) + } + if m.Size() != 1 { + t.Errorf("Size after recompute = %d, want 1", m.Size()) + } + }) + } +} + +func TestClear(t *testing.T) { + m := New() + callCount := 0 + + fn := func() interface{} { + callCount++ + return "value" + } + + // Cache some values + m.Memoize("key1", fn) + m.Memoize("key2", fn) + + if callCount != 2 { + t.Errorf("Initial calls = %d, want 2", callCount) + } + if m.Size() != 2 { + t.Errorf("Size after initial calls = %d, want 2", m.Size()) + } + + // Clear cache + m.Clear() + if m.Size() != 0 { + t.Errorf("Size after clear = %d, want 0", m.Size()) + } + + // Recompute values + m.Memoize("key1", fn) + m.Memoize("key2", fn) + + if callCount != 4 { + t.Errorf("Calls after clear = %d, want 4", callCount) + } + if m.Size() != 2 { + t.Errorf("Size after recompute = %d, want 2", m.Size()) + } +} + +func TestSize(t *testing.T) { + m := New() + + if m.Size() != 0 { + t.Errorf("Initial size = %d, want 0", m.Size()) + } + + callCount := 0 + fn := func() interface{} { + callCount++ + return "value" + } + + // Add items + m.Memoize("key1", fn) + if m.Size() != 1 { + t.Errorf("Size after first insert = %d, want 1", m.Size()) + } + + m.Memoize("key2", fn) + if m.Size() != 2 { + t.Errorf("Size after second insert = %d, want 2", m.Size()) + } + + // Duplicate key should not increase size + m.Memoize("key1", fn) + if m.Size() != 2 { + t.Errorf("Size after duplicate insert = %d, want 2", m.Size()) + } + + // Remove item + m.Invalidate("key1") + if m.Size() != 1 { + t.Errorf("Size after invalidate = %d, want 1", m.Size()) + } + + // Clear all + m.Clear() + if m.Size() != 0 { + t.Errorf("Size after clear = %d, want 0", m.Size()) + } +} + +func TestMemoizeWithDifferentKeyTypes(t *testing.T) { + tests := []struct { + name string + keys []interface{} // Now an array of keys + values []string // Corresponding values + callCount *int + }{ + { + name: "integer 
keys", + keys: []interface{}{42, 43}, + values: []string{"value-for-42", "value-for-43"}, + callCount: new(int), + }, + { + name: "float keys", + keys: []interface{}{3.14, 2.718}, + values: []string{"value-for-pi", "value-for-e"}, + callCount: new(int), + }, + { + name: "bool keys", + keys: []interface{}{true, false}, + values: []string{"value-for-true", "value-for-false"}, + callCount: new(int), + }, + /* + { + name: "struct keys", + keys: []interface{}{ + complexKey{ID: 1, Name: "test1"}, + complexKey{ID: 2, Name: "test2"}, + }, + values: []string{"value-for-struct1", "value-for-struct2"}, + callCount: new(int), + }, + { + name: "nil and empty interface keys", + keys: []interface{}{nil, interface{}(nil)}, + values: []string{"value-for-nil", "value-for-empty-interface"}, + callCount: new(int), + }, + */ + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := New() + + // Test both keys + for i, key := range tt.keys { + value := tt.values[i] + fn := func() interface{} { + *tt.callCount++ + return value + } + + // First call should compute + result := m.Memoize(key, fn) + if result != value { + t.Errorf("Memoize() for key %v = %v, want %v", key, result, value) + } + if *tt.callCount != i+1 { + t.Errorf("Function called %d times, want %d", *tt.callCount, i+1) + } + } + + // Verify size includes both entries + if m.Size() != 2 { + t.Errorf("Size after both inserts = %d, want 2", m.Size()) + } + + // Second call for each key should use cache + for i, key := range tt.keys { + initialCount := *tt.callCount + result := m.Memoize(key, func() interface{} { + *tt.callCount++ + return "should-not-be-called" + }) + + if result != tt.values[i] { + t.Errorf("Memoize() second call for key %v = %v, want %v", key, result, tt.values[i]) + } + if *tt.callCount != initialCount { + t.Errorf("Cache miss for key %v", key) + } + } + + // Test invalidate for each key + for i, key := range tt.keys { + m.Invalidate(key) + if m.Size() != 1-i { + t.Errorf("Size after invalidate %d = %d, want %d", i+1, m.Size(), 1-i) + } + } + }) + } +} + +func TestMultipleKeyTypes(t *testing.T) { + m := New() + callCount := 0 + + // Insert different key types simultaneously (two of each type) + keys := []interface{}{ + 42, 43, // ints + "string-key1", "string-key2", // strings + 3.14, 2.718, // floats + true, false, // bools + } + + for i, key := range keys { + value := i + m.Memoize(key, func() interface{} { + callCount++ + return value + }) + } + + // Verify size includes all entries + if m.Size() != len(keys) { + t.Errorf("Size = %d, want %d", m.Size(), len(keys)) + } + + // Verify all values are cached correctly + for i, key := range keys { + initialCount := callCount + result := m.Memoize(key, func() interface{} { + callCount++ + return -1 // Should never be returned if cache works + }) + + if result != i { + t.Errorf("Memoize(%v) = %v, want %v", key, result, i) + } + if callCount != initialCount { + t.Errorf("Cache miss for key %v", key) + } + } + + // Test invalidation of pairs + for i := 0; i < len(keys); i += 2 { + m.Invalidate(keys[i]) + m.Invalidate(keys[i+1]) + expectedSize := len(keys) - (i + 2) + if m.Size() != expectedSize { + t.Errorf("Size after invalidating pair %d = %d, want %d", i/2, m.Size(), expectedSize) + } + } + + // Clear and verify + m.Clear() + if m.Size() != 0 { + t.Errorf("Size after clear = %d, want 0", m.Size()) + } +} diff --git a/examples/gno.land/p/moul/xmath/generate.go b/examples/gno.land/p/moul/xmath/generate.go new file mode 100644 index 00000000000..ad70adb06bd --- 
/dev/null +++ b/examples/gno.land/p/moul/xmath/generate.go @@ -0,0 +1,3 @@ +package xmath + +//go:generate go run generator.go diff --git a/examples/gno.land/p/moul/xmath/generator.go b/examples/gno.land/p/moul/xmath/generator.go new file mode 100644 index 00000000000..afe5a4341fa --- /dev/null +++ b/examples/gno.land/p/moul/xmath/generator.go @@ -0,0 +1,184 @@ +//go:build ignore + +package main + +import ( + "bytes" + "fmt" + "go/format" + "log" + "os" + "strings" + "text/template" +) + +type Type struct { + Name string + ZeroValue string + Signed bool + Float bool +} + +var types = []Type{ + {"Int8", "0", true, false}, + {"Int16", "0", true, false}, + {"Int32", "0", true, false}, + {"Int64", "0", true, false}, + {"Int", "0", true, false}, + {"Uint8", "0", false, false}, + {"Uint16", "0", false, false}, + {"Uint32", "0", false, false}, + {"Uint64", "0", false, false}, + {"Uint", "0", false, false}, + {"Float32", "0.0", true, true}, + {"Float64", "0.0", true, true}, +} + +const sourceTpl = `// Code generated by generator.go; DO NOT EDIT. +package xmath + +{{ range .Types }} +// {{.Name}} helpers +func Max{{.Name}}(a, b {{.Name | lower}}) {{.Name | lower}} { + if a > b { + return a + } + return b +} + +func Min{{.Name}}(a, b {{.Name | lower}}) {{.Name | lower}} { + if a < b { + return a + } + return b +} + +func Clamp{{.Name}}(value, min, max {{.Name | lower}}) {{.Name | lower}} { + if value < min { + return min + } + if value > max { + return max + } + return value +} +{{if .Signed}} +func Abs{{.Name}}(x {{.Name | lower}}) {{.Name | lower}} { + if x < 0 { + return -x + } + return x +} + +func Sign{{.Name}}(x {{.Name | lower}}) {{.Name | lower}} { + if x < 0 { + return -1 + } + if x > 0 { + return 1 + } + return 0 +} +{{end}} +{{end}} +` + +const testTpl = `package xmath + +import "testing" + +{{range .Types}} +func Test{{.Name}}Helpers(t *testing.T) { + // Test Max{{.Name}} + if Max{{.Name}}(1, 2) != 2 { + t.Error("Max{{.Name}}(1, 2) should be 2") + } + {{if .Signed}}if Max{{.Name}}(-1, -2) != -1 { + t.Error("Max{{.Name}}(-1, -2) should be -1") + }{{end}} + + // Test Min{{.Name}} + if Min{{.Name}}(1, 2) != 1 { + t.Error("Min{{.Name}}(1, 2) should be 1") + } + {{if .Signed}}if Min{{.Name}}(-1, -2) != -2 { + t.Error("Min{{.Name}}(-1, -2) should be -2") + }{{end}} + + // Test Clamp{{.Name}} + if Clamp{{.Name}}(5, 1, 3) != 3 { + t.Error("Clamp{{.Name}}(5, 1, 3) should be 3") + } + if Clamp{{.Name}}(0, 1, 3) != 1 { + t.Error("Clamp{{.Name}}(0, 1, 3) should be 1") + } + if Clamp{{.Name}}(2, 1, 3) != 2 { + t.Error("Clamp{{.Name}}(2, 1, 3) should be 2") + } + {{if .Signed}} + // Test Abs{{.Name}} + if Abs{{.Name}}(-5) != 5 { + t.Error("Abs{{.Name}}(-5) should be 5") + } + if Abs{{.Name}}(5) != 5 { + t.Error("Abs{{.Name}}(5) should be 5") + } + + // Test Sign{{.Name}} + if Sign{{.Name}}(-5) != -1 { + t.Error("Sign{{.Name}}(-5) should be -1") + } + if Sign{{.Name}}(5) != 1 { + t.Error("Sign{{.Name}}(5) should be 1") + } + if Sign{{.Name}}({{.ZeroValue}}) != 0 { + t.Error("Sign{{.Name}}({{.ZeroValue}}) should be 0") + } + {{end}} +} +{{end}} +` + +func main() { + funcMap := template.FuncMap{ + "lower": strings.ToLower, + } + + // Generate source file + sourceTmpl := template.Must(template.New("source").Funcs(funcMap).Parse(sourceTpl)) + var sourceOut bytes.Buffer + if err := sourceTmpl.Execute(&sourceOut, struct{ Types []Type }{types}); err != nil { + log.Fatal(err) + } + + // Format the generated code + formattedSource, err := format.Source(sourceOut.Bytes()) + if err != nil { + log.Fatal(err) + } + 
+ // Write source file + if err := os.WriteFile("xmath.gen.gno", formattedSource, 0644); err != nil { + log.Fatal(err) + } + + // Generate test file + testTmpl := template.Must(template.New("test").Parse(testTpl)) + var testOut bytes.Buffer + if err := testTmpl.Execute(&testOut, struct{ Types []Type }{types}); err != nil { + log.Fatal(err) + } + + // Format the generated test code + formattedTest, err := format.Source(testOut.Bytes()) + if err != nil { + log.Fatal(err) + } + + // Write test file + if err := os.WriteFile("xmath.gen_test.gno", formattedTest, 0644); err != nil { + log.Fatal(err) + } + + fmt.Println("Generated xmath.gen.gno and xmath.gen_test.gno") +} diff --git a/examples/gno.land/p/moul/xmath/gno.mod b/examples/gno.land/p/moul/xmath/gno.mod new file mode 100644 index 00000000000..63b782c88f2 --- /dev/null +++ b/examples/gno.land/p/moul/xmath/gno.mod @@ -0,0 +1 @@ +module gno.land/p/moul/xmath diff --git a/examples/gno.land/p/moul/xmath/xmath.gen.gno b/examples/gno.land/p/moul/xmath/xmath.gen.gno new file mode 100644 index 00000000000..266c77e1e84 --- /dev/null +++ b/examples/gno.land/p/moul/xmath/xmath.gen.gno @@ -0,0 +1,421 @@ +// Code generated by generator.go; DO NOT EDIT. +package xmath + +// Int8 helpers +func MaxInt8(a, b int8) int8 { + if a > b { + return a + } + return b +} + +func MinInt8(a, b int8) int8 { + if a < b { + return a + } + return b +} + +func ClampInt8(value, min, max int8) int8 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +func AbsInt8(x int8) int8 { + if x < 0 { + return -x + } + return x +} + +func SignInt8(x int8) int8 { + if x < 0 { + return -1 + } + if x > 0 { + return 1 + } + return 0 +} + +// Int16 helpers +func MaxInt16(a, b int16) int16 { + if a > b { + return a + } + return b +} + +func MinInt16(a, b int16) int16 { + if a < b { + return a + } + return b +} + +func ClampInt16(value, min, max int16) int16 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +func AbsInt16(x int16) int16 { + if x < 0 { + return -x + } + return x +} + +func SignInt16(x int16) int16 { + if x < 0 { + return -1 + } + if x > 0 { + return 1 + } + return 0 +} + +// Int32 helpers +func MaxInt32(a, b int32) int32 { + if a > b { + return a + } + return b +} + +func MinInt32(a, b int32) int32 { + if a < b { + return a + } + return b +} + +func ClampInt32(value, min, max int32) int32 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +func AbsInt32(x int32) int32 { + if x < 0 { + return -x + } + return x +} + +func SignInt32(x int32) int32 { + if x < 0 { + return -1 + } + if x > 0 { + return 1 + } + return 0 +} + +// Int64 helpers +func MaxInt64(a, b int64) int64 { + if a > b { + return a + } + return b +} + +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func ClampInt64(value, min, max int64) int64 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +func AbsInt64(x int64) int64 { + if x < 0 { + return -x + } + return x +} + +func SignInt64(x int64) int64 { + if x < 0 { + return -1 + } + if x > 0 { + return 1 + } + return 0 +} + +// Int helpers +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} + +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} + +func ClampInt(value, min, max int) int { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +func AbsInt(x int) int { + if x < 0 { + return -x 
+ } + return x +} + +func SignInt(x int) int { + if x < 0 { + return -1 + } + if x > 0 { + return 1 + } + return 0 +} + +// Uint8 helpers +func MaxUint8(a, b uint8) uint8 { + if a > b { + return a + } + return b +} + +func MinUint8(a, b uint8) uint8 { + if a < b { + return a + } + return b +} + +func ClampUint8(value, min, max uint8) uint8 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +// Uint16 helpers +func MaxUint16(a, b uint16) uint16 { + if a > b { + return a + } + return b +} + +func MinUint16(a, b uint16) uint16 { + if a < b { + return a + } + return b +} + +func ClampUint16(value, min, max uint16) uint16 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +// Uint32 helpers +func MaxUint32(a, b uint32) uint32 { + if a > b { + return a + } + return b +} + +func MinUint32(a, b uint32) uint32 { + if a < b { + return a + } + return b +} + +func ClampUint32(value, min, max uint32) uint32 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +// Uint64 helpers +func MaxUint64(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func MinUint64(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +func ClampUint64(value, min, max uint64) uint64 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +// Uint helpers +func MaxUint(a, b uint) uint { + if a > b { + return a + } + return b +} + +func MinUint(a, b uint) uint { + if a < b { + return a + } + return b +} + +func ClampUint(value, min, max uint) uint { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +// Float32 helpers +func MaxFloat32(a, b float32) float32 { + if a > b { + return a + } + return b +} + +func MinFloat32(a, b float32) float32 { + if a < b { + return a + } + return b +} + +func ClampFloat32(value, min, max float32) float32 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +func AbsFloat32(x float32) float32 { + if x < 0 { + return -x + } + return x +} + +func SignFloat32(x float32) float32 { + if x < 0 { + return -1 + } + if x > 0 { + return 1 + } + return 0 +} + +// Float64 helpers +func MaxFloat64(a, b float64) float64 { + if a > b { + return a + } + return b +} + +func MinFloat64(a, b float64) float64 { + if a < b { + return a + } + return b +} + +func ClampFloat64(value, min, max float64) float64 { + if value < min { + return min + } + if value > max { + return max + } + return value +} + +func AbsFloat64(x float64) float64 { + if x < 0 { + return -x + } + return x +} + +func SignFloat64(x float64) float64 { + if x < 0 { + return -1 + } + if x > 0 { + return 1 + } + return 0 +} diff --git a/examples/gno.land/p/moul/xmath/xmath.gen_test.gno b/examples/gno.land/p/moul/xmath/xmath.gen_test.gno new file mode 100644 index 00000000000..16c80fc983d --- /dev/null +++ b/examples/gno.land/p/moul/xmath/xmath.gen_test.gno @@ -0,0 +1,466 @@ +package xmath + +import "testing" + +func TestInt8Helpers(t *testing.T) { + // Test MaxInt8 + if MaxInt8(1, 2) != 2 { + t.Error("MaxInt8(1, 2) should be 2") + } + if MaxInt8(-1, -2) != -1 { + t.Error("MaxInt8(-1, -2) should be -1") + } + + // Test MinInt8 + if MinInt8(1, 2) != 1 { + t.Error("MinInt8(1, 2) should be 1") + } + if MinInt8(-1, -2) != -2 { + t.Error("MinInt8(-1, -2) should be -2") + } + + // Test ClampInt8 + if ClampInt8(5, 1, 3) != 3 { + t.Error("ClampInt8(5, 1, 3) should be 3") + } + if ClampInt8(0, 
1, 3) != 1 { + t.Error("ClampInt8(0, 1, 3) should be 1") + } + if ClampInt8(2, 1, 3) != 2 { + t.Error("ClampInt8(2, 1, 3) should be 2") + } + + // Test AbsInt8 + if AbsInt8(-5) != 5 { + t.Error("AbsInt8(-5) should be 5") + } + if AbsInt8(5) != 5 { + t.Error("AbsInt8(5) should be 5") + } + + // Test SignInt8 + if SignInt8(-5) != -1 { + t.Error("SignInt8(-5) should be -1") + } + if SignInt8(5) != 1 { + t.Error("SignInt8(5) should be 1") + } + if SignInt8(0) != 0 { + t.Error("SignInt8(0) should be 0") + } + +} + +func TestInt16Helpers(t *testing.T) { + // Test MaxInt16 + if MaxInt16(1, 2) != 2 { + t.Error("MaxInt16(1, 2) should be 2") + } + if MaxInt16(-1, -2) != -1 { + t.Error("MaxInt16(-1, -2) should be -1") + } + + // Test MinInt16 + if MinInt16(1, 2) != 1 { + t.Error("MinInt16(1, 2) should be 1") + } + if MinInt16(-1, -2) != -2 { + t.Error("MinInt16(-1, -2) should be -2") + } + + // Test ClampInt16 + if ClampInt16(5, 1, 3) != 3 { + t.Error("ClampInt16(5, 1, 3) should be 3") + } + if ClampInt16(0, 1, 3) != 1 { + t.Error("ClampInt16(0, 1, 3) should be 1") + } + if ClampInt16(2, 1, 3) != 2 { + t.Error("ClampInt16(2, 1, 3) should be 2") + } + + // Test AbsInt16 + if AbsInt16(-5) != 5 { + t.Error("AbsInt16(-5) should be 5") + } + if AbsInt16(5) != 5 { + t.Error("AbsInt16(5) should be 5") + } + + // Test SignInt16 + if SignInt16(-5) != -1 { + t.Error("SignInt16(-5) should be -1") + } + if SignInt16(5) != 1 { + t.Error("SignInt16(5) should be 1") + } + if SignInt16(0) != 0 { + t.Error("SignInt16(0) should be 0") + } + +} + +func TestInt32Helpers(t *testing.T) { + // Test MaxInt32 + if MaxInt32(1, 2) != 2 { + t.Error("MaxInt32(1, 2) should be 2") + } + if MaxInt32(-1, -2) != -1 { + t.Error("MaxInt32(-1, -2) should be -1") + } + + // Test MinInt32 + if MinInt32(1, 2) != 1 { + t.Error("MinInt32(1, 2) should be 1") + } + if MinInt32(-1, -2) != -2 { + t.Error("MinInt32(-1, -2) should be -2") + } + + // Test ClampInt32 + if ClampInt32(5, 1, 3) != 3 { + t.Error("ClampInt32(5, 1, 3) should be 3") + } + if ClampInt32(0, 1, 3) != 1 { + t.Error("ClampInt32(0, 1, 3) should be 1") + } + if ClampInt32(2, 1, 3) != 2 { + t.Error("ClampInt32(2, 1, 3) should be 2") + } + + // Test AbsInt32 + if AbsInt32(-5) != 5 { + t.Error("AbsInt32(-5) should be 5") + } + if AbsInt32(5) != 5 { + t.Error("AbsInt32(5) should be 5") + } + + // Test SignInt32 + if SignInt32(-5) != -1 { + t.Error("SignInt32(-5) should be -1") + } + if SignInt32(5) != 1 { + t.Error("SignInt32(5) should be 1") + } + if SignInt32(0) != 0 { + t.Error("SignInt32(0) should be 0") + } + +} + +func TestInt64Helpers(t *testing.T) { + // Test MaxInt64 + if MaxInt64(1, 2) != 2 { + t.Error("MaxInt64(1, 2) should be 2") + } + if MaxInt64(-1, -2) != -1 { + t.Error("MaxInt64(-1, -2) should be -1") + } + + // Test MinInt64 + if MinInt64(1, 2) != 1 { + t.Error("MinInt64(1, 2) should be 1") + } + if MinInt64(-1, -2) != -2 { + t.Error("MinInt64(-1, -2) should be -2") + } + + // Test ClampInt64 + if ClampInt64(5, 1, 3) != 3 { + t.Error("ClampInt64(5, 1, 3) should be 3") + } + if ClampInt64(0, 1, 3) != 1 { + t.Error("ClampInt64(0, 1, 3) should be 1") + } + if ClampInt64(2, 1, 3) != 2 { + t.Error("ClampInt64(2, 1, 3) should be 2") + } + + // Test AbsInt64 + if AbsInt64(-5) != 5 { + t.Error("AbsInt64(-5) should be 5") + } + if AbsInt64(5) != 5 { + t.Error("AbsInt64(5) should be 5") + } + + // Test SignInt64 + if SignInt64(-5) != -1 { + t.Error("SignInt64(-5) should be -1") + } + if SignInt64(5) != 1 { + t.Error("SignInt64(5) should be 1") + } + if SignInt64(0) != 0 { + 
t.Error("SignInt64(0) should be 0") + } + +} + +func TestIntHelpers(t *testing.T) { + // Test MaxInt + if MaxInt(1, 2) != 2 { + t.Error("MaxInt(1, 2) should be 2") + } + if MaxInt(-1, -2) != -1 { + t.Error("MaxInt(-1, -2) should be -1") + } + + // Test MinInt + if MinInt(1, 2) != 1 { + t.Error("MinInt(1, 2) should be 1") + } + if MinInt(-1, -2) != -2 { + t.Error("MinInt(-1, -2) should be -2") + } + + // Test ClampInt + if ClampInt(5, 1, 3) != 3 { + t.Error("ClampInt(5, 1, 3) should be 3") + } + if ClampInt(0, 1, 3) != 1 { + t.Error("ClampInt(0, 1, 3) should be 1") + } + if ClampInt(2, 1, 3) != 2 { + t.Error("ClampInt(2, 1, 3) should be 2") + } + + // Test AbsInt + if AbsInt(-5) != 5 { + t.Error("AbsInt(-5) should be 5") + } + if AbsInt(5) != 5 { + t.Error("AbsInt(5) should be 5") + } + + // Test SignInt + if SignInt(-5) != -1 { + t.Error("SignInt(-5) should be -1") + } + if SignInt(5) != 1 { + t.Error("SignInt(5) should be 1") + } + if SignInt(0) != 0 { + t.Error("SignInt(0) should be 0") + } + +} + +func TestUint8Helpers(t *testing.T) { + // Test MaxUint8 + if MaxUint8(1, 2) != 2 { + t.Error("MaxUint8(1, 2) should be 2") + } + + // Test MinUint8 + if MinUint8(1, 2) != 1 { + t.Error("MinUint8(1, 2) should be 1") + } + + // Test ClampUint8 + if ClampUint8(5, 1, 3) != 3 { + t.Error("ClampUint8(5, 1, 3) should be 3") + } + if ClampUint8(0, 1, 3) != 1 { + t.Error("ClampUint8(0, 1, 3) should be 1") + } + if ClampUint8(2, 1, 3) != 2 { + t.Error("ClampUint8(2, 1, 3) should be 2") + } + +} + +func TestUint16Helpers(t *testing.T) { + // Test MaxUint16 + if MaxUint16(1, 2) != 2 { + t.Error("MaxUint16(1, 2) should be 2") + } + + // Test MinUint16 + if MinUint16(1, 2) != 1 { + t.Error("MinUint16(1, 2) should be 1") + } + + // Test ClampUint16 + if ClampUint16(5, 1, 3) != 3 { + t.Error("ClampUint16(5, 1, 3) should be 3") + } + if ClampUint16(0, 1, 3) != 1 { + t.Error("ClampUint16(0, 1, 3) should be 1") + } + if ClampUint16(2, 1, 3) != 2 { + t.Error("ClampUint16(2, 1, 3) should be 2") + } + +} + +func TestUint32Helpers(t *testing.T) { + // Test MaxUint32 + if MaxUint32(1, 2) != 2 { + t.Error("MaxUint32(1, 2) should be 2") + } + + // Test MinUint32 + if MinUint32(1, 2) != 1 { + t.Error("MinUint32(1, 2) should be 1") + } + + // Test ClampUint32 + if ClampUint32(5, 1, 3) != 3 { + t.Error("ClampUint32(5, 1, 3) should be 3") + } + if ClampUint32(0, 1, 3) != 1 { + t.Error("ClampUint32(0, 1, 3) should be 1") + } + if ClampUint32(2, 1, 3) != 2 { + t.Error("ClampUint32(2, 1, 3) should be 2") + } + +} + +func TestUint64Helpers(t *testing.T) { + // Test MaxUint64 + if MaxUint64(1, 2) != 2 { + t.Error("MaxUint64(1, 2) should be 2") + } + + // Test MinUint64 + if MinUint64(1, 2) != 1 { + t.Error("MinUint64(1, 2) should be 1") + } + + // Test ClampUint64 + if ClampUint64(5, 1, 3) != 3 { + t.Error("ClampUint64(5, 1, 3) should be 3") + } + if ClampUint64(0, 1, 3) != 1 { + t.Error("ClampUint64(0, 1, 3) should be 1") + } + if ClampUint64(2, 1, 3) != 2 { + t.Error("ClampUint64(2, 1, 3) should be 2") + } + +} + +func TestUintHelpers(t *testing.T) { + // Test MaxUint + if MaxUint(1, 2) != 2 { + t.Error("MaxUint(1, 2) should be 2") + } + + // Test MinUint + if MinUint(1, 2) != 1 { + t.Error("MinUint(1, 2) should be 1") + } + + // Test ClampUint + if ClampUint(5, 1, 3) != 3 { + t.Error("ClampUint(5, 1, 3) should be 3") + } + if ClampUint(0, 1, 3) != 1 { + t.Error("ClampUint(0, 1, 3) should be 1") + } + if ClampUint(2, 1, 3) != 2 { + t.Error("ClampUint(2, 1, 3) should be 2") + } + +} + +func TestFloat32Helpers(t *testing.T) { 
+ // Test MaxFloat32 + if MaxFloat32(1, 2) != 2 { + t.Error("MaxFloat32(1, 2) should be 2") + } + if MaxFloat32(-1, -2) != -1 { + t.Error("MaxFloat32(-1, -2) should be -1") + } + + // Test MinFloat32 + if MinFloat32(1, 2) != 1 { + t.Error("MinFloat32(1, 2) should be 1") + } + if MinFloat32(-1, -2) != -2 { + t.Error("MinFloat32(-1, -2) should be -2") + } + + // Test ClampFloat32 + if ClampFloat32(5, 1, 3) != 3 { + t.Error("ClampFloat32(5, 1, 3) should be 3") + } + if ClampFloat32(0, 1, 3) != 1 { + t.Error("ClampFloat32(0, 1, 3) should be 1") + } + if ClampFloat32(2, 1, 3) != 2 { + t.Error("ClampFloat32(2, 1, 3) should be 2") + } + + // Test AbsFloat32 + if AbsFloat32(-5) != 5 { + t.Error("AbsFloat32(-5) should be 5") + } + if AbsFloat32(5) != 5 { + t.Error("AbsFloat32(5) should be 5") + } + + // Test SignFloat32 + if SignFloat32(-5) != -1 { + t.Error("SignFloat32(-5) should be -1") + } + if SignFloat32(5) != 1 { + t.Error("SignFloat32(5) should be 1") + } + if SignFloat32(0.0) != 0 { + t.Error("SignFloat32(0.0) should be 0") + } + +} + +func TestFloat64Helpers(t *testing.T) { + // Test MaxFloat64 + if MaxFloat64(1, 2) != 2 { + t.Error("MaxFloat64(1, 2) should be 2") + } + if MaxFloat64(-1, -2) != -1 { + t.Error("MaxFloat64(-1, -2) should be -1") + } + + // Test MinFloat64 + if MinFloat64(1, 2) != 1 { + t.Error("MinFloat64(1, 2) should be 1") + } + if MinFloat64(-1, -2) != -2 { + t.Error("MinFloat64(-1, -2) should be -2") + } + + // Test ClampFloat64 + if ClampFloat64(5, 1, 3) != 3 { + t.Error("ClampFloat64(5, 1, 3) should be 3") + } + if ClampFloat64(0, 1, 3) != 1 { + t.Error("ClampFloat64(0, 1, 3) should be 1") + } + if ClampFloat64(2, 1, 3) != 2 { + t.Error("ClampFloat64(2, 1, 3) should be 2") + } + + // Test AbsFloat64 + if AbsFloat64(-5) != 5 { + t.Error("AbsFloat64(-5) should be 5") + } + if AbsFloat64(5) != 5 { + t.Error("AbsFloat64(5) should be 5") + } + + // Test SignFloat64 + if SignFloat64(-5) != -1 { + t.Error("SignFloat64(-5) should be -1") + } + if SignFloat64(5) != 1 { + t.Error("SignFloat64(5) should be 1") + } + if SignFloat64(0.0) != 0 { + t.Error("SignFloat64(0.0) should be 0") + } + +} diff --git a/examples/gno.land/r/stefann/home/home.gno b/examples/gno.land/r/stefann/home/home.gno index 9586f377311..f54721ce37c 100644 --- a/examples/gno.land/r/stefann/home/home.gno +++ b/examples/gno.land/r/stefann/home/home.gno @@ -8,6 +8,8 @@ import ( "gno.land/p/demo/avl" "gno.land/p/demo/ownable" "gno.land/p/demo/ufmt" + "gno.land/r/demo/users" + "gno.land/r/leon/hof" "gno.land/r/stefann/registry" ) @@ -23,7 +25,6 @@ type Sponsor struct { } type Profile struct { - pfp string aboutMe []string } @@ -49,15 +50,15 @@ var ( func init() { owner = ownable.NewWithAddress(registry.MainAddr()) + hof.Register() profile = Profile{ - pfp: "https://i.ibb.co/Bc5YNCx/DSC-0095a.jpg", aboutMe: []string{ - `### About Me`, - `Hey there! I’m Stefan, a student of Computer Science. I’m all about exploring and adventure — whether it’s diving into the latest tech or discovering a new city, I’m always up for the challenge!`, + `## About Me`, + `### Hey there! I’m Stefan, a student of Computer Science. 
I’m all about exploring and adventure — whether it’s diving into the latest tech or discovering a new city, I’m always up for the challenge!`, - `### Contributions`, - `I'm just getting started, but you can follow my journey through gno.land right [here](https://github.com/gnolang/hackerspace/issues/94) 🔗`, + `## Contributions`, + `### I'm just getting started, but you can follow my journey through gno.land right [here](https://github.com/gnolang/hackerspace/issues/94) 🔗`, }, } @@ -83,7 +84,7 @@ func init() { } sponsorship = Sponsorship{ - maxSponsors: 5, + maxSponsors: 3, sponsors: avl.NewTree(), DonationsCount: 0, sponsorsCount: 0, @@ -106,11 +107,6 @@ func UpdateJarLink(newLink string) { travel.jarLink = newLink } -func UpdatePFP(url string) { - owner.AssertCallerIsOwner() - profile.pfp = url -} - func UpdateAboutMe(aboutMeStr string) { owner.AssertCallerIsOwner() profile.aboutMe = strings.Split(aboutMeStr, "|") @@ -203,46 +199,27 @@ func Render(path string) string { } func renderAboutMe() string { - out := "
No sponsors yet. Be the first to tip the jar!
` + "\n" + return out + "No sponsors yet. Be the first to tip the jar!\n" } topSponsors := GetTopSponsors() @@ -266,38 +259,30 @@ func renderSponsors() string { numSponsors = sponsorship.maxSponsors } - out += `')
- }
- l := n.Lines().Len()
- for i := 0; i < l; i++ {
- line := n.Lines().At(i)
- r.Writer.RawWrite(w, line.Value(source))
- }
- if r.WrapperRenderer != nil {
- r.WrapperRenderer(w, c, false)
- } else {
- _, _ = w.WriteString("</code></pre>\n")
- }
- return ast.WalkContinue, nil
-}
-
-type highlighting struct {
- options []Option
-}
-
-// Highlighting is a goldmark.Extender implementation.
-var Highlighting = &highlighting{
- options: []Option{},
-}
-
-// NewHighlighting returns a new extension with given options.
-func NewHighlighting(opts ...Option) goldmark.Extender {
- return &highlighting{
- options: opts,
- }
-}
-
-// Extend implements goldmark.Extender.
-func (e *highlighting) Extend(m goldmark.Markdown) {
- m.Renderer().AddOptions(renderer.WithNodeRenderers(
- util.Prioritized(NewHTMLRenderer(e.options...), 200),
- ))
-}
diff --git a/gno.land/pkg/gnoweb/markdown/highlighting_test.go b/gno.land/pkg/gnoweb/markdown/highlighting_test.go
deleted file mode 100644
index 25bc4fedd61..00000000000
--- a/gno.land/pkg/gnoweb/markdown/highlighting_test.go
+++ /dev/null
@@ -1,568 +0,0 @@
-// This file was copied from https://github.com/yuin/goldmark-highlighting
-
-package markdown
-
-import (
- "bytes"
- "fmt"
- "strings"
- "testing"
-
- "github.com/alecthomas/chroma/v2"
- chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
- "github.com/yuin/goldmark"
- "github.com/yuin/goldmark/testutil"
- "github.com/yuin/goldmark/util"
-)
-
-func TestHighlighting(t *testing.T) {
- var css bytes.Buffer
- markdown := goldmark.New(
- goldmark.WithExtensions(
- NewHighlighting(
- WithStyle("monokai"),
- WithCSSWriter(&css),
- WithFormatOptions(
- chromahtml.WithClasses(true),
- chromahtml.WithLineNumbers(false),
- ),
- WithWrapperRenderer(func(w util.BufWriter, c CodeBlockContext, entering bool) {
- _, ok := c.Language()
- if entering {
- if !ok {
- w.WriteString("")
- return
- }
- w.WriteString(``)
- } else {
- if !ok {
- w.WriteString("")
- return
- }
- w.WriteString(``)
- }
- }),
- WithCodeBlockOptions(func(c CodeBlockContext) []chromahtml.Option {
- if language, ok := c.Language(); ok {
- // Turn on line numbers for Go only.
- if string(language) == "go" {
- return []chromahtml.Option{
- chromahtml.WithLineNumbers(true),
- }
- }
- }
- return nil
- }),
- ),
- ),
- )
- var buffer bytes.Buffer
- if err := markdown.Convert([]byte(`
-Title
-=======
-`+"``` go\n"+`func main() {
- fmt.Println("ok")
-}
-`+"```"+`
-`), &buffer); err != nil {
- t.Fatal(err)
- }
-
- if strings.TrimSpace(buffer.String()) != strings.TrimSpace(`
-Title
-1func main() {
-2 fmt.Println("ok")
-3}
-
-`) {
- t.Errorf("failed to render HTML\n%s", buffer.String())
- }
-
- expected := strings.TrimSpace(`/* Background */ .bg { color: #f8f8f2; background-color: #272822; }
-/* PreWrapper */ .chroma { color: #f8f8f2; background-color: #272822; }
-/* LineNumbers targeted by URL anchor */ .chroma .ln:target { color: #f8f8f2; background-color: #3c3d38 }
-/* LineNumbersTable targeted by URL anchor */ .chroma .lnt:target { color: #f8f8f2; background-color: #3c3d38 }
-/* Error */ .chroma .err { color: #960050; background-color: #1e0010 }
-/* LineLink */ .chroma .lnlinks { outline: none; text-decoration: none; color: inherit }
-/* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; }
-/* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; }
-/* LineHighlight */ .chroma .hl { background-color: #3c3d38 }
-/* LineNumbersTable */ .chroma .lnt { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f }
-/* LineNumbers */ .chroma .ln { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f }
-/* Line */ .chroma .line { display: flex; }
-/* Keyword */ .chroma .k { color: #66d9ef }
-/* KeywordConstant */ .chroma .kc { color: #66d9ef }
-/* KeywordDeclaration */ .chroma .kd { color: #66d9ef }
-/* KeywordNamespace */ .chroma .kn { color: #f92672 }
-/* KeywordPseudo */ .chroma .kp { color: #66d9ef }
-/* KeywordReserved */ .chroma .kr { color: #66d9ef }
-/* KeywordType */ .chroma .kt { color: #66d9ef }
-/* NameAttribute */ .chroma .na { color: #a6e22e }
-/* NameClass */ .chroma .nc { color: #a6e22e }
-/* NameConstant */ .chroma .no { color: #66d9ef }
-/* NameDecorator */ .chroma .nd { color: #a6e22e }
-/* NameException */ .chroma .ne { color: #a6e22e }
-/* NameFunction */ .chroma .nf { color: #a6e22e }
-/* NameOther */ .chroma .nx { color: #a6e22e }
-/* NameTag */ .chroma .nt { color: #f92672 }
-/* Literal */ .chroma .l { color: #ae81ff }
-/* LiteralDate */ .chroma .ld { color: #e6db74 }
-/* LiteralString */ .chroma .s { color: #e6db74 }
-/* LiteralStringAffix */ .chroma .sa { color: #e6db74 }
-/* LiteralStringBacktick */ .chroma .sb { color: #e6db74 }
-/* LiteralStringChar */ .chroma .sc { color: #e6db74 }
-/* LiteralStringDelimiter */ .chroma .dl { color: #e6db74 }
-/* LiteralStringDoc */ .chroma .sd { color: #e6db74 }
-/* LiteralStringDouble */ .chroma .s2 { color: #e6db74 }
-/* LiteralStringEscape */ .chroma .se { color: #ae81ff }
-/* LiteralStringHeredoc */ .chroma .sh { color: #e6db74 }
-/* LiteralStringInterpol */ .chroma .si { color: #e6db74 }
-/* LiteralStringOther */ .chroma .sx { color: #e6db74 }
-/* LiteralStringRegex */ .chroma .sr { color: #e6db74 }
-/* LiteralStringSingle */ .chroma .s1 { color: #e6db74 }
-/* LiteralStringSymbol */ .chroma .ss { color: #e6db74 }
-/* LiteralNumber */ .chroma .m { color: #ae81ff }
-/* LiteralNumberBin */ .chroma .mb { color: #ae81ff }
-/* LiteralNumberFloat */ .chroma .mf { color: #ae81ff }
-/* LiteralNumberHex */ .chroma .mh { color: #ae81ff }
-/* LiteralNumberInteger */ .chroma .mi { color: #ae81ff }
-/* LiteralNumberIntegerLong */ .chroma .il { color: #ae81ff }
-/* LiteralNumberOct */ .chroma .mo { color: #ae81ff }
-/* Operator */ .chroma .o { color: #f92672 }
-/* OperatorWord */ .chroma .ow { color: #f92672 }
-/* Comment */ .chroma .c { color: #75715e }
-/* CommentHashbang */ .chroma .ch { color: #75715e }
-/* CommentMultiline */ .chroma .cm { color: #75715e }
-/* CommentSingle */ .chroma .c1 { color: #75715e }
-/* CommentSpecial */ .chroma .cs { color: #75715e }
-/* CommentPreproc */ .chroma .cp { color: #75715e }
-/* CommentPreprocFile */ .chroma .cpf { color: #75715e }
-/* GenericDeleted */ .chroma .gd { color: #f92672 }
-/* GenericEmph */ .chroma .ge { font-style: italic }
-/* GenericInserted */ .chroma .gi { color: #a6e22e }
-/* GenericStrong */ .chroma .gs { font-weight: bold }
-/* GenericSubheading */ .chroma .gu { color: #75715e }`)
-
- gotten := strings.TrimSpace(css.String())
-
- if expected != gotten {
- diff := testutil.DiffPretty([]byte(expected), []byte(gotten))
- t.Errorf("incorrect CSS.\n%s", string(diff))
- }
-}
-
-func TestHighlighting2(t *testing.T) {
- markdown := goldmark.New(
- goldmark.WithExtensions(
- Highlighting,
- ),
- )
- var buffer bytes.Buffer
- if err := markdown.Convert([]byte(`
-Title
-=======
-`+"```"+`
-func main() {
- fmt.Println("ok")
-}
-`+"```"+`
-`), &buffer); err != nil {
- t.Fatal(err)
- }
-
- if strings.TrimSpace(buffer.String()) != strings.TrimSpace(`
-Title
-func main() {
- fmt.Println("ok")
-}
-
-`) {
- t.Error("failed to render HTML")
- }
-}
-
-func TestHighlighting3(t *testing.T) {
- markdown := goldmark.New(
- goldmark.WithExtensions(
- Highlighting,
- ),
- )
- var buffer bytes.Buffer
- if err := markdown.Convert([]byte(`
-Title
-=======
-
-`+"```"+`cpp {hl_lines=[1,2]}
-#include <iostream>
-int main() {
- std::cout<< "hello" << std::endl;
-}
-`+"```"+`
-`), &buffer); err != nil {
- t.Fatal(err)
- }
- if strings.TrimSpace(buffer.String()) != strings.TrimSpace(`
-Title
-#include <iostream>
-int main() {
- std::cout<< "hello" << std::endl;
-}
-
-`) {
- t.Errorf("failed to render HTML:\n%s", buffer.String())
- }
-}
-
-func TestHighlightingCustom(t *testing.T) {
- custom := chroma.MustNewStyle("custom", chroma.StyleEntries{
- chroma.Background: "#cccccc bg:#1d1d1d",
- chroma.Comment: "#999999",
- chroma.CommentSpecial: "#cd0000",
- chroma.Keyword: "#cc99cd",
- chroma.KeywordDeclaration: "#cc99cd",
- chroma.KeywordNamespace: "#cc99cd",
- chroma.KeywordType: "#cc99cd",
- chroma.Operator: "#67cdcc",
- chroma.OperatorWord: "#cdcd00",
- chroma.NameClass: "#f08d49",
- chroma.NameBuiltin: "#f08d49",
- chroma.NameFunction: "#f08d49",
- chroma.NameException: "bold #666699",
- chroma.NameVariable: "#00cdcd",
- chroma.LiteralString: "#7ec699",
- chroma.LiteralNumber: "#f08d49",
- chroma.LiteralStringBoolean: "#f08d49",
- chroma.GenericHeading: "bold #000080",
- chroma.GenericSubheading: "bold #800080",
- chroma.GenericDeleted: "#e2777a",
- chroma.GenericInserted: "#cc99cd",
- chroma.GenericError: "#e2777a",
- chroma.GenericEmph: "italic",
- chroma.GenericStrong: "bold",
- chroma.GenericPrompt: "bold #000080",
- chroma.GenericOutput: "#888",
- chroma.GenericTraceback: "#04D",
- chroma.GenericUnderline: "underline",
- chroma.Error: "border:#e2777a",
- })
-
- var css bytes.Buffer
- markdown := goldmark.New(
- goldmark.WithExtensions(
- NewHighlighting(
- WithStyle("monokai"), // to make sure it is overrided even if present
- WithCustomStyle(custom),
- WithCSSWriter(&css),
- WithFormatOptions(
- chromahtml.WithClasses(true),
- chromahtml.WithLineNumbers(false),
- ),
- WithWrapperRenderer(func(w util.BufWriter, c CodeBlockContext, entering bool) {
- _, ok := c.Language()
- if entering {
- if !ok {
- w.WriteString("")
- return
- }
- w.WriteString(``)
- } else {
- if !ok {
- w.WriteString("")
- return
- }
- w.WriteString(``)
- }
- }),
- WithCodeBlockOptions(func(c CodeBlockContext) []chromahtml.Option {
- if language, ok := c.Language(); ok {
- // Turn on line numbers for Go only.
- if string(language) == "go" {
- return []chromahtml.Option{
- chromahtml.WithLineNumbers(true),
- }
- }
- }
- return nil
- }),
- ),
- ),
- )
- var buffer bytes.Buffer
- if err := markdown.Convert([]byte(`
-Title
-=======
-`+"``` go\n"+`func main() {
- fmt.Println("ok")
-}
-`+"```"+`
-`), &buffer); err != nil {
- t.Fatal(err)
- }
-
- if strings.TrimSpace(buffer.String()) != strings.TrimSpace(`
-Title
-1func main() {
-2 fmt.Println("ok")
-3}
-
-`) {
- t.Error("failed to render HTML", buffer.String())
- }
-
- expected := strings.TrimSpace(`/* Background */ .bg { color: #cccccc; background-color: #1d1d1d; }
-/* PreWrapper */ .chroma { color: #cccccc; background-color: #1d1d1d; }
-/* LineNumbers targeted by URL anchor */ .chroma .ln:target { color: #cccccc; background-color: #333333 }
-/* LineNumbersTable targeted by URL anchor */ .chroma .lnt:target { color: #cccccc; background-color: #333333 }
-/* Error */ .chroma .err { }
-/* LineLink */ .chroma .lnlinks { outline: none; text-decoration: none; color: inherit }
-/* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; }
-/* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; }
-/* LineHighlight */ .chroma .hl { background-color: #333333 }
-/* LineNumbersTable */ .chroma .lnt { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #666666 }
-/* LineNumbers */ .chroma .ln { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #666666 }
-/* Line */ .chroma .line { display: flex; }
-/* Keyword */ .chroma .k { color: #cc99cd }
-/* KeywordConstant */ .chroma .kc { color: #cc99cd }
-/* KeywordDeclaration */ .chroma .kd { color: #cc99cd }
-/* KeywordNamespace */ .chroma .kn { color: #cc99cd }
-/* KeywordPseudo */ .chroma .kp { color: #cc99cd }
-/* KeywordReserved */ .chroma .kr { color: #cc99cd }
-/* KeywordType */ .chroma .kt { color: #cc99cd }
-/* NameBuiltin */ .chroma .nb { color: #f08d49 }
-/* NameClass */ .chroma .nc { color: #f08d49 }
-/* NameException */ .chroma .ne { color: #666699; font-weight: bold }
-/* NameFunction */ .chroma .nf { color: #f08d49 }
-/* NameVariable */ .chroma .nv { color: #00cdcd }
-/* LiteralString */ .chroma .s { color: #7ec699 }
-/* LiteralStringAffix */ .chroma .sa { color: #7ec699 }
-/* LiteralStringBacktick */ .chroma .sb { color: #7ec699 }
-/* LiteralStringChar */ .chroma .sc { color: #7ec699 }
-/* LiteralStringDelimiter */ .chroma .dl { color: #7ec699 }
-/* LiteralStringDoc */ .chroma .sd { color: #7ec699 }
-/* LiteralStringDouble */ .chroma .s2 { color: #7ec699 }
-/* LiteralStringEscape */ .chroma .se { color: #7ec699 }
-/* LiteralStringHeredoc */ .chroma .sh { color: #7ec699 }
-/* LiteralStringInterpol */ .chroma .si { color: #7ec699 }
-/* LiteralStringOther */ .chroma .sx { color: #7ec699 }
-/* LiteralStringRegex */ .chroma .sr { color: #7ec699 }
-/* LiteralStringSingle */ .chroma .s1 { color: #7ec699 }
-/* LiteralStringSymbol */ .chroma .ss { color: #7ec699 }
-/* LiteralNumber */ .chroma .m { color: #f08d49 }
-/* LiteralNumberBin */ .chroma .mb { color: #f08d49 }
-/* LiteralNumberFloat */ .chroma .mf { color: #f08d49 }
-/* LiteralNumberHex */ .chroma .mh { color: #f08d49 }
-/* LiteralNumberInteger */ .chroma .mi { color: #f08d49 }
-/* LiteralNumberIntegerLong */ .chroma .il { color: #f08d49 }
-/* LiteralNumberOct */ .chroma .mo { color: #f08d49 }
-/* Operator */ .chroma .o { color: #67cdcc }
-/* OperatorWord */ .chroma .ow { color: #cdcd00 }
-/* Comment */ .chroma .c { color: #999999 }
-/* CommentHashbang */ .chroma .ch { color: #999999 }
-/* CommentMultiline */ .chroma .cm { color: #999999 }
-/* CommentSingle */ .chroma .c1 { color: #999999 }
-/* CommentSpecial */ .chroma .cs { color: #cd0000 }
-/* CommentPreproc */ .chroma .cp { color: #999999 }
-/* CommentPreprocFile */ .chroma .cpf { color: #999999 }
-/* GenericDeleted */ .chroma .gd { color: #e2777a }
-/* GenericEmph */ .chroma .ge { font-style: italic }
-/* GenericError */ .chroma .gr { color: #e2777a }
-/* GenericHeading */ .chroma .gh { color: #000080; font-weight: bold }
-/* GenericInserted */ .chroma .gi { color: #cc99cd }
-/* GenericOutput */ .chroma .go { color: #888888 }
-/* GenericPrompt */ .chroma .gp { color: #000080; font-weight: bold }
-/* GenericStrong */ .chroma .gs { font-weight: bold }
-/* GenericSubheading */ .chroma .gu { color: #800080; font-weight: bold }
-/* GenericTraceback */ .chroma .gt { color: #0044dd }
-/* GenericUnderline */ .chroma .gl { text-decoration: underline }`)
-
- gotten := strings.TrimSpace(css.String())
-
- if expected != gotten {
- diff := testutil.DiffPretty([]byte(expected), []byte(gotten))
- t.Errorf("incorrect CSS.\n%s", string(diff))
- }
-}
-
-func TestHighlightingHlLines(t *testing.T) {
- markdown := goldmark.New(
- goldmark.WithExtensions(
- NewHighlighting(
- WithFormatOptions(
- chromahtml.WithClasses(true),
- ),
- ),
- ),
- )
-
- for i, test := range []struct {
- attributes string
- expect []int
- }{
- {`hl_lines=["2"]`, []int{2}},
- {`hl_lines=["2-3",5],linenostart=5`, []int{2, 3, 5}},
- {`hl_lines=["2-3"]`, []int{2, 3}},
- {`hl_lines=["2-3",5],linenostart="5"`, []int{2, 3}}, // linenostart must be a number. string values are ignored
- } {
- t.Run(fmt.Sprint(i), func(t *testing.T) {
- var buffer bytes.Buffer
- codeBlock := fmt.Sprintf(`bash {%s}
-LINE1
-LINE2
-LINE3
-LINE4
-LINE5
-LINE6
-LINE7
-LINE8
-`, test.attributes)
-
- if err := markdown.Convert([]byte(`
-`+"```"+codeBlock+"```"+`
-`), &buffer); err != nil {
- t.Fatal(err)
- }
-
- for _, line := range test.expect {
- expectStr := fmt.Sprintf("LINE%d\n", line)
- if !strings.Contains(buffer.String(), expectStr) {
- t.Fatal("got\n", buffer.String(), "\nexpected\n", expectStr)
- }
- }
- })
- }
-}
-
-type nopPreWrapper struct{}
-
-// Start is called to write a start element.
-func (nopPreWrapper) Start(code bool, styleAttr string) string { return "" }
-
-// End is called to write the end </pre> element.
-func (nopPreWrapper) End(code bool) string { return "" }
-
-func TestHighlightingLinenos(t *testing.T) {
- outputLineNumbersInTable := `
-
-1
-
-
-LINE1
-
-`
-
- for i, test := range []struct {
- attributes string
- lineNumbers bool
- lineNumbersInTable bool
- expect string
- }{
- {`linenos=true`, false, false, `1LINE1
-`},
- {`linenos=false`, false, false, `LINE1
-`},
- {``, true, false, `1LINE1
-`},
- {``, true, true, outputLineNumbersInTable},
- {`linenos=inline`, true, true, `1LINE1
-`},
- {`linenos=foo`, false, false, `1LINE1
-`},
- {`linenos=table`, false, false, outputLineNumbersInTable},
- } {
- t.Run(fmt.Sprint(i), func(t *testing.T) {
- markdown := goldmark.New(
- goldmark.WithExtensions(
- NewHighlighting(
- WithFormatOptions(
- chromahtml.WithLineNumbers(test.lineNumbers),
- chromahtml.LineNumbersInTable(test.lineNumbersInTable),
- chromahtml.WithPreWrapper(nopPreWrapper{}),
- chromahtml.WithClasses(true),
- ),
- ),
- ),
- )
-
- var buffer bytes.Buffer
- codeBlock := fmt.Sprintf(`bash {%s}
-LINE1
-`, test.attributes)
-
- content := "```" + codeBlock + "```"
-
- if err := markdown.Convert([]byte(content), &buffer); err != nil {
- t.Fatal(err)
- }
-
- s := strings.TrimSpace(buffer.String())
-
- if s != test.expect {
- t.Fatal("got\n", s, "\nexpected\n", test.expect)
- }
- })
- }
-}
-
-func TestHighlightingGuessLanguage(t *testing.T) {
- markdown := goldmark.New(
- goldmark.WithExtensions(
- NewHighlighting(
- WithGuessLanguage(true),
- WithFormatOptions(
- chromahtml.WithClasses(true),
- chromahtml.WithLineNumbers(true),
- ),
- ),
- ),
- )
- var buffer bytes.Buffer
- if err := markdown.Convert([]byte("```"+`
-LINE
-`+"```"), &buffer); err != nil {
- t.Fatal(err)
- }
- if strings.TrimSpace(buffer.String()) != strings.TrimSpace(`
-1LINE
-
-`) {
- t.Errorf("render mismatch, got\n%s", buffer.String())
- }
-}
-
-func TestCoalesceNeeded(t *testing.T) {
- markdown := goldmark.New(
- goldmark.WithExtensions(
- NewHighlighting(
- // WithGuessLanguage(true),
- WithFormatOptions(
- chromahtml.WithClasses(true),
- chromahtml.WithLineNumbers(true),
- ),
- ),
- ),
- )
- var buffer bytes.Buffer
- if err := markdown.Convert([]byte("```http"+`
-GET /foo HTTP/1.1
-Content-Type: application/json
-User-Agent: foo
-
-{
- "hello": "world"
-}
-`+"```"), &buffer); err != nil {
- t.Fatal(err)
- }
- if strings.TrimSpace(buffer.String()) != strings.TrimSpace(`
-1GET /foo HTTP/1.1
-2Content-Type: application/json
-3User-Agent: foo
-4
-5{
-6 "hello": "world"
-7}
-
-`) {
- t.Errorf("render mismatch, got\n%s", buffer.String())
- }
-}
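
The deleted tests above also collected the generated stylesheet through `WithCSSWriter` rather than asserting on inline styles. A short sketch of that flow against the upstream v2 module — the "monokai" style and the options below are assumptions for illustration, not the project's actual configuration:

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"

	chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
	"github.com/yuin/goldmark"
	highlighting "github.com/yuin/goldmark-highlighting/v2"
)

func main() {
	// Capture the stylesheet produced for the chosen chroma style while
	// converting markdown with class-based highlighting.
	var css bytes.Buffer
	md := goldmark.New(
		goldmark.WithExtensions(
			highlighting.NewHighlighting(
				highlighting.WithStyle("monokai"),
				highlighting.WithCSSWriter(&css),
				highlighting.WithFormatOptions(chromahtml.WithClasses(true)),
			),
		),
	)

	fence := strings.Repeat("`", 3)
	src := fence + "go\nfunc main() {}\n" + fence + "\n"

	var out bytes.Buffer
	if err := md.Convert([]byte(src), &out); err != nil {
		log.Fatal(err)
	}
	// css now holds the ".chroma ..." rules for the chosen style, ready to be
	// written out as a static stylesheet.
	fmt.Println(css.String())
}
```
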
diff --git a/gno.land/pkg/gnoweb/public/styles.css b/gno.land/pkg/gnoweb/public/styles.css
index a1d7860c63e..ce6c8bae639 100644
--- a/gno.land/pkg/gnoweb/public/styles.css
+++ b/gno.land/pkg/gnoweb/public/styles.css
@@ -1,3 +1,3 @@
@font-face{font-family:Roboto;font-style:normal;font-weight:900;font-display:swap;src:url(fonts/roboto/roboto-mono-normal.woff2) format("woff2"),url(fonts/roboto/roboto-mono-normal.woff) format("woff")}@font-face{font-family:Inter var;font-weight:100 900;font-display:block;font-style:oblique 0deg 10deg;src:url(fonts/intervar/Intervar.woff2) format("woff2")}*,:after,:before{--tw-border-spacing-x:0;--tw-border-spacing-y:0;--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness:proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:rgba(59,130,246,.5);--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: ;--tw-contain-size: ;--tw-contain-layout: ;--tw-contain-paint: ;--tw-contain-style: }::backdrop{--tw-border-spacing-x:0;--tw-border-spacing-y:0;--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness:proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:rgba(59,130,246,.5);--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: ;--tw-contain-size: ;--tw-contain-layout: ;--tw-contain-paint: ;--tw-contain-style: }
-/*! tailwindcss v3.4.14 | MIT License | https://tailwindcss.com*/*,:after,:before{box-sizing:border-box;border:0 solid #bdbdbd}:after,:before{--tw-content:""}:host,html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;font-feature-settings:normal;font-variation-settings:normal;-webkit-tap-highlight-color:transparent}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,pre,samp{font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;;font-feature-settings:normal;font-variation-settings:normal;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;letter-spacing:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,input:where([type=button]),input:where([type=reset]),input:where([type=submit]){-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dd,dl,figure,h1,h2,h3,h4,h5,h6,hr,p,pre{margin:0}fieldset{margin:0}fieldset,legend{padding:0}menu,ol,ul{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#7c7c7c}input::placeholder,textarea::placeholder{opacity:1;color:#7c7c7c}[role=button],button{cursor:pointer}:disabled{cursor:default}audio,canvas,embed,iframe,img,object,svg,video{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]:where(:not([hidden=until-found])){display:none}html{--tw-bg-opacity:1;background-color:rgb(255 255 255/var(--tw-bg-opacity));font-family:Inter var,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji,sans-serif;font-size:1rem;--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity));font-feature-settings:"kern" on,"liga" on,"calt" on,"zero" on;-webkit-font-feature-settings:"kern" on,"liga" on,"calt" on,"zero" on;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%;text-size-adjust:100%;-moz-osx-font-smoothing:grayscale;font-smoothing:antialiased;font-variant-ligatures:contextual common-ligatures;font-kerning:normal;text-rendering:optimizeLegibility}svg{max-height:100%;max-width:100%}form{margin-top:0;margin-bottom:0}.realm-content{overflow-wrap:break-word;padding-top:2.5rem;font-size:1rem}.realm-content>:first-child{margin-top:0!important}.realm-content a{font-weight:500;--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.realm-content 
a:hover{text-decoration-line:underline}.realm-content h1,.realm-content h2,.realm-content h3,.realm-content h4{margin-top:3rem;line-height:1.25;--tw-text-opacity:1;color:rgb(8 8 9/var(--tw-text-opacity))}.realm-content h2,.realm-content h2 *{font-weight:700}.realm-content h3,.realm-content h3 *,.realm-content h4,.realm-content h4 *{font-weight:600}.realm-content h1+h2,.realm-content h2+h3,.realm-content h3+h4{margin-top:1rem}.realm-content h1{font-size:2.375rem;font-weight:700}.realm-content h2{font-size:1.5rem}.realm-content h3{margin-top:2.5rem;font-size:1.25rem}.realm-content h3,.realm-content h4{--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.realm-content h4{margin-top:1.5rem;margin-bottom:1.5rem;font-size:1.125rem;font-weight:500}.realm-content p{margin-top:1.25rem;margin-bottom:1.25rem}.realm-content strong{font-weight:700;--tw-text-opacity:1;color:rgb(8 8 9/var(--tw-text-opacity))}.realm-content strong *{font-weight:700}.realm-content em{font-style:oblique 10deg}.realm-content blockquote{margin-top:1rem;margin-bottom:1rem;border-left-width:4px;--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity));padding-left:1rem;--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity));font-style:oblique 10deg}.realm-content ol,.realm-content ul{margin-top:1.5rem;margin-bottom:1.5rem;padding-left:1rem}.realm-content ol li,.realm-content ul li{margin-bottom:.5rem}.realm-content img{margin-top:2rem;margin-bottom:2rem;max-width:100%}.realm-content figure{margin-top:1.5rem;margin-bottom:1.5rem;text-align:center}.realm-content figcaption{font-size:.875rem;--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.realm-content :not(pre)>code{border-radius:.25rem;background-color:rgb(226 226 226/var(--tw-bg-opacity));padding:.125rem .25rem;font-size:.96em}.realm-content :not(pre)>code,.realm-content pre{--tw-bg-opacity:1;font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;}.realm-content pre{overflow-x:auto;border-radius:.375rem;background-color:rgb(240 240 240/var(--tw-bg-opacity));padding:1rem}.realm-content hr{margin-top:2.5rem;margin-bottom:2.5rem;border-top-width:1px;--tw-border-opacity:1;border-color:rgb(226 226 226/var(--tw-border-opacity))}.realm-content table{margin-top:2rem;margin-bottom:2rem;width:100%;border-collapse:collapse}.realm-content td,.realm-content th{border-width:1px;--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity));padding:.5rem 1rem}.realm-content th{--tw-bg-opacity:1;background-color:rgb(226 226 226/var(--tw-bg-opacity));font-weight:700}.realm-content caption{margin-top:.5rem;text-align:left;font-size:.875rem;--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.realm-content q{margin-top:1.5rem;margin-bottom:1.5rem;border-left-width:4px;--tw-border-opacity:1;border-left-color:rgb(204 204 204/var(--tw-border-opacity));padding-left:1rem;--tw-text-opacity:1;color:rgb(85 85 85/var(--tw-text-opacity));font-style:oblique 10deg;quotes:"“" "”" "‘" "’"}.realm-content q:after,.realm-content q:before{margin-right:.25rem;font-size:1.5rem;--tw-text-opacity:1;color:rgb(153 153 153/var(--tw-text-opacity));content:open-quote;vertical-align:-.4rem}.realm-content q:after{content:close-quote}.realm-content q:before{content:open-quote}.realm-content q:after{content:close-quote}.realm-content ol ol,.realm-content ol ul,.realm-content ul ol,.realm-content ul ul{margin-top:.75rem;margin-bottom:.5rem;padding-left:1rem}.realm-content ul{list-style-type:disc}.realm-content 
ol{list-style-type:decimal}.realm-content table th:first-child,.realm-content td:first-child{padding-left:0}.realm-content table th:last-child,.realm-content td:last-child{padding-right:0}.realm-content abbr[title]{cursor:help;border-bottom-width:1px;border-style:dotted}.realm-content details{margin-top:1.25rem;margin-bottom:1.25rem}.realm-content summary{cursor:pointer;font-weight:700}.realm-content a code{color:inherit}.realm-content video{margin-top:2rem;margin-bottom:2rem;max-width:100%}.realm-content math{font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;}.realm-content small{font-size:.875rem}.realm-content del{text-decoration-line:line-through}.realm-content sub{vertical-align:sub;font-size:.75rem}.realm-content sup{vertical-align:super;font-size:.75rem}.realm-content button,.realm-content input{border-width:1px;--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity));padding:.5rem 1rem}main :is(h1,h2,h3,h4){scroll-margin-top:6rem}::-moz-selection{--tw-bg-opacity:1;background-color:rgb(34 108 87/var(--tw-bg-opacity));--tw-text-opacity:1;color:rgb(255 255 255/var(--tw-text-opacity))}::selection{--tw-bg-opacity:1;background-color:rgb(34 108 87/var(--tw-bg-opacity));--tw-text-opacity:1;color:rgb(255 255 255/var(--tw-text-opacity))}.sidemenu .peer:checked+label>svg{--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.toc-expend-btn:has(#toc-expend:checked)+nav{display:block}.toc-expend-btn:has(#toc-expend:checked) .toc-expend-btn_ico{--tw-rotate:180deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.main-header:has(#sidemenu-docs:checked)+main #sidebar #sidebar-docs,.main-header:has(#sidemenu-meta:checked)+main #sidebar #sidebar-meta,.main-header:has(#sidemenu-source:checked)+main #sidebar #sidebar-source,.main-header:has(#sidemenu-summary:checked)+main #sidebar #sidebar-summary{display:block}@media (min-width:40rem){:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked)) .main-navigation,:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked))+main .realm-content{grid-column:span 6/span 6}:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked)) .sidemenu,:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked))+main #sidebar{grid-column:span 4/span 4}}:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked))+main #sidebar:before{position:absolute;top:0;left:-1.75rem;z-index:-1;display:block;height:100%;width:50vw;--tw-bg-opacity:1;background-color:rgb(226 226 226/var(--tw-bg-opacity));--tw-content:"";content:var(--tw-content)}main :is(.source-code)>pre{overflow:scroll;border-radius:.375rem;--tw-bg-opacity:1!important;background-color:rgb(255 255 255/var(--tw-bg-opacity))!important;padding:1rem .25rem;font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;;font-size:.875rem}@media (min-width:40rem){main :is(.source-code)>pre{padding:2rem .75rem;font-size:1rem}}main .realm-content>pre a:hover{text-decoration-line:none}main :is(.realm-content,.source-code)>pre 
.chroma-ln:target{background-color:transparent!important}main :is(.realm-content,.source-code)>pre .chroma-line:has(.chroma-ln:target),main :is(.realm-content,.source-code)>pre .chroma-line:has(.chroma-ln:target) .chroma-cl,main :is(.realm-content,.source-code)>pre .chroma-line:has(.chroma-lnlinks:hover),main :is(.realm-content,.source-code)>pre .chroma-line:has(.chroma-lnlinks:hover) .chroma-cl{border-radius:.375rem;--tw-bg-opacity:1!important;background-color:rgb(226 226 226/var(--tw-bg-opacity))!important}main :is(.realm-content,.source-code)>pre .chroma-ln{scroll-margin-top:6rem}.absolute{position:absolute}.relative{position:relative}.sticky{position:sticky}.bottom-1{bottom:.25rem}.left-0{left:0}.right-2{right:.5rem}.right-3{right:.75rem}.top-0{top:0}.top-1\/2{top:50%}.top-14{top:3.5rem}.top-2{top:.5rem}.z-max{z-index:9999}.col-span-1{grid-column:span 1/span 1}.col-span-10{grid-column:span 10/span 10}.col-span-3{grid-column:span 3/span 3}.col-span-7{grid-column:span 7/span 7}.row-span-1{grid-row:span 1/span 1}.row-start-1{grid-row-start:1}.mx-auto{margin-left:auto;margin-right:auto}.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-3{margin-bottom:.75rem}.mb-4{margin-bottom:1rem}.mb-8{margin-bottom:2rem}.mr-10{margin-right:2.5rem}.mt-1{margin-top:.25rem}.mt-10{margin-top:2.5rem}.mt-2{margin-top:.5rem}.mt-4{margin-top:1rem}.mt-6{margin-top:1.5rem}.mt-8{margin-top:2rem}.line-clamp-2{overflow:hidden;display:-webkit-box;-webkit-box-orient:vertical;-webkit-line-clamp:2}.block{display:block}.inline-block{display:inline-block}.inline{display:inline}.flex{display:flex}.grid{display:grid}.hidden{display:none}.h-10{height:2.5rem}.h-4{height:1rem}.h-5{height:1.25rem}.h-6{height:1.5rem}.h-full{height:100%}.max-h-screen{max-height:100vh}.min-h-full{min-height:100%}.min-h-screen{min-height:100vh}.w-10{width:2.5rem}.w-4{width:1rem}.w-5{width:1.25rem}.w-full{width:100%}.min-w-2{min-width:.5rem}.min-w-48{min-width:12rem}.max-w-screen-max{max-width:98.75rem}.shrink-0{flex-shrink:0}.grow-\[2\]{flex-grow:2}.-translate-y-1\/2{--tw-translate-y:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.cursor-pointer{cursor:pointer}.list-none{list-style-type:none}.appearance-none{-webkit-appearance:none;-moz-appearance:none;appearance:none}.grid-flow-dense{grid-auto-flow:dense}.auto-rows-min{grid-auto-rows:min-content}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-cols-10{grid-template-columns:repeat(10,minmax(0,1fr))}.flex-col{flex-direction:column}.items-start{align-items:flex-start}.items-center{align-items:center}.items-stretch{align-items:stretch}.justify-end{justify-content:flex-end}.justify-center{justify-content:center}.justify-between{justify-content:space-between}.gap-0\.5{gap:.125rem}.gap-1{gap:.25rem}.gap-1\.5{gap:.375rem}.gap-2{gap:.5rem}.gap-3{gap:.75rem}.gap-4{gap:1rem}.gap-8{gap:2rem}.gap-x-20{-moz-column-gap:5rem;column-gap:5rem}.gap-x-3{-moz-column-gap:.75rem;column-gap:.75rem}.gap-y-2{row-gap:.5rem}.space-y-2>:not([hidden])~:not([hidden]){--tw-space-y-reverse:0;margin-top:calc(.5rem*(1 - 
var(--tw-space-y-reverse)));margin-bottom:calc(.5rem*var(--tw-space-y-reverse))}.overflow-hidden{overflow:hidden}.overflow-scroll{overflow:scroll}.whitespace-pre-wrap{white-space:pre-wrap}.rounded{border-radius:.375rem}.rounded-sm{border-radius:.25rem}.border{border-width:1px}.border-b{border-bottom-width:1px}.border-l{border-left-width:1px}.border-t{border-top-width:1px}.border-gray-100{--tw-border-opacity:1;border-color:rgb(226 226 226/var(--tw-border-opacity))}.bg-gray-100{--tw-bg-opacity:1;background-color:rgb(226 226 226/var(--tw-bg-opacity))}.bg-gray-50{--tw-bg-opacity:1;background-color:rgb(240 240 240/var(--tw-bg-opacity))}.bg-light{--tw-bg-opacity:1;background-color:rgb(255 255 255/var(--tw-bg-opacity))}.bg-transparent{background-color:transparent}.p-1\.5{padding:.375rem}.p-2{padding:.5rem}.p-4{padding:1rem}.px-1{padding-left:.25rem;padding-right:.25rem}.px-10{padding-left:2.5rem;padding-right:2.5rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.px-4{padding-left:1rem;padding-right:1rem}.py-1{padding-top:.25rem;padding-bottom:.25rem}.py-1\.5{padding-top:.375rem;padding-bottom:.375rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.py-px{padding-top:1px;padding-bottom:1px}.pb-24{padding-bottom:6rem}.pb-3{padding-bottom:.75rem}.pb-4{padding-bottom:1rem}.pb-6{padding-bottom:1.5rem}.pb-8{padding-bottom:2rem}.pl-4{padding-left:1rem}.pr-10{padding-right:2.5rem}.pt-0\.5{padding-top:.125rem}.pt-2{padding-top:.5rem}.font-mono{font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;}.text-100{font-size:.875rem}.text-200{font-size:1rem}.text-50{font-size:.75rem}.text-600{font-size:1.5rem}.font-bold{font-weight:700}.font-medium{font-weight:500}.font-normal{font-weight:400}.font-semibold{font-weight:600}.capitalize{text-transform:capitalize}.leading-tight{line-height:1.25}.text-gray-300{--tw-text-opacity:1;color:rgb(153 153 153/var(--tw-text-opacity))}.text-gray-400{--tw-text-opacity:1;color:rgb(124 124 124/var(--tw-text-opacity))}.text-gray-600{--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.text-gray-800{--tw-text-opacity:1;color:rgb(19 19 19/var(--tw-text-opacity))}.text-gray-900{--tw-text-opacity:1;color:rgb(8 8 9/var(--tw-text-opacity))}.text-green-600{--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.outline-none{outline:2px solid transparent;outline-offset:2px}.text-stroke{-webkit-text-stroke:currentColor;-webkit-text-stroke-width:.6px}.no-scrollbar::-webkit-scrollbar{display:none}.no-scrollbar{-ms-overflow-style:none;scrollbar-width:none}.\*\:pl-0>*{padding-left:0}.before\:px-\[0\.18rem\]:before{content:var(--tw-content);padding-left:.18rem;padding-right:.18rem}.before\:text-gray-300:before{content:var(--tw-content);--tw-text-opacity:1;color:rgb(153 153 
153/var(--tw-text-opacity))}.before\:content-\[\'\/\'\]:before{--tw-content:"/";content:var(--tw-content)}.before\:content-\[\'open\'\]:before{--tw-content:"open";content:var(--tw-content)}.after\:absolute:after{content:var(--tw-content);position:absolute}.after\:bottom-0:after{content:var(--tw-content);bottom:0}.after\:left-0:after{content:var(--tw-content);left:0}.after\:block:after{content:var(--tw-content);display:block}.after\:h-1:after{content:var(--tw-content);height:.25rem}.after\:w-full:after{content:var(--tw-content);width:100%}.after\:rounded-t-sm:after{content:var(--tw-content);border-top-left-radius:.25rem;border-top-right-radius:.25rem}.after\:bg-green-600:after{content:var(--tw-content);--tw-bg-opacity:1;background-color:rgb(34 108 87/var(--tw-bg-opacity))}.first\:border-t:first-child{border-top-width:1px}.hover\:border-gray-300:hover{--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}.hover\:bg-gray-100:hover{--tw-bg-opacity:1;background-color:rgb(226 226 226/var(--tw-bg-opacity))}.hover\:bg-gray-50:hover{--tw-bg-opacity:1;background-color:rgb(240 240 240/var(--tw-bg-opacity))}.hover\:bg-green-600:hover{--tw-bg-opacity:1;background-color:rgb(34 108 87/var(--tw-bg-opacity))}.hover\:text-gray-600:hover{--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.hover\:text-green-600:hover{--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.hover\:text-light:hover{--tw-text-opacity:1;color:rgb(255 255 255/var(--tw-text-opacity))}.hover\:underline:hover{text-decoration-line:underline}.focus\:border-gray-300:focus{--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}.focus\:border-l-gray-300:focus{--tw-border-opacity:1;border-left-color:rgb(153 153 153/var(--tw-border-opacity))}.group:hover .group-hover\:border-gray-300{--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}.group:hover .group-hover\:border-l-gray-300{--tw-border-opacity:1;border-left-color:rgb(153 153 153/var(--tw-border-opacity))}.group.is-active .group-\[\.is-active\]\:text-green-600{--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.peer:checked~.peer-checked\:before\:content-\[\'close\'\]:before{--tw-content:"close";content:var(--tw-content)}.peer:focus-within~.peer-focus-within\:hidden{display:none}.has-\[ul\:empty\]\:hidden:has(ul:empty){display:none}.has-\[\:focus-within\]\:border-gray-300:has(:focus-within){--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}.has-\[\:focus\]\:border-gray-300:has(:focus){--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}@media (min-width:30rem){.sm\:gap-6{gap:1.5rem}}@media (min-width:40rem){.md\:col-span-3{grid-column:span 3/span 3}.md\:mb-0{margin-bottom:0}.md\:h-4{height:1rem}.md\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}.md\:flex-row{flex-direction:row}.md\:items-center{align-items:center}.md\:gap-x-8{-moz-column-gap:2rem;column-gap:2rem}.md\:px-10{padding-left:2.5rem;padding-right:2.5rem}.md\:pb-0{padding-bottom:0}}@media (min-width:51.25rem){.lg\:order-2{order:2}.lg\:col-span-3{grid-column:span 3/span 3}.lg\:col-span-7{grid-column:span 7/span 7}.lg\:row-span-2{grid-row:span 2/span 
2}.lg\:row-start-1{grid-row-start:1}.lg\:row-start-2{grid-row-start:2}.lg\:mb-4{margin-bottom:1rem}.lg\:mt-0{margin-top:0}.lg\:mt-10{margin-top:2.5rem}.lg\:block{display:block}.lg\:hidden{display:none}.lg\:grid-cols-10{grid-template-columns:repeat(10,minmax(0,1fr))}.lg\:flex-row{flex-direction:row}.lg\:justify-start{justify-content:flex-start}.lg\:justify-between{justify-content:space-between}.lg\:gap-x-20{-moz-column-gap:5rem;column-gap:5rem}.lg\:border-none{border-style:none}.lg\:bg-transparent{background-color:transparent}.lg\:p-0{padding:0}.lg\:px-0{padding-left:0;padding-right:0}.lg\:px-2{padding-left:.5rem;padding-right:.5rem}.lg\:py-1\.5{padding-top:.375rem;padding-bottom:.375rem}.lg\:pb-28{padding-bottom:7rem}.lg\:pt-2{padding-top:.5rem}.lg\:text-200{font-size:1rem}.lg\:font-semibold{font-weight:600}.lg\:hover\:bg-transparent:hover{background-color:transparent}}@media (min-width:63.75rem){.xl\:inline{display:inline}.xl\:hidden{display:none}.xl\:grid-cols-10{grid-template-columns:repeat(10,minmax(0,1fr))}.xl\:flex-row{flex-direction:row}.xl\:items-center{align-items:center}.xl\:gap-20{gap:5rem}.xl\:gap-6{gap:1.5rem}.xl\:pt-0{padding-top:0}}@media (min-width:85.375rem){.xxl\:inline-block{display:inline-block}.xxl\:h-4{height:1rem}.xxl\:w-4{width:1rem}.xxl\:gap-20{gap:5rem}.xxl\:gap-x-32{-moz-column-gap:8rem;column-gap:8rem}.xxl\:pr-1{padding-right:.25rem}}
\ No newline at end of file
+/*! tailwindcss v3.4.14 | MIT License | https://tailwindcss.com*/*,:after,:before{box-sizing:border-box;border:0 solid #bdbdbd}:after,:before{--tw-content:""}:host,html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;font-feature-settings:normal;font-variation-settings:normal;-webkit-tap-highlight-color:transparent}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,pre,samp{font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;;font-feature-settings:normal;font-variation-settings:normal;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;letter-spacing:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,input:where([type=button]),input:where([type=reset]),input:where([type=submit]){-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dd,dl,figure,h1,h2,h3,h4,h5,h6,hr,p,pre{margin:0}fieldset{margin:0}fieldset,legend{padding:0}menu,ol,ul{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#7c7c7c}input::placeholder,textarea::placeholder{opacity:1;color:#7c7c7c}[role=button],button{cursor:pointer}:disabled{cursor:default}audio,canvas,embed,iframe,img,object,svg,video{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]:where(:not([hidden=until-found])){display:none}html{--tw-bg-opacity:1;background-color:rgb(255 255 255/var(--tw-bg-opacity));font-family:Inter var,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji,sans-serif;font-size:1rem;--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity));font-feature-settings:"kern" on,"liga" on,"calt" on,"zero" on;-webkit-font-feature-settings:"kern" on,"liga" on,"calt" on,"zero" on;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%;text-size-adjust:100%;-moz-osx-font-smoothing:grayscale;font-smoothing:antialiased;font-variant-ligatures:contextual common-ligatures;font-kerning:normal;text-rendering:optimizeLegibility}svg{max-height:100%;max-width:100%}form{margin-top:0;margin-bottom:0}.realm-content{overflow-wrap:break-word;padding-top:2.5rem;font-size:1rem}.realm-content>:first-child{margin-top:0!important}.realm-content a{font-weight:500;--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.realm-content 
a:hover{text-decoration-line:underline}.realm-content h1,.realm-content h2,.realm-content h3,.realm-content h4{margin-top:3rem;line-height:1.25;--tw-text-opacity:1;color:rgb(8 8 9/var(--tw-text-opacity))}.realm-content h2,.realm-content h2 *{font-weight:700}.realm-content h3,.realm-content h3 *,.realm-content h4,.realm-content h4 *{font-weight:600}.realm-content h1+h2,.realm-content h2+h3,.realm-content h3+h4{margin-top:1rem}.realm-content h1{font-size:2.375rem;font-weight:700}.realm-content h2{font-size:1.5rem}.realm-content h3{margin-top:2.5rem;font-size:1.25rem}.realm-content h3,.realm-content h4{--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.realm-content h4{margin-top:1.5rem;margin-bottom:1.5rem;font-size:1.125rem;font-weight:500}.realm-content p{margin-top:1.25rem;margin-bottom:1.25rem}.realm-content strong{font-weight:700;--tw-text-opacity:1;color:rgb(8 8 9/var(--tw-text-opacity))}.realm-content strong *{font-weight:700}.realm-content em{font-style:oblique 10deg}.realm-content blockquote{margin-top:1rem;margin-bottom:1rem;border-left-width:4px;--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity));padding-left:1rem;--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity));font-style:oblique 10deg}.realm-content ol,.realm-content ul{margin-top:1.5rem;margin-bottom:1.5rem;padding-left:1rem}.realm-content ol li,.realm-content ul li{margin-bottom:.5rem}.realm-content img{margin-top:2rem;margin-bottom:2rem;max-width:100%}.realm-content figure{margin-top:1.5rem;margin-bottom:1.5rem;text-align:center}.realm-content figcaption{font-size:.875rem;--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.realm-content :not(pre)>code{border-radius:.25rem;background-color:rgb(226 226 226/var(--tw-bg-opacity));padding:.125rem .25rem;font-size:.96em}.realm-content :not(pre)>code,.realm-content pre{--tw-bg-opacity:1;font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;}.realm-content pre{overflow-x:auto;border-radius:.375rem;background-color:rgb(240 240 240/var(--tw-bg-opacity));padding:1rem}.realm-content hr{margin-top:2.5rem;margin-bottom:2.5rem;border-top-width:1px;--tw-border-opacity:1;border-color:rgb(226 226 226/var(--tw-border-opacity))}.realm-content table{margin-top:2rem;margin-bottom:2rem;width:100%;border-collapse:collapse}.realm-content td,.realm-content th{border-width:1px;--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity));padding:.5rem 1rem}.realm-content th{--tw-bg-opacity:1;background-color:rgb(226 226 226/var(--tw-bg-opacity));font-weight:700}.realm-content caption{margin-top:.5rem;text-align:left;font-size:.875rem;--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.realm-content q{margin-top:1.5rem;margin-bottom:1.5rem;border-left-width:4px;--tw-border-opacity:1;border-left-color:rgb(204 204 204/var(--tw-border-opacity));padding-left:1rem;--tw-text-opacity:1;color:rgb(85 85 85/var(--tw-text-opacity));font-style:oblique 10deg;quotes:"“" "”" "‘" "’"}.realm-content q:after,.realm-content q:before{margin-right:.25rem;font-size:1.5rem;--tw-text-opacity:1;color:rgb(153 153 153/var(--tw-text-opacity));content:open-quote;vertical-align:-.4rem}.realm-content q:after{content:close-quote}.realm-content q:before{content:open-quote}.realm-content q:after{content:close-quote}.realm-content ol ol,.realm-content ol ul,.realm-content ul ol,.realm-content ul ul{margin-top:.75rem;margin-bottom:.5rem;padding-left:1rem}.realm-content ul{list-style-type:disc}.realm-content 
ol{list-style-type:decimal}.realm-content table th:first-child,.realm-content td:first-child{padding-left:0}.realm-content table th:last-child,.realm-content td:last-child{padding-right:0}.realm-content abbr[title]{cursor:help;border-bottom-width:1px;border-style:dotted}.realm-content details{margin-top:1.25rem;margin-bottom:1.25rem}.realm-content summary{cursor:pointer;font-weight:700}.realm-content a code{color:inherit}.realm-content video{margin-top:2rem;margin-bottom:2rem;max-width:100%}.realm-content math{font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;}.realm-content small{font-size:.875rem}.realm-content del{text-decoration-line:line-through}.realm-content sub{vertical-align:sub;font-size:.75rem}.realm-content sup{vertical-align:super;font-size:.75rem}.realm-content button,.realm-content input{border-width:1px;--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity));padding:.5rem 1rem}main :is(h1,h2,h3,h4){scroll-margin-top:6rem}::-moz-selection{--tw-bg-opacity:1;background-color:rgb(34 108 87/var(--tw-bg-opacity));--tw-text-opacity:1;color:rgb(255 255 255/var(--tw-text-opacity))}::selection{--tw-bg-opacity:1;background-color:rgb(34 108 87/var(--tw-bg-opacity));--tw-text-opacity:1;color:rgb(255 255 255/var(--tw-text-opacity))}.sidemenu .peer:checked+label>svg{--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.toc-expend-btn:has(#toc-expend:checked)+nav{display:block}.toc-expend-btn:has(#toc-expend:checked) .toc-expend-btn_ico{--tw-rotate:180deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.main-header:has(#sidemenu-docs:checked)+main #sidebar #sidebar-docs,.main-header:has(#sidemenu-meta:checked)+main #sidebar #sidebar-meta,.main-header:has(#sidemenu-source:checked)+main #sidebar #sidebar-source,.main-header:has(#sidemenu-summary:checked)+main #sidebar #sidebar-summary{display:block}@media (min-width:40rem){:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked)) .main-navigation,:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked))+main .realm-content{grid-column:span 6/span 6}:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked)) .sidemenu,:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked))+main #sidebar{grid-column:span 4/span 4}}:is(.main-header:has(#sidemenu-source:checked),.main-header:has(#sidemenu-docs:checked),.main-header:has(#sidemenu-meta:checked))+main #sidebar:before{position:absolute;top:0;left:-1.75rem;z-index:-1;display:block;height:100%;width:50vw;--tw-bg-opacity:1;background-color:rgb(226 226 226/var(--tw-bg-opacity));--tw-content:"";content:var(--tw-content)}main :is(.source-code)>pre{overflow:scroll;border-radius:.375rem;--tw-bg-opacity:1!important;background-color:rgb(255 255 255/var(--tw-bg-opacity))!important;padding:1rem .25rem;font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;;font-size:.875rem}@media (min-width:40rem){main :is(.source-code)>pre{padding:2rem .75rem;font-size:1rem}}main .realm-content>pre a:hover{text-decoration-line:none}main :is(.realm-content,.source-code)>pre 
.chroma-ln:target{background-color:transparent!important}main :is(.realm-content,.source-code)>pre .chroma-line:has(.chroma-ln:target),main :is(.realm-content,.source-code)>pre .chroma-line:has(.chroma-ln:target) .chroma-cl,main :is(.realm-content,.source-code)>pre .chroma-line:has(.chroma-lnlinks:hover),main :is(.realm-content,.source-code)>pre .chroma-line:has(.chroma-lnlinks:hover) .chroma-cl{border-radius:.375rem;--tw-bg-opacity:1!important;background-color:rgb(226 226 226/var(--tw-bg-opacity))!important}main :is(.realm-content,.source-code)>pre .chroma-ln{scroll-margin-top:6rem}.absolute{position:absolute}.relative{position:relative}.sticky{position:sticky}.bottom-1{bottom:.25rem}.left-0{left:0}.right-2{right:.5rem}.right-3{right:.75rem}.top-0{top:0}.top-1\/2{top:50%}.top-14{top:3.5rem}.top-2{top:.5rem}.z-max{z-index:9999}.col-span-1{grid-column:span 1/span 1}.col-span-10{grid-column:span 10/span 10}.col-span-3{grid-column:span 3/span 3}.col-span-7{grid-column:span 7/span 7}.row-span-1{grid-row:span 1/span 1}.row-start-1{grid-row-start:1}.mx-auto{margin-left:auto;margin-right:auto}.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-3{margin-bottom:.75rem}.mb-4{margin-bottom:1rem}.mb-8{margin-bottom:2rem}.mr-10{margin-right:2.5rem}.mt-1{margin-top:.25rem}.mt-10{margin-top:2.5rem}.mt-2{margin-top:.5rem}.mt-4{margin-top:1rem}.mt-6{margin-top:1.5rem}.mt-8{margin-top:2rem}.line-clamp-2{overflow:hidden;display:-webkit-box;-webkit-box-orient:vertical;-webkit-line-clamp:2}.block{display:block}.inline-block{display:inline-block}.inline{display:inline}.flex{display:flex}.grid{display:grid}.hidden{display:none}.h-10{height:2.5rem}.h-4{height:1rem}.h-5{height:1.25rem}.h-6{height:1.5rem}.h-full{height:100%}.max-h-screen{max-height:100vh}.min-h-full{min-height:100%}.min-h-screen{min-height:100vh}.w-10{width:2.5rem}.w-4{width:1rem}.w-5{width:1.25rem}.w-full{width:100%}.min-w-2{min-width:.5rem}.min-w-48{min-width:12rem}.max-w-screen-max{max-width:98.75rem}.shrink-0{flex-shrink:0}.grow-\[2\]{flex-grow:2}.-translate-y-1\/2{--tw-translate-y:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.cursor-pointer{cursor:pointer}.list-none{list-style-type:none}.appearance-none{-webkit-appearance:none;-moz-appearance:none;appearance:none}.grid-flow-dense{grid-auto-flow:dense}.auto-rows-min{grid-auto-rows:min-content}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-cols-10{grid-template-columns:repeat(10,minmax(0,1fr))}.flex-col{flex-direction:column}.items-start{align-items:flex-start}.items-center{align-items:center}.items-stretch{align-items:stretch}.justify-end{justify-content:flex-end}.justify-center{justify-content:center}.justify-between{justify-content:space-between}.gap-0\.5{gap:.125rem}.gap-1{gap:.25rem}.gap-1\.5{gap:.375rem}.gap-2{gap:.5rem}.gap-3{gap:.75rem}.gap-4{gap:1rem}.gap-8{gap:2rem}.gap-x-20{-moz-column-gap:5rem;column-gap:5rem}.gap-x-3{-moz-column-gap:.75rem;column-gap:.75rem}.gap-y-2{row-gap:.5rem}.space-y-2>:not([hidden])~:not([hidden]){--tw-space-y-reverse:0;margin-top:calc(.5rem*(1 - 
var(--tw-space-y-reverse)));margin-bottom:calc(.5rem*var(--tw-space-y-reverse))}.overflow-hidden{overflow:hidden}.overflow-scroll{overflow:scroll}.whitespace-pre-wrap{white-space:pre-wrap}.rounded{border-radius:.375rem}.rounded-sm{border-radius:.25rem}.border{border-width:1px}.border-b{border-bottom-width:1px}.border-l{border-left-width:1px}.border-t{border-top-width:1px}.border-gray-100{--tw-border-opacity:1;border-color:rgb(226 226 226/var(--tw-border-opacity))}.bg-gray-100{--tw-bg-opacity:1;background-color:rgb(226 226 226/var(--tw-bg-opacity))}.bg-gray-300{--tw-bg-opacity:1;background-color:rgb(153 153 153/var(--tw-bg-opacity))}.bg-gray-50{--tw-bg-opacity:1;background-color:rgb(240 240 240/var(--tw-bg-opacity))}.bg-light{--tw-bg-opacity:1;background-color:rgb(255 255 255/var(--tw-bg-opacity))}.bg-transparent{background-color:transparent}.p-1\.5{padding:.375rem}.p-2{padding:.5rem}.p-4{padding:1rem}.px-1{padding-left:.25rem;padding-right:.25rem}.px-10{padding-left:2.5rem;padding-right:2.5rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.px-4{padding-left:1rem;padding-right:1rem}.py-1{padding-top:.25rem;padding-bottom:.25rem}.py-1\.5{padding-top:.375rem;padding-bottom:.375rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.py-px{padding-top:1px;padding-bottom:1px}.pb-24{padding-bottom:6rem}.pb-3{padding-bottom:.75rem}.pb-4{padding-bottom:1rem}.pb-6{padding-bottom:1.5rem}.pb-8{padding-bottom:2rem}.pl-4{padding-left:1rem}.pr-10{padding-right:2.5rem}.pt-0\.5{padding-top:.125rem}.pt-2{padding-top:.5rem}.font-mono{font-family:Roboto,Menlo,Consolas,Ubuntu Mono,Roboto Mono,DejaVu Sans Mono,monospace;}.text-100{font-size:.875rem}.text-200{font-size:1rem}.text-50{font-size:.75rem}.text-600{font-size:1.5rem}.font-bold{font-weight:700}.font-medium{font-weight:500}.font-normal{font-weight:400}.font-semibold{font-weight:600}.capitalize{text-transform:capitalize}.leading-tight{line-height:1.25}.text-gray-300{--tw-text-opacity:1;color:rgb(153 153 153/var(--tw-text-opacity))}.text-gray-400{--tw-text-opacity:1;color:rgb(124 124 124/var(--tw-text-opacity))}.text-gray-600{--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.text-gray-800{--tw-text-opacity:1;color:rgb(19 19 19/var(--tw-text-opacity))}.text-gray-900{--tw-text-opacity:1;color:rgb(8 8 9/var(--tw-text-opacity))}.text-green-600{--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.text-light{--tw-text-opacity:1;color:rgb(255 255 255/var(--tw-text-opacity))}.outline-none{outline:2px solid transparent;outline-offset:2px}.text-stroke{-webkit-text-stroke:currentColor;-webkit-text-stroke-width:.6px}.no-scrollbar::-webkit-scrollbar{display:none}.no-scrollbar{-ms-overflow-style:none;scrollbar-width:none}.\*\:pl-0>*{padding-left:0}.before\:px-\[0\.18rem\]:before{content:var(--tw-content);padding-left:.18rem;padding-right:.18rem}.before\:text-gray-300:before{content:var(--tw-content);--tw-text-opacity:1;color:rgb(153 153 
153/var(--tw-text-opacity))}.before\:content-\[\'\/\'\]:before{--tw-content:"/";content:var(--tw-content)}.before\:content-\[\'\:\'\]:before{--tw-content:":";content:var(--tw-content)}.before\:content-\[\'open\'\]:before{--tw-content:"open";content:var(--tw-content)}.after\:absolute:after{content:var(--tw-content);position:absolute}.after\:bottom-0:after{content:var(--tw-content);bottom:0}.after\:left-0:after{content:var(--tw-content);left:0}.after\:block:after{content:var(--tw-content);display:block}.after\:h-1:after{content:var(--tw-content);height:.25rem}.after\:w-full:after{content:var(--tw-content);width:100%}.after\:rounded-t-sm:after{content:var(--tw-content);border-top-left-radius:.25rem;border-top-right-radius:.25rem}.after\:bg-green-600:after{content:var(--tw-content);--tw-bg-opacity:1;background-color:rgb(34 108 87/var(--tw-bg-opacity))}.first\:border-t:first-child{border-top-width:1px}.hover\:border-gray-300:hover{--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}.hover\:bg-gray-100:hover{--tw-bg-opacity:1;background-color:rgb(226 226 226/var(--tw-bg-opacity))}.hover\:bg-gray-50:hover{--tw-bg-opacity:1;background-color:rgb(240 240 240/var(--tw-bg-opacity))}.hover\:bg-green-600:hover{--tw-bg-opacity:1;background-color:rgb(34 108 87/var(--tw-bg-opacity))}.hover\:text-gray-600:hover{--tw-text-opacity:1;color:rgb(84 89 93/var(--tw-text-opacity))}.hover\:text-green-600:hover{--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.hover\:text-light:hover{--tw-text-opacity:1;color:rgb(255 255 255/var(--tw-text-opacity))}.hover\:underline:hover{text-decoration-line:underline}.focus\:border-gray-300:focus{--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}.focus\:border-l-gray-300:focus{--tw-border-opacity:1;border-left-color:rgb(153 153 153/var(--tw-border-opacity))}.group:hover .group-hover\:border-gray-300{--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}.group:hover .group-hover\:border-l-gray-300{--tw-border-opacity:1;border-left-color:rgb(153 153 153/var(--tw-border-opacity))}.group.is-active .group-\[\.is-active\]\:text-green-600{--tw-text-opacity:1;color:rgb(34 108 87/var(--tw-text-opacity))}.peer:checked~.peer-checked\:before\:content-\[\'close\'\]:before{--tw-content:"close";content:var(--tw-content)}.peer:focus-within~.peer-focus-within\:hidden{display:none}.has-\[ul\:empty\]\:hidden:has(ul:empty){display:none}.has-\[\:focus-within\]\:border-gray-300:has(:focus-within){--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}.has-\[\:focus\]\:border-gray-300:has(:focus){--tw-border-opacity:1;border-color:rgb(153 153 153/var(--tw-border-opacity))}@media (min-width:30rem){.sm\:gap-6{gap:1.5rem}}@media (min-width:40rem){.md\:col-span-3{grid-column:span 3/span 3}.md\:mb-0{margin-bottom:0}.md\:h-4{height:1rem}.md\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}.md\:flex-row{flex-direction:row}.md\:items-center{align-items:center}.md\:gap-x-8{-moz-column-gap:2rem;column-gap:2rem}.md\:px-10{padding-left:2.5rem;padding-right:2.5rem}.md\:pb-0{padding-bottom:0}}@media (min-width:51.25rem){.lg\:order-2{order:2}.lg\:col-span-3{grid-column:span 3/span 3}.lg\:col-span-7{grid-column:span 7/span 7}.lg\:row-span-2{grid-row:span 2/span 
2}.lg\:row-start-1{grid-row-start:1}.lg\:row-start-2{grid-row-start:2}.lg\:mb-4{margin-bottom:1rem}.lg\:mt-0{margin-top:0}.lg\:mt-10{margin-top:2.5rem}.lg\:block{display:block}.lg\:hidden{display:none}.lg\:grid-cols-10{grid-template-columns:repeat(10,minmax(0,1fr))}.lg\:flex-row{flex-direction:row}.lg\:justify-start{justify-content:flex-start}.lg\:justify-between{justify-content:space-between}.lg\:gap-x-20{-moz-column-gap:5rem;column-gap:5rem}.lg\:border-none{border-style:none}.lg\:bg-transparent{background-color:transparent}.lg\:p-0{padding:0}.lg\:px-0{padding-left:0;padding-right:0}.lg\:px-2{padding-left:.5rem;padding-right:.5rem}.lg\:py-1\.5{padding-top:.375rem;padding-bottom:.375rem}.lg\:pb-28{padding-bottom:7rem}.lg\:pt-2{padding-top:.5rem}.lg\:text-200{font-size:1rem}.lg\:font-semibold{font-weight:600}.lg\:hover\:bg-transparent:hover{background-color:transparent}}@media (min-width:63.75rem){.xl\:inline{display:inline}.xl\:hidden{display:none}.xl\:grid-cols-10{grid-template-columns:repeat(10,minmax(0,1fr))}.xl\:flex-row{flex-direction:row}.xl\:items-center{align-items:center}.xl\:gap-20{gap:5rem}.xl\:gap-6{gap:1.5rem}.xl\:pt-0{padding-top:0}}@media (min-width:85.375rem){.xxl\:inline-block{display:inline-block}.xxl\:h-4{height:1rem}.xxl\:w-4{width:1rem}.xxl\:gap-20{gap:5rem}.xxl\:gap-x-32{-moz-column-gap:8rem;column-gap:8rem}.xxl\:pr-1{padding-right:.25rem}}
\ No newline at end of file
diff --git a/gno.land/pkg/gnoweb/url.go b/gno.land/pkg/gnoweb/url.go
index bc03f2182d9..9127225d490 100644
--- a/gno.land/pkg/gnoweb/url.go
+++ b/gno.land/pkg/gnoweb/url.go
@@ -4,145 +4,253 @@ import (
"errors"
"fmt"
"net/url"
+ "path/filepath"
"regexp"
+ "slices"
"strings"
)
-type PathKind byte
+var ErrURLInvalidPath = errors.New("invalid path")
-const (
- KindInvalid PathKind = 0
- KindRealm PathKind = 'r'
- KindPure PathKind = 'p'
-)
+// rePkgOrRealmPath matches and validates a package or realm path.
+var rePkgOrRealmPath = regexp.MustCompile(`^/[a-z][a-z0-9_/]*$`)
// GnoURL decomposes the parts of an URL to query a realm.
type GnoURL struct {
// Example full path:
- // gno.land/r/demo/users:jae$help&a=b?c=d
+ // gno.land/r/demo/users/render.gno:jae$help&a=b?c=d
Domain string // gno.land
Path string // /r/demo/users
Args string // jae
WebQuery url.Values // help&a=b
Query url.Values // c=d
+ File string // render.gno
}
-func (url GnoURL) EncodeArgs() string {
+// EncodeFlag is used to specify which URL components to encode.
+type EncodeFlag int
+
+const (
+ EncodeDomain EncodeFlag = 1 << iota // Encode the domain component
+ EncodePath // Encode the path component
+ EncodeArgs // Encode the arguments component
+ EncodeWebQuery // Encode the web query component
+ EncodeQuery // Encode the query component
+ EncodeNoEscape // Disable escaping of arguments
+)
+
+// Encode constructs a URL string from the components of a GnoURL struct,
+// encoding the specified components based on the provided EncodeFlag bitmask.
+//
+// The function selectively encodes the URL's path, arguments, web query, and
+// query parameters, depending on the flags set in encodeFlags.
+//
+// Returns a string representing the encoded URL.
+//
+// Example:
+//
+// gnoURL := GnoURL{
+// Domain: "gno.land",
+// Path: "/r/demo/users",
+// Args: "john",
+// File: "render.gno",
+// }
+//
+// encodedURL := gnoURL.Encode(EncodePath | EncodeArgs)
+// fmt.Println(encodedURL) // Output: /r/demo/users/render.gno:john
+//
+// Arguments are escaped with url.PathEscape (plus '$' escaping) and query values
+// with url.Values.Encode, unless EncodeNoEscape is specified.
+func (gnoURL GnoURL) Encode(encodeFlags EncodeFlag) string {
var urlstr strings.Builder
- if url.Args != "" {
- urlstr.WriteString(url.Args)
- }
- if len(url.Query) > 0 {
- urlstr.WriteString("?" + url.Query.Encode())
- }
+ noEscape := encodeFlags.Has(EncodeNoEscape)
- return urlstr.String()
-}
+ if encodeFlags.Has(EncodeDomain) {
+ urlstr.WriteString(gnoURL.Domain)
+ }
-func (url GnoURL) EncodePath() string {
- var urlstr strings.Builder
- urlstr.WriteString(url.Path)
- if url.Args != "" {
- urlstr.WriteString(":" + url.Args)
+ if encodeFlags.Has(EncodePath) {
+ path := gnoURL.Path
+ urlstr.WriteString(path)
}
- if len(url.Query) > 0 {
- urlstr.WriteString("?" + url.Query.Encode())
+ if len(gnoURL.File) > 0 {
+ urlstr.WriteRune('/')
+ urlstr.WriteString(gnoURL.File)
}
- return urlstr.String()
-}
+ if encodeFlags.Has(EncodeArgs) && gnoURL.Args != "" {
+ if encodeFlags.Has(EncodePath) {
+ urlstr.WriteRune(':')
+ }
-func (url GnoURL) EncodeWebPath() string {
- var urlstr strings.Builder
- urlstr.WriteString(url.Path)
- if url.Args != "" {
- pathEscape := escapeDollarSign(url.Args)
- urlstr.WriteString(":" + pathEscape)
+ // XXX: Arguments should ideally always be escaped,
+ // but this may require changes in some realms.
+ args := gnoURL.Args
+ if !noEscape {
+ args = escapeDollarSign(url.PathEscape(args))
+ }
+
+ urlstr.WriteString(args)
}
- if len(url.WebQuery) > 0 {
- urlstr.WriteString("$" + url.WebQuery.Encode())
+ if encodeFlags.Has(EncodeWebQuery) && len(gnoURL.WebQuery) > 0 {
+ urlstr.WriteRune('$')
+ if noEscape {
+ urlstr.WriteString(NoEscapeQuery(gnoURL.WebQuery))
+ } else {
+ urlstr.WriteString(gnoURL.WebQuery.Encode())
+ }
}
- if len(url.Query) > 0 {
- urlstr.WriteString("?" + url.Query.Encode())
+ if encodeFlags.Has(EncodeQuery) && len(gnoURL.Query) > 0 {
+ urlstr.WriteRune('?')
+ if noEscape {
+ urlstr.WriteString(NoEscapeQuery(gnoURL.Query))
+ } else {
+ urlstr.WriteString(gnoURL.Query.Encode())
+ }
}
return urlstr.String()
}
-func (url GnoURL) Kind() PathKind {
- if len(url.Path) < 2 {
- return KindInvalid
- }
- pk := PathKind(url.Path[1])
- switch pk {
- case KindPure, KindRealm:
- return pk
- }
- return KindInvalid
+// Has checks if the EncodeFlag has any of the specified flags set.
+func (f EncodeFlag) Has(flags EncodeFlag) bool {
+ return f&flags != 0
}
-var (
- ErrURLMalformedPath = errors.New("malformed URL path")
- ErrURLInvalidPathKind = errors.New("invalid path kind")
-)
+func escapeDollarSign(s string) string {
+ return strings.ReplaceAll(s, "$", "%24")
+}
-// reRealName match a realm path
-// - matches[1]: path
-// - matches[2]: path args
-var reRealmPath = regexp.MustCompile(`^` +
- `(/(?:[a-zA-Z0-9_-]+)/` + // path kind
- `[a-zA-Z][a-zA-Z0-9_-]*` + // First path segment
- `(?:/[a-zA-Z][.a-zA-Z0-9_-]*)*/?)` + // Additional path segments
- `([:$](?:.*))?$`, // Remaining portions args, separate by `$` or `:`
-)
+// EncodeArgs encodes the arguments and query parameters into a string.
+// The result is intended to be passed as the argument to a realm's `Render` function.
+func (gnoURL GnoURL) EncodeArgs() string {
+ return gnoURL.Encode(EncodeArgs | EncodeQuery | EncodeNoEscape)
+}
+// EncodeURL encodes the path, arguments, and query parameters into a string.
+// This function provides the full representation of the URL without the web query.
+func (gnoURL GnoURL) EncodeURL() string {
+ return gnoURL.Encode(EncodePath | EncodeArgs | EncodeQuery)
+}
+
+// EncodeWebURL encodes the path, package arguments, web query, and query into a string.
+// This function provides the full representation of the URL.
+func (gnoURL GnoURL) EncodeWebURL() string {
+ return gnoURL.Encode(EncodePath | EncodeArgs | EncodeWebQuery | EncodeQuery)
+}
+
+// IsPure checks if the URL path represents a pure path.
+func (gnoURL GnoURL) IsPure() bool {
+ return strings.HasPrefix(gnoURL.Path, "/p/")
+}
+
+// IsRealm checks if the URL path represents a realm path.
+func (gnoURL GnoURL) IsRealm() bool {
+ return strings.HasPrefix(gnoURL.Path, "/r/")
+}
+
+// IsFile checks if the URL path represents a file.
+func (gnoURL GnoURL) IsFile() bool {
+ return gnoURL.File != ""
+}
+
+// IsDir checks if the URL path represents a directory.
+func (gnoURL GnoURL) IsDir() bool {
+ return !gnoURL.IsFile() &&
+ len(gnoURL.Path) > 0 && gnoURL.Path[len(gnoURL.Path)-1] == '/'
+}
+
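+// IsValid checks if the URL path is a valid package or realm path.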
+func (gnoURL GnoURL) IsValid() bool {
+ return rePkgOrRealmPath.MatchString(gnoURL.Path)
+}
+
+// ParseGnoURL parses a URL into a GnoURL structure, extracting and validating its components.
func ParseGnoURL(u *url.URL) (*GnoURL, error) {
- matches := reRealmPath.FindStringSubmatch(u.EscapedPath())
- if len(matches) != 3 {
- return nil, fmt.Errorf("%w: %s", ErrURLMalformedPath, u.Path)
+ var webargs string
+ path, args, found := strings.Cut(u.EscapedPath(), ":")
+ if found {
+ args, webargs, _ = strings.Cut(args, "$")
+ } else {
+ path, webargs, _ = strings.Cut(path, "$")
+ }
+
+ upath, err := url.PathUnescape(path)
+ if err != nil {
+ return nil, fmt.Errorf("unable to unescape path %q: %w", path, err)
}
- path := matches[1]
- args := matches[2]
+ var file string
- if len(args) > 0 {
- switch args[0] {
- case ':':
- args = args[1:]
- case '$':
- default:
- return nil, fmt.Errorf("%w: %s", ErrURLMalformedPath, u.Path)
+ // The last path segment is treated as a file if it ends with an
+ // extension or contains an uppercase rune
+ ext := filepath.Ext(upath)
+ base := filepath.Base(upath)
+ if ext != "" || strings.ToLower(base) != base {
+ file = base
+ upath = strings.TrimSuffix(upath, base)
+
+ // Trim last slash if any
+ if i := strings.LastIndexByte(upath, '/'); i > 0 {
+ upath = upath[:i]
}
}
- var err error
+ if !rePkgOrRealmPath.MatchString(upath) {
+ return nil, fmt.Errorf("%w: %q", ErrURLInvalidPath, upath)
+ }
+
webquery := url.Values{}
- args, webargs, found := strings.Cut(args, "$")
- if found {
- if webquery, err = url.ParseQuery(webargs); err != nil {
- return nil, fmt.Errorf("unable to parse webquery %q: %w ", webquery, err)
+ if len(webargs) > 0 {
+ var parseErr error
+ if webquery, parseErr = url.ParseQuery(webargs); parseErr != nil {
+ return nil, fmt.Errorf("unable to parse webquery %q: %w", webargs, parseErr)
}
}
uargs, err := url.PathUnescape(args)
if err != nil {
- return nil, fmt.Errorf("unable to unescape path %q: %w", args, err)
+ return nil, fmt.Errorf("unable to unescape args %q: %w", args, err)
}
return &GnoURL{
- Path: path,
+ Path: upath,
Args: uargs,
WebQuery: webquery,
Query: u.Query(),
Domain: u.Hostname(),
+ File: file,
}, nil
}
-func escapeDollarSign(s string) string {
- return strings.ReplaceAll(s, "$", "%24")
+// NoEscapeQuery generates a URL-encoded query string from the given url.Values,
+// without escaping the keys and values. The query parameters are sorted by key.
+func NoEscapeQuery(v url.Values) string {
+ // Like url.Values.Encode, this writes the values in "bar=baz&foo=quux"
+ // form sorted by key, but leaves keys and values unescaped.
+ if len(v) == 0 {
+ return ""
+ }
+ var buf strings.Builder
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ slices.Sort(keys)
+ for _, k := range keys {
+ vs := v[k]
+ keyEscaped := k
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(keyEscaped)
+ buf.WriteByte('=')
+ buf.WriteString(v)
+ }
+ }
+ return buf.String()
}
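
A minimal usage sketch of the reworked URL API above (an illustration, not taken from the diff; it assumes the gnoweb package context, and the expected outputs are derived from the Encode logic shown here rather than quoted from it):

package gnoweb

import (
	"fmt"
	"net/url"
)

func ExampleGnoURL() {
	u, _ := url.Parse("https://gno.land/r/demo/users/render.gno:jae$help&a=b?c=d")

	gnoURL, err := ParseGnoURL(u)
	if err != nil {
		// ParseGnoURL returns ErrURLInvalidPath when the path part
		// does not match rePkgOrRealmPath.
		panic(err)
	}

	// The parser splits the URL into:
	//   Path "/r/demo/users", File "render.gno", Args "jae",
	//   WebQuery {help: "", a: "b"}, Query {c: "d"}.
	fmt.Println(gnoURL.EncodeURL())    // /r/demo/users/render.gno:jae?c=d
	fmt.Println(gnoURL.EncodeWebURL()) // /r/demo/users/render.gno:jae$a=b&help=?c=d
}

EncodeArgs, by contrast, keeps the arguments unescaped (it sets EncodeNoEscape) so the resulting string can be handed to a realm's `Render` function as-is.
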
diff --git a/gno.land/pkg/gnoweb/url_test.go b/gno.land/pkg/gnoweb/url_test.go
index 73cfdda69bd..7a491eaa149 100644
--- a/gno.land/pkg/gnoweb/url_test.go
+++ b/gno.land/pkg/gnoweb/url_test.go
@@ -19,8 +19,9 @@ func TestParseGnoURL(t *testing.T) {
Name: "malformed url",
Input: "https://gno.land/r/dem)o:$?",
Expected: nil,
- Err: ErrURLMalformedPath,
+ Err: ErrURLInvalidPath,
},
+
{
Name: "simple",
Input: "https://gno.land/r/simple/test",
@@ -30,8 +31,32 @@ func TestParseGnoURL(t *testing.T) {
WebQuery: url.Values{},
Query: url.Values{},
},
- Err: nil,
},
+
+ {
+ Name: "file",
+ Input: "https://gno.land/r/simple/test/encode.gno",
+ Expected: &GnoURL{
+ Domain: "gno.land",
+ Path: "/r/simple/test",
+ WebQuery: url.Values{},
+ Query: url.Values{},
+ File: "encode.gno",
+ },
+ },
+
+ {
+ Name: "complex file path",
+ Input: "https://gno.land/r/simple/test///...gno",
+ Expected: &GnoURL{
+ Domain: "gno.land",
+ Path: "/r/simple/test//",
+ WebQuery: url.Values{},
+ Query: url.Values{},
+ File: "...gno",
+ },
+ },
+
{
Name: "webquery + query",
Input: "https://gno.land/r/demo/foo$help&func=Bar&name=Baz",
@@ -46,7 +71,6 @@ func TestParseGnoURL(t *testing.T) {
Query: url.Values{},
Domain: "gno.land",
},
- Err: nil,
},
{
@@ -61,7 +85,6 @@ func TestParseGnoURL(t *testing.T) {
Query: url.Values{},
Domain: "gno.land",
},
- Err: nil,
},
{
@@ -78,7 +101,6 @@ func TestParseGnoURL(t *testing.T) {
},
Domain: "gno.land",
},
- Err: nil,
},
{
@@ -93,7 +115,6 @@ func TestParseGnoURL(t *testing.T) {
},
Domain: "gno.land",
},
- Err: nil,
},
{
@@ -108,22 +129,140 @@ func TestParseGnoURL(t *testing.T) {
Query: url.Values{},
Domain: "gno.land",
},
- Err: nil,
},
- // XXX: more tests
+ {
+ Name: "unknown path kind",
+ Input: "https://gno.land/x/demo/foo",
+ Expected: &GnoURL{
+ Path: "/x/demo/foo",
+ Args: "",
+ WebQuery: url.Values{},
+ Query: url.Values{},
+ Domain: "gno.land",
+ },
+ },
+
+ {
+ Name: "empty path",
+ Input: "https://gno.land/r/",
+ Expected: &GnoURL{
+ Path: "/r/",
+ Args: "",
+ WebQuery: url.Values{},
+ Query: url.Values{},
+ Domain: "gno.land",
+ },
+ },
+
+ {
+ Name: "complex query",
+ Input: "https://gno.land/r/demo/foo$help?func=Bar&name=Baz&age=30",
+ Expected: &GnoURL{
+ Path: "/r/demo/foo",
+ Args: "",
+ WebQuery: url.Values{
+ "help": []string{""},
+ },
+ Query: url.Values{
+ "func": []string{"Bar"},
+ "name": []string{"Baz"},
+ "age": []string{"30"},
+ },
+ Domain: "gno.land",
+ },
+ },
+
+ {
+ Name: "multiple web queries",
+ Input: "https://gno.land/r/demo/foo$help&func=Bar$test=123",
+ Expected: &GnoURL{
+ Path: "/r/demo/foo",
+ Args: "",
+ WebQuery: url.Values{
+ "help": []string{""},
+ "func": []string{"Bar$test=123"},
+ },
+ Query: url.Values{},
+ Domain: "gno.land",
+ },
+ },
+
+ {
+ Name: "webquery-args-webquery",
+ Input: "https://gno.land/r/demo/aaa$bbb:CCC&DDD$EEE",
+ Err: ErrURLInvalidPath, // `/r/demo/aaa$bbb` is an invalid path
+ },
+
+ {
+ Name: "args-webquery-args",
+ Input: "https://gno.land/r/demo/aaa:BBB$CCC&DDD:EEE",
+ Expected: &GnoURL{
+ Domain: "gno.land",
+ Path: "/r/demo/aaa",
+ Args: "BBB",
+ WebQuery: url.Values{
+ "CCC": []string{""},
+ "DDD:EEE": []string{""},
+ },
+ Query: url.Values{},
+ },
+ },
+
+ {
+ Name: "escaped characters in args",
+ Input: "https://gno.land/r/demo/foo:example%20with%20spaces$tz=Europe/Paris",
+ Expected: &GnoURL{
+ Path: "/r/demo/foo",
+ Args: "example with spaces",
+ WebQuery: url.Values{
+ "tz": []string{"Europe/Paris"},
+ },
+ Query: url.Values{},
+ Domain: "gno.land",
+ },
+ },
+
+ {
+ Name: "file in path + args + query",
+ Input: "https://gno.land/r/demo/foo/render.gno:example$tz=Europe/Paris",
+ Expected: &GnoURL{
+ Path: "/r/demo/foo",
+ File: "render.gno",
+ Args: "example",
+ WebQuery: url.Values{
+ "tz": []string{"Europe/Paris"},
+ },
+ Query: url.Values{},
+ Domain: "gno.land",
+ },
+ },
+
+ {
+ Name: "no extension file",
+ Input: "https://gno.land/r/demo/lIcEnSe",
+ Expected: &GnoURL{
+ Path: "/r/demo",
+ File: "lIcEnSe",
+ Args: "",
+ WebQuery: url.Values{},
+ Query: url.Values{},
+ Domain: "gno.land",
+ },
+ },
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
+ t.Logf("testing input: %q", tc.Input)
+
u, err := url.Parse(tc.Input)
require.NoError(t, err)
result, err := ParseGnoURL(u)
if tc.Err == nil {
require.NoError(t, err)
- t.Logf("parsed: %s", result.EncodePath())
- t.Logf("parsed web: %s", result.EncodeWebPath())
+ t.Logf("encoded web path: %q", result.EncodeWebURL())
} else {
require.Error(t, err)
require.ErrorIs(t, err, tc.Err)
@@ -133,3 +272,191 @@ func TestParseGnoURL(t *testing.T) {
})
}
}
+
+func TestEncode(t *testing.T) {
+ testCases := []struct {
+ Name string
+ GnoURL GnoURL
+ EncodeFlags EncodeFlag
+ Expected string
+ }{
+ {
+ Name: "encode domain",
+ GnoURL: GnoURL{
+ Domain: "gno.land",
+ Path: "/r/demo/foo",
+ },
+ EncodeFlags: EncodeDomain,
+ Expected: "gno.land",
+ },
+
+ {
+ Name: "encode web query without escape",
+ GnoURL: GnoURL{
+ Domain: "gno.land",
+ Path: "/r/demo/foo",
+ WebQuery: url.Values{
+ "help": []string{""},
+ "fun$c": []string{"B$ ar"},
+ },
+ },
+ EncodeFlags: EncodeWebQuery | EncodeNoEscape,
+ Expected: "$fun$c=B$ ar&help=",
+ },
+
+ {
+ Name: "encode domain and path",
+ GnoURL: GnoURL{
+ Domain: "gno.land",
+ Path: "/r/demo/foo",
+ },
+ EncodeFlags: EncodeDomain | EncodePath,
+ Expected: "gno.land/r/demo/foo",
+ },
+
+ {
+ Name: "Encode Path Only",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ },
+ EncodeFlags: EncodePath,
+ Expected: "/r/demo/foo",
+ },
+
+ {
+ Name: "Encode Path and File",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ File: "render.gno",
+ },
+ EncodeFlags: EncodePath,
+ Expected: "/r/demo/foo/render.gno",
+ },
+
+ {
+ Name: "Encode Path, File, and Args",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ File: "render.gno",
+ Args: "example",
+ },
+ EncodeFlags: EncodePath | EncodeArgs,
+ Expected: "/r/demo/foo/render.gno:example",
+ },
+
+ {
+ Name: "Encode Path and Args",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ Args: "example",
+ },
+ EncodeFlags: EncodePath | EncodeArgs,
+ Expected: "/r/demo/foo:example",
+ },
+
+ {
+ Name: "Encode Path, Args, and WebQuery",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ Args: "example",
+ WebQuery: url.Values{
+ "tz": []string{"Europe/Paris"},
+ },
+ },
+ EncodeFlags: EncodePath | EncodeArgs | EncodeWebQuery,
+ Expected: "/r/demo/foo:example$tz=Europe%2FParis",
+ },
+
+ {
+ Name: "Encode Full URL",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ Args: "example",
+ WebQuery: url.Values{
+ "tz": []string{"Europe/Paris"},
+ },
+ Query: url.Values{
+ "hello": []string{"42"},
+ },
+ },
+ EncodeFlags: EncodePath | EncodeArgs | EncodeWebQuery | EncodeQuery,
+ Expected: "/r/demo/foo:example$tz=Europe%2FParis?hello=42",
+ },
+
+ {
+ Name: "Encode Args and Query",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ Args: "hello Jo$ny",
+ Query: url.Values{
+ "hello": []string{"42"},
+ },
+ },
+ EncodeFlags: EncodeArgs | EncodeQuery,
+ Expected: "hello%20Jo%24ny?hello=42",
+ },
+
+ {
+ Name: "Encode Args and Query (No Escape)",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ Args: "hello Jo$ny",
+ Query: url.Values{
+ "hello": []string{"42"},
+ },
+ },
+ EncodeFlags: EncodeArgs | EncodeQuery | EncodeNoEscape,
+ Expected: "hello Jo$ny?hello=42",
+ },
+
+ {
+ Name: "Encode Args and Query",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ Args: "example",
+ Query: url.Values{
+ "hello": []string{"42"},
+ },
+ },
+ EncodeFlags: EncodeArgs | EncodeQuery,
+ Expected: "example?hello=42",
+ },
+
+ {
+ Name: "Encode with Escaped Characters",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ Args: "example with spaces",
+ WebQuery: url.Values{
+ "tz": []string{"Europe/Paris"},
+ },
+ Query: url.Values{
+ "hello": []string{"42"},
+ },
+ },
+ EncodeFlags: EncodePath | EncodeArgs | EncodeWebQuery | EncodeQuery,
+ Expected: "/r/demo/foo:example%20with%20spaces$tz=Europe%2FParis?hello=42",
+ },
+
+ {
+ Name: "Encode Path, Args, and Query",
+ GnoURL: GnoURL{
+ Path: "/r/demo/foo",
+ Args: "example",
+ Query: url.Values{
+ "hello": []string{"42"},
+ },
+ },
+ EncodeFlags: EncodePath | EncodeArgs | EncodeQuery,
+ Expected: "/r/demo/foo:example?hello=42",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ result := tc.GnoURL.Encode(tc.EncodeFlags)
+ require.True(t, tc.GnoURL.IsValid(), "gno url is not valid")
+ assert.Equal(t, tc.Expected, result)
+ })
+ }
+}
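// A minimal encoding sketch, assuming EncodeFlag values are bit flags that can
// be OR-ed together as the cases above do; the GnoURL literal below is
// hypothetical.
//
//    gnoURL := GnoURL{
//        Domain: "gno.land",
//        Path:   "/r/demo/foo",
//        Args:   "alice",
//        Query:  url.Values{"page": {"2"}},
//    }
//    s := gnoURL.Encode(EncodePath | EncodeArgs | EncodeQuery)
//    // s == "/r/demo/foo:alice?page=2"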
diff --git a/gno.land/pkg/integration/doc.go b/gno.land/pkg/integration/doc.go
index ef3ed9923da..3e09d627c9a 100644
--- a/gno.land/pkg/integration/doc.go
+++ b/gno.land/pkg/integration/doc.go
@@ -76,13 +76,6 @@
//
// Input:
//
-// - LOG_LEVEL:
-// The logging level to be used, which can be one of "error", "debug", "info", or an empty string.
-// If empty, the log level defaults to "debug".
-//
-// - LOG_DIR:
-// If set, logs will be directed to the specified directory.
-//
// - TESTWORK:
// A boolean that, when enabled, retains working directories after tests for
// inspection. If enabled, gnoland logs will be persisted inside this
diff --git a/gno.land/pkg/integration/testing_node.go b/gno.land/pkg/integration/node_testing.go
similarity index 86%
rename from gno.land/pkg/integration/testing_node.go
rename to gno.land/pkg/integration/node_testing.go
index 7eaf3457b03..7965f228fc2 100644
--- a/gno.land/pkg/integration/testing_node.go
+++ b/gno.land/pkg/integration/node_testing.go
@@ -55,7 +55,8 @@ func TestingInMemoryNode(t TestingTS, logger *slog.Logger, config *gnoland.InMem
// with default packages and genesis transactions already loaded.
// It will return the default creator address of the loaded packages.
func TestingNodeConfig(t TestingTS, gnoroot string, additionalTxs ...gnoland.TxWithMetadata) (*gnoland.InMemoryNodeConfig, bft.Address) {
- cfg := TestingMinimalNodeConfig(t, gnoroot)
+ cfg := TestingMinimalNodeConfig(gnoroot)
+ cfg.SkipGenesisVerification = true
creator := crypto.MustAddressFromString(DefaultAccount_Address) // test1
@@ -65,24 +66,24 @@ func TestingNodeConfig(t TestingTS, gnoroot string, additionalTxs ...gnoland.TxW
txs = append(txs, LoadDefaultPackages(t, creator, gnoroot)...)
txs = append(txs, additionalTxs...)
- ggs := cfg.Genesis.AppState.(gnoland.GnoGenesisState)
- ggs.Balances = balances
- ggs.Txs = txs
- ggs.Params = params
- cfg.Genesis.AppState = ggs
+ cfg.Genesis.AppState = gnoland.GnoGenesisState{
+ Balances: balances,
+ Txs: txs,
+ Params: params,
+ }
return cfg, creator
}
// TestingMinimalNodeConfig constructs the default minimal in-memory node configuration for testing.
-func TestingMinimalNodeConfig(t TestingTS, gnoroot string) *gnoland.InMemoryNodeConfig {
+func TestingMinimalNodeConfig(gnoroot string) *gnoland.InMemoryNodeConfig {
tmconfig := DefaultTestingTMConfig(gnoroot)
// Create Mocked Identity
pv := gnoland.NewMockedPrivValidator()
// Generate genesis config
- genesis := DefaultTestingGenesisConfig(t, gnoroot, pv.GetPubKey(), tmconfig)
+ genesis := DefaultTestingGenesisConfig(gnoroot, pv.GetPubKey(), tmconfig)
return &gnoland.InMemoryNodeConfig{
PrivValidator: pv,
@@ -96,16 +97,7 @@ func TestingMinimalNodeConfig(t TestingTS, gnoroot string) *gnoland.InMemoryNode
}
}
-func DefaultTestingGenesisConfig(t TestingTS, gnoroot string, self crypto.PubKey, tmconfig *tmcfg.Config) *bft.GenesisDoc {
- genState := gnoland.DefaultGenState()
- genState.Balances = []gnoland.Balance{
- {
- Address: crypto.MustAddressFromString(DefaultAccount_Address),
- Amount: std.MustParseCoins(ugnot.ValueString(10000000000000)),
- },
- }
- genState.Txs = []gnoland.TxWithMetadata{}
- genState.Params = []gnoland.Param{}
+func DefaultTestingGenesisConfig(gnoroot string, self crypto.PubKey, tmconfig *tmcfg.Config) *bft.GenesisDoc {
return &bft.GenesisDoc{
GenesisTime: time.Now(),
ChainID: tmconfig.ChainID(),
@@ -125,7 +117,16 @@ func DefaultTestingGenesisConfig(t TestingTS, gnoroot string, self crypto.PubKey
Name: "self",
},
},
- AppState: genState,
+ AppState: gnoland.GnoGenesisState{
+ Balances: []gnoland.Balance{
+ {
+ Address: crypto.MustAddressFromString(DefaultAccount_Address),
+ Amount: std.MustParseCoins(ugnot.ValueString(10_000_000_000_000)),
+ },
+ },
+ Txs: []gnoland.TxWithMetadata{},
+ Params: []gnoland.Param{},
+ },
}
}
@@ -178,8 +179,9 @@ func DefaultTestingTMConfig(gnoroot string) *tmcfg.Config {
tmconfig := tmcfg.TestConfig().SetRootDir(gnoroot)
tmconfig.Consensus.WALDisabled = true
+ tmconfig.Consensus.SkipTimeoutCommit = true
tmconfig.Consensus.CreateEmptyBlocks = true
- tmconfig.Consensus.CreateEmptyBlocksInterval = time.Duration(0)
+ tmconfig.Consensus.CreateEmptyBlocksInterval = time.Millisecond * 100
tmconfig.RPC.ListenAddress = defaultListner
tmconfig.P2P.ListenAddress = defaultListner
return tmconfig
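// A minimal sketch of the new call shape, assuming only what the change above
// shows: TestingMinimalNodeConfig no longer takes a testing handle, while
// TestingInMemoryNode still does. The logger and DB choices are illustrative.
//
//    cfg := integration.TestingMinimalNodeConfig(gnoenv.RootDir())
//    cfg.DB = memdb.NewMemDB() // keep state so the node can be restarted
//    node, rpcAddr := integration.TestingInMemoryNode(t, log.NewNoopLogger(), cfg)
//    defer node.Stop()
//    _ = rpcAddr // e.g. point an RPC client at this address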
diff --git a/gno.land/pkg/integration/pkgloader.go b/gno.land/pkg/integration/pkgloader.go
new file mode 100644
index 00000000000..7e7e817dd92
--- /dev/null
+++ b/gno.land/pkg/integration/pkgloader.go
@@ -0,0 +1,173 @@
+package integration
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/gnolang/gno/gno.land/pkg/gnoland"
+ "github.com/gnolang/gno/gno.land/pkg/sdk/vm"
+ "github.com/gnolang/gno/gnovm/pkg/gnolang"
+ "github.com/gnolang/gno/gnovm/pkg/gnomod"
+ "github.com/gnolang/gno/gnovm/pkg/packages"
+ "github.com/gnolang/gno/tm2/pkg/crypto"
+ "github.com/gnolang/gno/tm2/pkg/std"
+)
+
+type PkgsLoader struct {
+ pkgs []gnomod.Pkg
+ visited map[string]struct{}
+
+ // list of occurrences to patch with the given value
+ // XXX: find a better way
+ patchs map[string]string
+}
+
+func NewPkgsLoader() *PkgsLoader {
+ return &PkgsLoader{
+ pkgs: make([]gnomod.Pkg, 0),
+ visited: make(map[string]struct{}),
+ patchs: make(map[string]string),
+ }
+}
+
+func (pl *PkgsLoader) List() gnomod.PkgList {
+ return pl.pkgs
+}
+
+func (pl *PkgsLoader) SetPatch(replace, with string) {
+ pl.patchs[replace] = with
+}
+
+func (pl *PkgsLoader) LoadPackages(creatorKey crypto.PrivKey, fee std.Fee, deposit std.Coins) ([]gnoland.TxWithMetadata, error) {
+ pkgslist, err := pl.List().Sort() // sorts packages by their dependencies.
+ if err != nil {
+ return nil, fmt.Errorf("unable to sort packages: %w", err)
+ }
+
+ txs := make([]gnoland.TxWithMetadata, len(pkgslist))
+ for i, pkg := range pkgslist {
+ tx, err := gnoland.LoadPackage(pkg, creatorKey.PubKey().Address(), fee, deposit)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load pkg %q: %w", pkg.Name, err)
+ }
+
+ // If any replace values are specified, apply them
+ if len(pl.patchs) > 0 {
+ for _, msg := range tx.Msgs {
+ addpkg, ok := msg.(vm.MsgAddPackage)
+ if !ok {
+ continue
+ }
+
+ if addpkg.Package == nil {
+ continue
+ }
+
+ for _, file := range addpkg.Package.Files {
+ for replace, with := range pl.patchs {
+ file.Body = strings.ReplaceAll(file.Body, replace, with)
+ }
+ }
+ }
+ }
+
+ txs[i] = gnoland.TxWithMetadata{
+ Tx: tx,
+ }
+ }
+
+ err = SignTxs(txs, creatorKey, "tendermint_test")
+ if err != nil {
+ return nil, fmt.Errorf("unable to sign txs: %w", err)
+ }
+
+ return txs, nil
+}
+
+func (pl *PkgsLoader) LoadAllPackagesFromDir(path string) error {
+ // list all packages from target path
+ pkgslist, err := gnomod.ListPkgs(path)
+ if err != nil {
+ return fmt.Errorf("listing gno packages: %w", err)
+ }
+
+ for _, pkg := range pkgslist {
+ if !pl.exist(pkg) {
+ pl.add(pkg)
+ }
+ }
+
+ return nil
+}
+
+func (pl *PkgsLoader) LoadPackage(modroot string, path, name string) error {
+ // Initialize a queue with the root package
+ queue := []gnomod.Pkg{{Dir: path, Name: name}}
+
+ for len(queue) > 0 {
+ // Dequeue the first package
+ currentPkg := queue[0]
+ queue = queue[1:]
+
+ if currentPkg.Dir == "" {
+ return fmt.Errorf("no path specified for package")
+ }
+
+ if currentPkg.Name == "" {
+ // Load `gno.mod` information
+ gnoModPath := filepath.Join(currentPkg.Dir, "gno.mod")
+ gm, err := gnomod.ParseGnoMod(gnoModPath)
+ if err != nil {
+ return fmt.Errorf("unable to load %q: %w", gnoModPath, err)
+ }
+ gm.Sanitize()
+
+ // Override package info with gno.mod info
+ currentPkg.Name = gm.Module.Mod.Path
+ currentPkg.Draft = gm.Draft
+
+ pkg, err := gnolang.ReadMemPackage(currentPkg.Dir, currentPkg.Name)
+ if err != nil {
+ return fmt.Errorf("unable to read package at %q: %w", currentPkg.Dir, err)
+ }
+ imports, err := packages.Imports(pkg, nil)
+ if err != nil {
+ return fmt.Errorf("unable to load package imports in %q: %w", currentPkg.Dir, err)
+ }
+ for _, imp := range imports {
+ if imp.PkgPath == currentPkg.Name || gnolang.IsStdlib(imp.PkgPath) {
+ continue
+ }
+ currentPkg.Imports = append(currentPkg.Imports, imp.PkgPath)
+ }
+ }
+
+ if currentPkg.Draft {
+ continue // Skip draft package
+ }
+
+ if pl.exist(currentPkg) {
+ continue
+ }
+ pl.add(currentPkg)
+
+ // Add requirements to the queue
+ for _, pkgPath := range currentPkg.Imports {
+ fullPath := filepath.Join(modroot, pkgPath)
+ queue = append(queue, gnomod.Pkg{Dir: fullPath})
+ }
+ }
+
+ return nil
+}
+
+func (pl *PkgsLoader) add(pkg gnomod.Pkg) {
+ pl.visited[pkg.Name] = struct{}{}
+ pl.pkgs = append(pl.pkgs, pkg)
+}
+
+func (pl *PkgsLoader) exist(pkg gnomod.Pkg) (ok bool) {
+ _, ok = pl.visited[pkg.Name]
+ return
+}
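// A minimal usage sketch of the loader, assuming only the signatures defined
// above; the directories, creator key, and patched addresses are hypothetical.
//
//    loader := NewPkgsLoader()
//    pkgDir := filepath.Join(examplesDir, "gno.land/p/demo/avl")
//    if err := loader.LoadPackage(examplesDir, pkgDir, ""); err != nil {
//        // handle error
//    }
//    loader.SetPatch("g1olduseraddr", "g1newuseraddr") // rewrite occurrences in loaded files
//    fee := std.NewFee(50_000, std.MustParseCoin("1000000ugnot"))
//    txs, err := loader.LoadPackages(creatorKey, fee, nil) // sorted by dependency, then signed
//    // txs can then be appended to the genesis transactions.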
diff --git a/gno.land/pkg/integration/process.go b/gno.land/pkg/integration/process.go
new file mode 100644
index 00000000000..839004ca1f3
--- /dev/null
+++ b/gno.land/pkg/integration/process.go
@@ -0,0 +1,451 @@
+package integration
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "os/exec"
+ "os/signal"
+ "slices"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/gnolang/gno/gno.land/pkg/gnoland"
+ "github.com/gnolang/gno/tm2/pkg/amino"
+ tmcfg "github.com/gnolang/gno/tm2/pkg/bft/config"
+ bft "github.com/gnolang/gno/tm2/pkg/bft/types"
+ "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
+ "github.com/gnolang/gno/tm2/pkg/db"
+ "github.com/gnolang/gno/tm2/pkg/db/goleveldb"
+ "github.com/gnolang/gno/tm2/pkg/db/memdb"
+ "github.com/stretchr/testify/require"
+)
+
+const gracefulShutdown = time.Second * 5
+
+type ProcessNodeConfig struct {
+ ValidatorKey ed25519.PrivKeyEd25519 `json:"priv"`
+ Verbose bool `json:"verbose"`
+ DBDir string `json:"dbdir"`
+ RootDir string `json:"rootdir"`
+ Genesis *MarshalableGenesisDoc `json:"genesis"`
+ TMConfig *tmcfg.Config `json:"tm"`
+}
+
+type ProcessConfig struct {
+ Node *ProcessNodeConfig
+
+ // These parameters are not meant to be passed to the process
+ CoverDir string
+ Stderr, Stdout io.Writer
+}
+
+func (i ProcessConfig) validate() error {
+ if i.Node.TMConfig == nil {
+ return errors.New("no tm config set")
+ }
+
+ if i.Node.Genesis == nil {
+ return errors.New("no genesis is set")
+ }
+
+ return nil
+}
+
+// RunNode initializes and runs a gnoland node with the provided configuration.
+func RunNode(ctx context.Context, pcfg *ProcessNodeConfig, stdout, stderr io.Writer) error {
+ // Setup logger based on verbosity
+ var handler slog.Handler
+ if pcfg.Verbose {
+ handler = slog.NewTextHandler(stdout, &slog.HandlerOptions{Level: slog.LevelDebug})
+ } else {
+ handler = slog.NewTextHandler(stdout, &slog.HandlerOptions{Level: slog.LevelError})
+ }
+ logger := slog.New(handler)
+
+ // Initialize database
+ db, err := initDatabase(pcfg.DBDir)
+ if err != nil {
+ return err
+ }
+ defer db.Close() // ensure db is closed
+
+ nodecfg := TestingMinimalNodeConfig(pcfg.RootDir)
+
+ // Configure validator if provided
+ if len(pcfg.ValidatorKey) > 0 && !isAllZero(pcfg.ValidatorKey) {
+ nodecfg.PrivValidator = bft.NewMockPVWithParams(pcfg.ValidatorKey, false, false)
+ }
+ pv := nodecfg.PrivValidator.GetPubKey()
+
+ // Setup node configuration
+ nodecfg.DB = db
+ nodecfg.TMConfig.DBPath = pcfg.DBDir
+ nodecfg.TMConfig = pcfg.TMConfig
+ nodecfg.Genesis = pcfg.Genesis.ToGenesisDoc()
+ nodecfg.Genesis.Validators = []bft.GenesisValidator{
+ {
+ Address: pv.Address(),
+ PubKey: pv,
+ Power: 10,
+ Name: "self",
+ },
+ }
+
+ // Create and start the node
+ node, err := gnoland.NewInMemoryNode(logger, nodecfg)
+ if err != nil {
+ return fmt.Errorf("failed to create new in-memory node: %w", err)
+ }
+
+ if err := node.Start(); err != nil {
+ return fmt.Errorf("failed to start node: %w", err)
+ }
+ defer node.Stop()
+
+ // Determine if the node is a validator
+ ourAddress := nodecfg.PrivValidator.GetPubKey().Address()
+ isValidator := slices.ContainsFunc(nodecfg.Genesis.Validators, func(val bft.GenesisValidator) bool {
+ return val.Address == ourAddress
+ })
+
+ listenAddress := node.Config().RPC.ListenAddress
+ if isValidator {
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("waiting for the node to start: %w", ctx.Err())
+ case <-node.Ready():
+ }
+ }
+
+ // Write READY signal to stdout
+ signalWriteReady(stdout, listenAddress)
+
+ <-ctx.Done()
+ return node.Stop()
+}
+
+type NodeProcess interface {
+ Stop() error
+ Address() string
+}
+
+type nodeProcess struct {
+ cmd *exec.Cmd
+ address string
+
+ stopOnce sync.Once
+ stopErr error
+}
+
+func (n *nodeProcess) Address() string {
+ return n.address
+}
+
+func (n *nodeProcess) Stop() error {
+ n.stopOnce.Do(func() {
+ // Send an interrupt signal (SIGINT) to the process
+ if err := n.cmd.Process.Signal(os.Interrupt); err != nil {
+ n.stopErr = fmt.Errorf("error sending `SIGINT` to the node: %w", err)
+ return
+ }
+
+ // Wait for the process to exit
+ if _, err := n.cmd.Process.Wait(); err != nil {
+ n.stopErr = fmt.Errorf("process exited with error: %w", err)
+ return
+ }
+ })
+
+ return n.stopErr
+}
+
+// RunNodeProcess runs the binary at the given path with the provided configuration.
+func RunNodeProcess(ctx context.Context, cfg ProcessConfig, name string, args ...string) (NodeProcess, error) {
+ if cfg.Stdout == nil {
+ cfg.Stdout = os.Stdout
+ }
+
+ if cfg.Stderr == nil {
+ cfg.Stderr = os.Stderr
+ }
+
+ if err := cfg.validate(); err != nil {
+ return nil, err
+ }
+
+ // Marshal the configuration to JSON
+ nodeConfigData, err := json.Marshal(cfg.Node)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal config to JSON: %w", err)
+ }
+
+ // Create and configure the command to execute the binary
+ cmd := exec.Command(name, args...)
+ cmd.Env = os.Environ()
+ cmd.Stdin = bytes.NewReader(nodeConfigData)
+
+ if cfg.CoverDir != "" {
+ cmd.Env = append(cmd.Env, "GOCOVERDIR="+cfg.CoverDir)
+ }
+
+ // Redirect stderr to the configured writer (defaults to os.Stderr)
+ cmd.Stderr = os.Stderr
+ if cfg.Stderr != nil {
+ cmd.Stderr = cfg.Stderr
+ }
+
+ // Create pipes for stdout
+ stdoutPipe, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create stdout pipe: %w", err)
+ }
+
+ // Start the command
+ if err := cmd.Start(); err != nil {
+ return nil, fmt.Errorf("failed to start command: %w", err)
+ }
+
+ address, err := waitForProcessReady(ctx, stdoutPipe, cfg.Stdout)
+ if err != nil {
+ return nil, fmt.Errorf("waiting for readiness: %w", err)
+ }
+
+ return &nodeProcess{
+ cmd: cmd,
+ address: address,
+ }, nil
+}
+
+type nodeInMemoryProcess struct {
+ address string
+
+ stopOnce sync.Once
+ stopErr error
+ stop context.CancelFunc
+ ccNodeError chan error
+}
+
+func (n *nodeInMemoryProcess) Address() string {
+ return n.address
+}
+
+func (n *nodeInMemoryProcess) Stop() error {
+ n.stopOnce.Do(func() {
+ n.stop()
+ var err error
+ select {
+ case err = <-n.ccNodeError:
+ case <-time.After(time.Second * 5):
+ err = fmt.Errorf("timeout while waiting for node to stop")
+ }
+
+ if err != nil {
+ n.stopErr = fmt.Errorf("unable to node gracefully: %w", err)
+ }
+ })
+
+ return n.stopErr
+}
+
+func RunInMemoryProcess(ctx context.Context, cfg ProcessConfig) (NodeProcess, error) {
+ ctx, cancel := context.WithCancel(ctx)
+
+ out, in := io.Pipe()
+ ccStopErr := make(chan error, 1)
+ go func() {
+ defer close(ccStopErr)
+ defer cancel()
+
+ err := RunNode(ctx, cfg.Node, in, cfg.Stderr)
+ if err != nil {
+ fmt.Fprintf(cfg.Stderr, "run node failed: %v", err)
+ }
+
+ ccStopErr <- err
+ }()
+
+ address, err := waitForProcessReady(ctx, out, cfg.Stdout)
+ if err == nil { // ok
+ return &nodeInMemoryProcess{
+ address: address,
+ stop: cancel,
+ ccNodeError: ccStopErr,
+ }, nil
+ }
+
+ cancel()
+
+ select {
+ case err = <-ccStopErr: // return node error in priority
+ default:
+ }
+
+ return nil, err
+}
+
+func RunMain(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer) error {
+ ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
+ defer stop()
+
+ // Read the configuration from standard input
+ configData, err := io.ReadAll(stdin)
+ if err != nil {
+ // log.Fatalf("error reading stdin: %v", err)
+ return fmt.Errorf("error reading stdin: %w", err)
+ }
+
+ // Unmarshal the JSON configuration
+ var cfg ProcessNodeConfig
+ if err := json.Unmarshal(configData, &cfg); err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %w", err)
+ // log.Fatalf("error unmarshaling JSON: %v", err)
+ }
+
+ // Run the node
+ ccErr := make(chan error, 1)
+ go func() {
+ ccErr <- RunNode(ctx, &cfg, stdout, stderr)
+ close(ccErr)
+ }()
+
+ // Wait for the node to gracefully terminate
+ <-ctx.Done()
+
+ // Attempt graceful shutdown
+ select {
+ case <-time.After(gracefulShutdown):
+ return fmt.Errorf("unable to gracefully stop the node, exiting now")
+ case err = <-ccErr: // done
+ }
+
+ return err
+}
+
+func runTestingNodeProcess(t TestingTS, ctx context.Context, pcfg ProcessConfig) NodeProcess {
+ bin, err := os.Executable()
+ require.NoError(t, err)
+ args := []string{
+ "-test.run=^$",
+ "-run-node-process",
+ }
+
+ if pcfg.CoverDir != "" && testing.CoverMode() != "" {
+ args = append(args, "-test.gocoverdir="+pcfg.CoverDir)
+ }
+
+ node, err := RunNodeProcess(ctx, pcfg, bin, args...)
+ require.NoError(t, err)
+
+ return node
+}
+
+// initDatabase initializes the database based on the provided directory configuration.
+func initDatabase(dbDir string) (db.DB, error) {
+ if dbDir == "" {
+ return memdb.NewMemDB(), nil
+ }
+
+ data, err := goleveldb.NewGoLevelDB("testdb", dbDir)
+ if err != nil {
+ return nil, fmt.Errorf("unable to init database in %q: %w", dbDir, err)
+ }
+
+ return data, nil
+}
+
+func signalWriteReady(w io.Writer, address string) error {
+ _, err := fmt.Fprintf(w, "READY:%s\n", address)
+ return err
+}
+
+func signalReadReady(line string) (string, bool) {
+ var address string
+ if _, err := fmt.Sscanf(line, "READY:%s", &address); err == nil {
+ return address, true
+ }
+ return "", false
+}
+
+// waitForProcessReady waits for the process to signal readiness and returns the address.
+func waitForProcessReady(ctx context.Context, stdoutPipe io.Reader, out io.Writer) (string, error) {
+ var address string
+
+ cReady := make(chan error, 2)
+ go func() {
+ defer close(cReady)
+
+ scanner := bufio.NewScanner(stdoutPipe)
+ ready := false
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if !ready {
+ if addr, ok := signalReadReady(line); ok {
+ address = addr
+ ready = true
+ cReady <- nil
+ }
+ }
+
+ fmt.Fprintln(out, line)
+ }
+
+ if err := scanner.Err(); err != nil {
+ cReady <- fmt.Errorf("error reading stdout: %w", err)
+ } else {
+ cReady <- fmt.Errorf("process exited without 'READY'")
+ }
+ }()
+
+ select {
+ case err := <-cReady:
+ return address, err
+ case <-ctx.Done():
+ return "", ctx.Err()
+ }
+}
+
+// isAllZero checks if a 64-byte key consists entirely of zeros.
+func isAllZero(key [64]byte) bool {
+ for _, v := range key {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+type MarshalableGenesisDoc bft.GenesisDoc
+
+func NewMarshalableGenesisDoc(doc *bft.GenesisDoc) *MarshalableGenesisDoc {
+ m := MarshalableGenesisDoc(*doc)
+ return &m
+}
+
+func (m *MarshalableGenesisDoc) MarshalJSON() ([]byte, error) {
+ doc := (*bft.GenesisDoc)(m)
+ return amino.MarshalJSON(doc)
+}
+
+func (m *MarshalableGenesisDoc) UnmarshalJSON(data []byte) (err error) {
+ doc, err := bft.GenesisDocFromJSON(data)
+ if err != nil {
+ return err
+ }
+
+ *m = MarshalableGenesisDoc(*doc)
+ return
+}
+
+// ToGenesisDoc casts the document back to the original bft.GenesisDoc type.
+func (m *MarshalableGenesisDoc) ToGenesisDoc() *bft.GenesisDoc {
+ return (*bft.GenesisDoc)(m)
+}
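// A minimal sketch of driving a node from a separate process, assuming only the
// types and functions defined above; the binary path, context, and genesis doc
// are hypothetical.
//
//    cfg := ProcessConfig{
//        Node: &ProcessNodeConfig{
//            DBDir:    dbDir, // empty means an in-memory DB
//            RootDir:  gnoRootDir,
//            TMConfig: tmConfig,
//            Genesis:  NewMarshalableGenesisDoc(genesisDoc), // amino-aware JSON round-trip
//        },
//    }
//    node, err := RunNodeProcess(ctx, cfg, gnolandBin) // returns once "READY:<addr>" is read
//    if err != nil {
//        // handle error
//    }
//    defer node.Stop()
//    cli, err := client.NewHTTPClient(node.Address())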
diff --git a/gno.land/pkg/integration/process/main.go b/gno.land/pkg/integration/process/main.go
new file mode 100644
index 00000000000..bcd52e6fd44
--- /dev/null
+++ b/gno.land/pkg/integration/process/main.go
@@ -0,0 +1,20 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/gnolang/gno/gno.land/pkg/integration"
+)
+
+func main() {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+ defer cancel()
+
+ if err := integration.RunMain(ctx, os.Stdin, os.Stdout, os.Stderr); err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+}
diff --git a/gno.land/pkg/integration/process_test.go b/gno.land/pkg/integration/process_test.go
new file mode 100644
index 00000000000..b8768ad0e63
--- /dev/null
+++ b/gno.land/pkg/integration/process_test.go
@@ -0,0 +1,144 @@
+package integration
+
+import (
+ "bytes"
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/gnolang/gno/gnovm/pkg/gnoenv"
+ "github.com/gnolang/gno/tm2/pkg/bft/rpc/client"
+ "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// Define a flag to indicate whether to run the embedded command
+var runCommand = flag.Bool("run-node-process", false, "execute the embedded command")
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+
+ // Check if the embedded command should be executed
+ if !*runCommand {
+ fmt.Println("Running tests...")
+ os.Exit(m.Run())
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+ defer cancel()
+
+ if err := RunMain(ctx, os.Stdin, os.Stdout, os.Stderr); err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+}
+
+// TestNodeProcess tests running a gno.land node as a separate process.
+func TestNodeProcess(t *testing.T) {
+ t.Parallel()
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
+ defer cancel()
+
+ gnoRootDir := gnoenv.RootDir()
+
+ // Define the path for the node database directory
+ gnolandDBDir := filepath.Join(t.TempDir(), "db")
+
+ // Prepare a minimal node configuration for testing
+ cfg := TestingMinimalNodeConfig(gnoRootDir)
+
+ var stdio bytes.Buffer
+ defer func() {
+ t.Log("node output:")
+ t.Log(stdio.String())
+ }()
+
+ start := time.Now()
+ node := runTestingNodeProcess(t, ctx, ProcessConfig{
+ Stderr: &stdio, Stdout: &stdio,
+ Node: &ProcessNodeConfig{
+ Verbose: true,
+ ValidatorKey: ed25519.GenPrivKey(),
+ DBDir: gnolandDBDir,
+ RootDir: gnoRootDir,
+ TMConfig: cfg.TMConfig,
+ Genesis: NewMarshalableGenesisDoc(cfg.Genesis),
+ },
+ })
+ t.Logf("time to start the node: %v", time.Since(start).String())
+
+ // Create a new HTTP client to interact with the integration node
+ cli, err := client.NewHTTPClient(node.Address())
+ require.NoError(t, err)
+
+ // Retrieve node info
+ info, err := cli.ABCIInfo()
+ require.NoError(t, err)
+ assert.NotEmpty(t, info.Response.Data)
+
+ // Attempt to stop the node
+ err = node.Stop()
+ require.NoError(t, err)
+
+ // Attempt to stop the node a second time, should not fail
+ err = node.Stop()
+ require.NoError(t, err)
+}
+
+// TestInMemoryNodeProcess tests running a gno.land node in-memory within the test process.
+func TestInMemoryNodeProcess(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
+ defer cancel()
+
+ gnoRootDir := gnoenv.RootDir()
+
+ // Define the path for the node database directory
+ gnolandDBDir := filepath.Join(t.TempDir(), "db")
+
+ // Prepare a minimal node configuration for testing
+ cfg := TestingMinimalNodeConfig(gnoRootDir)
+
+ var stdio bytes.Buffer
+ defer func() {
+ t.Log("node output:")
+ t.Log(stdio.String())
+ }()
+
+ start := time.Now()
+ node, err := RunInMemoryProcess(ctx, ProcessConfig{
+ Stderr: &stdio, Stdout: &stdio,
+ Node: &ProcessNodeConfig{
+ Verbose: true,
+ ValidatorKey: ed25519.GenPrivKey(),
+ DBDir: gnolandDBDir,
+ RootDir: gnoRootDir,
+ TMConfig: cfg.TMConfig,
+ Genesis: NewMarshalableGenesisDoc(cfg.Genesis),
+ },
+ })
+ require.NoError(t, err)
+ t.Logf("time to start the node: %v", time.Since(start).String())
+
+ // Create a new HTTP client to interact with the integration node
+ cli, err := client.NewHTTPClient(node.Address())
+ require.NoError(t, err)
+
+ // Retrieve node info
+ info, err := cli.ABCIInfo()
+ require.NoError(t, err)
+ assert.NotEmpty(t, info.Response.Data)
+
+ // Attempt to stop the node
+ err = node.Stop()
+ require.NoError(t, err)
+
+ // Attempt to stop the node a second time, should not fail
+ err = node.Stop()
+ require.NoError(t, err)
+}
diff --git a/gno.land/pkg/integration/signer.go b/gno.land/pkg/integration/signer.go
new file mode 100644
index 00000000000..b32cd9c59bc
--- /dev/null
+++ b/gno.land/pkg/integration/signer.go
@@ -0,0 +1,33 @@
+package integration
+
+import (
+ "fmt"
+
+ "github.com/gnolang/gno/gno.land/pkg/gnoland"
+
+ "github.com/gnolang/gno/tm2/pkg/crypto"
+ "github.com/gnolang/gno/tm2/pkg/std"
+)
+
+// SignTxs signs all the given transactions with the provided private key.
+// The resulting signatures are only valid for genesis transactions, as accountNumber and sequence are 0.
+func SignTxs(txs []gnoland.TxWithMetadata, privKey crypto.PrivKey, chainID string) error {
+ for index, tx := range txs {
+ bytes, err := tx.Tx.GetSignBytes(chainID, 0, 0)
+ if err != nil {
+ return fmt.Errorf("unable to get sign bytes for transaction, %w", err)
+ }
+ signature, err := privKey.Sign(bytes)
+ if err != nil {
+ return fmt.Errorf("unable to sign transaction, %w", err)
+ }
+
+ txs[index].Tx.Signatures = []std.Signature{
+ {
+ PubKey: privKey.PubKey(),
+ Signature: signature,
+ },
+ }
+ }
+ return nil
+}
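// A minimal signing sketch, assuming the behavior documented above: genesis
// transactions use account number and sequence 0, so a single pass with the
// creator key is enough. The transaction and key below are hypothetical.
//
//    txs := []gnoland.TxWithMetadata{{Tx: tx}}
//    if err := SignTxs(txs, creatorKey, "tendermint_test"); err != nil {
//        // handle error
//    }
//    // txs[0].Tx.Signatures now carries the creator's public key and signature.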
diff --git a/gno.land/pkg/integration/testdata/event_multi_msg.txtar b/gno.land/pkg/integration/testdata/event_multi_msg.txtar
index 84afe3cc6a4..13a448e7f8c 100644
--- a/gno.land/pkg/integration/testdata/event_multi_msg.txtar
+++ b/gno.land/pkg/integration/testdata/event_multi_msg.txtar
@@ -11,16 +11,19 @@ stdout 'data: {'
stdout ' "BaseAccount": {'
stdout ' "address": "g1jg8mtutu9khhfwc4nxmuhcpftf0pajdhfvsqf5",'
stdout ' "coins": "[0-9]*ugnot",' # dynamic
-stdout ' "public_key": null,'
+stdout ' "public_key": {'
+stdout ' "@type": "/tm.PubKeySecp256k1",'
+stdout ' "value": "A\+FhNtsXHjLfSJk1lB8FbiL4mGPjc50Kt81J7EKDnJ2y"'
+stdout ' },'
stdout ' "account_number": "0",'
-stdout ' "sequence": "0"'
+stdout ' "sequence": "1"'
stdout ' }'
stdout '}'
! stderr '.+' # empty
## sign
-gnokey sign -tx-path $WORK/multi/multi_msg.tx -chainid=tendermint_test -account-number 0 -account-sequence 0 test1
+gnokey sign -tx-path $WORK/multi/multi_msg.tx -chainid=tendermint_test -account-number 0 -account-sequence 1 test1
stdout 'Tx successfully signed and saved to '
## broadcast
diff --git a/gno.land/pkg/integration/testdata/gnokey_simulate.txtar b/gno.land/pkg/integration/testdata/gnokey_simulate.txtar
index 8db2c7302fc..db3cd527eb3 100644
--- a/gno.land/pkg/integration/testdata/gnokey_simulate.txtar
+++ b/gno.land/pkg/integration/testdata/gnokey_simulate.txtar
@@ -7,41 +7,41 @@ gnoland start
# Initial state: assert that sequence == 0.
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "0"'
+stdout '"sequence": "1"'
# attempt adding the "test" package.
# the package has a syntax error; simulation should catch this ahead of time and prevent the tx.
# -simulate test
! gnokey maketx addpkg -pkgdir $WORK/test -pkgpath gno.land/r/test -gas-fee 1000000ugnot -gas-wanted 2000000 -broadcast -chainid=tendermint_test -simulate test test1
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "0"'
+stdout '"sequence": "1"'
# -simulate only
! gnokey maketx addpkg -pkgdir $WORK/test -pkgpath gno.land/r/test -gas-fee 1000000ugnot -gas-wanted 2000000 -broadcast -chainid=tendermint_test -simulate only test1
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "0"'
+stdout '"sequence": "1"'
# -simulate skip
! gnokey maketx addpkg -pkgdir $WORK/test -pkgpath gno.land/r/test -gas-fee 1000000ugnot -gas-wanted 2000000 -broadcast -chainid=tendermint_test -simulate skip test1
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "1"'
+stdout '"sequence": "2"'
# attempt calling hello.SetName correctly.
# -simulate test and skip should do it successfully, -simulate only should not.
# -simulate test
gnokey maketx call -pkgpath gno.land/r/hello -func SetName -args John -gas-wanted 2000000 -gas-fee 1000000ugnot -broadcast -chainid tendermint_test -simulate test test1
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "2"'
+stdout '"sequence": "3"'
gnokey query vm/qeval --data "gno.land/r/hello.Hello()"
stdout 'Hello, John!'
# -simulate only
gnokey maketx call -pkgpath gno.land/r/hello -func SetName -args Paul -gas-wanted 2000000 -gas-fee 1000000ugnot -broadcast -chainid tendermint_test -simulate only test1
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "2"'
+stdout '"sequence": "3"'
gnokey query vm/qeval --data "gno.land/r/hello.Hello()"
stdout 'Hello, John!'
# -simulate skip
gnokey maketx call -pkgpath gno.land/r/hello -func SetName -args George -gas-wanted 2000000 -gas-fee 1000000ugnot -broadcast -chainid tendermint_test -simulate skip test1
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "3"'
+stdout '"sequence": "4"'
gnokey query vm/qeval --data "gno.land/r/hello.Hello()"
stdout 'Hello, George!'
@@ -51,19 +51,19 @@ stdout 'Hello, George!'
# -simulate test
! gnokey maketx call -pkgpath gno.land/r/hello -func Grumpy -gas-wanted 2000000 -gas-fee 1000000ugnot -broadcast -chainid tendermint_test -simulate test test1
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "3"'
+stdout '"sequence": "4"'
gnokey query vm/qeval --data "gno.land/r/hello.Hello()"
stdout 'Hello, George!'
# -simulate only
! gnokey maketx call -pkgpath gno.land/r/hello -func Grumpy -gas-wanted 2000000 -gas-fee 1000000ugnot -broadcast -chainid tendermint_test -simulate only test1
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "3"'
+stdout '"sequence": "4"'
gnokey query vm/qeval --data "gno.land/r/hello.Hello()"
stdout 'Hello, George!'
# -simulate skip
! gnokey maketx call -pkgpath gno.land/r/hello -func Grumpy -gas-wanted 2000000 -gas-fee 1000000ugnot -broadcast -chainid tendermint_test -simulate skip test1
gnokey query auth/accounts/$USER_ADDR_test1
-stdout '"sequence": "4"'
+stdout '"sequence": "5"'
gnokey query vm/qeval --data "gno.land/r/hello.Hello()"
stdout 'Hello, George!'
diff --git a/gno.land/pkg/integration/testdata/gnoland.txtar b/gno.land/pkg/integration/testdata/gnoland.txtar
index 78bdc9cae4e..83c8fe9c9a5 100644
--- a/gno.land/pkg/integration/testdata/gnoland.txtar
+++ b/gno.land/pkg/integration/testdata/gnoland.txtar
@@ -28,7 +28,7 @@ cmp stderr gnoland-already-stop.stderr.golden
-- gnoland-no-arguments.stdout.golden --
-- gnoland-no-arguments.stderr.golden --
-"gnoland" error: syntax: gnoland [start|stop|restart]
+"gnoland" error: no command provided
-- gnoland-start.stdout.golden --
node started successfully
-- gnoland-start.stderr.golden --
diff --git a/gno.land/pkg/integration/testdata/gnoweb_airgapped.txtar b/gno.land/pkg/integration/testdata/gnoweb_airgapped.txtar
index 3ed35a1b1d3..02bd8058214 100644
--- a/gno.land/pkg/integration/testdata/gnoweb_airgapped.txtar
+++ b/gno.land/pkg/integration/testdata/gnoweb_airgapped.txtar
@@ -14,9 +14,12 @@ stdout 'data: {'
stdout ' "BaseAccount": {'
stdout ' "address": "g1jg8mtutu9khhfwc4nxmuhcpftf0pajdhfvsqf5",'
stdout ' "coins": "[0-9]*ugnot",' # dynamic
-stdout ' "public_key": null,'
+stdout ' "public_key": {'
+stdout ' "@type": "/tm.PubKeySecp256k1",'
+stdout ' "value": "A\+FhNtsXHjLfSJk1lB8FbiL4mGPjc50Kt81J7EKDnJ2y"'
+stdout ' },'
stdout ' "account_number": "0",'
-stdout ' "sequence": "0"'
+stdout ' "sequence": "4"'
stdout ' }'
stdout '}'
! stderr '.+' # empty
@@ -26,7 +29,7 @@ gnokey maketx call -pkgpath "gno.land/r/demo/echo" -func "Render" -gas-fee 10000
cp stdout call.tx
# Sign
-gnokey sign -tx-path $WORK/call.tx -chainid "tendermint_test" -account-number 0 -account-sequence 0 test1
+gnokey sign -tx-path $WORK/call.tx -chainid "tendermint_test" -account-number 0 -account-sequence 4 test1
cmpenv stdout sign.stdout.golden
gnokey broadcast $WORK/call.tx
diff --git a/gno.land/pkg/integration/testdata/loadpkg_example.txtar b/gno.land/pkg/integration/testdata/loadpkg_example.txtar
index c05bedfef65..f7be500f3b6 100644
--- a/gno.land/pkg/integration/testdata/loadpkg_example.txtar
+++ b/gno.land/pkg/integration/testdata/loadpkg_example.txtar
@@ -4,11 +4,11 @@ loadpkg gno.land/p/demo/ufmt
## start a new node
gnoland start
-gnokey maketx addpkg -pkgdir $WORK -pkgpath gno.land/r/importtest -gas-fee 1000000ugnot -gas-wanted 9000000 -broadcast -chainid=tendermint_test test1
+gnokey maketx addpkg -pkgdir $WORK -pkgpath gno.land/r/importtest -gas-fee 1000000ugnot -gas-wanted 10000000 -broadcast -chainid=tendermint_test test1
stdout OK!
## execute Render
-gnokey maketx call -pkgpath gno.land/r/importtest -func Render -gas-fee 1000000ugnot -gas-wanted 9000000 -args '' -broadcast -chainid=tendermint_test test1
+gnokey maketx call -pkgpath gno.land/r/importtest -func Render -gas-fee 1000000ugnot -gas-wanted 10000000 -args '' -broadcast -chainid=tendermint_test test1
stdout '("92054" string)'
stdout OK!
diff --git a/gno.land/pkg/integration/testdata/restart.txtar b/gno.land/pkg/integration/testdata/restart.txtar
index 8a63713a214..5571aa9fa66 100644
--- a/gno.land/pkg/integration/testdata/restart.txtar
+++ b/gno.land/pkg/integration/testdata/restart.txtar
@@ -4,12 +4,12 @@
loadpkg gno.land/r/demo/counter $WORK
gnoland start
-gnokey maketx call -pkgpath gno.land/r/demo/counter -func Incr -gas-fee 1000000ugnot -gas-wanted 150000 -broadcast -chainid tendermint_test test1
+gnokey maketx call -pkgpath gno.land/r/demo/counter -func Incr -gas-fee 1000000ugnot -gas-wanted 200000 -broadcast -chainid tendermint_test test1
stdout '\(1 int\)'
gnoland restart
-gnokey maketx call -pkgpath gno.land/r/demo/counter -func Incr -gas-fee 1000000ugnot -gas-wanted 150000 -broadcast -chainid tendermint_test test1
+gnokey maketx call -pkgpath gno.land/r/demo/counter -func Incr -gas-fee 1000000ugnot -gas-wanted 200000 -broadcast -chainid tendermint_test test1
stdout '\(2 int\)'
-- counter.gno --
diff --git a/gno.land/pkg/integration/testdata/restart_missing_type.txtar b/gno.land/pkg/integration/testdata/restart_missing_type.txtar
index b02acc16d96..09e1a27d6f4 100644
--- a/gno.land/pkg/integration/testdata/restart_missing_type.txtar
+++ b/gno.land/pkg/integration/testdata/restart_missing_type.txtar
@@ -5,15 +5,15 @@
loadpkg gno.land/p/demo/avl
gnoland start
-gnokey sign -tx-path $WORK/tx1.tx -chainid tendermint_test -account-sequence 0 test1
+gnokey sign -tx-path $WORK/tx1.tx -chainid tendermint_test -account-sequence 1 test1
! gnokey broadcast $WORK/tx1.tx
stderr 'out of gas'
-gnokey sign -tx-path $WORK/tx2.tx -chainid tendermint_test -account-sequence 1 test1
+gnokey sign -tx-path $WORK/tx2.tx -chainid tendermint_test -account-sequence 2 test1
gnokey broadcast $WORK/tx2.tx
stdout 'OK!'
-gnokey sign -tx-path $WORK/tx3.tx -chainid tendermint_test -account-sequence 2 test1
+gnokey sign -tx-path $WORK/tx3.tx -chainid tendermint_test -account-sequence 3 test1
gnokey broadcast $WORK/tx3.tx
stdout 'OK!'
diff --git a/gno.land/pkg/integration/testdata/simulate_gas.txtar b/gno.land/pkg/integration/testdata/simulate_gas.txtar
index 8550419f205..4c5213da345 100644
--- a/gno.land/pkg/integration/testdata/simulate_gas.txtar
+++ b/gno.land/pkg/integration/testdata/simulate_gas.txtar
@@ -6,11 +6,11 @@ gnoland start
# simulate only
gnokey maketx call -pkgpath gno.land/r/simulate -func Hello -gas-fee 1000000ugnot -gas-wanted 2000000 -broadcast -chainid=tendermint_test -simulate only test1
-stdout 'GAS USED: 96411'
+stdout 'GAS USED: 99015'
# simulate skip
gnokey maketx call -pkgpath gno.land/r/simulate -func Hello -gas-fee 1000000ugnot -gas-wanted 2000000 -broadcast -chainid=tendermint_test -simulate skip test1
-stdout 'GAS USED: 96411' # same as simulate only
+stdout 'GAS USED: 99015' # same as simulate only
-- package/package.gno --
diff --git a/gno.land/pkg/integration/testdata_test.go b/gno.land/pkg/integration/testdata_test.go
new file mode 100644
index 00000000000..ba4d5176df1
--- /dev/null
+++ b/gno.land/pkg/integration/testdata_test.go
@@ -0,0 +1,67 @@
+package integration
+
+import (
+ "os"
+ "strconv"
+ "testing"
+
+ gno_integration "github.com/gnolang/gno/gnovm/pkg/integration"
+ "github.com/rogpeppe/go-internal/testscript"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTestdata(t *testing.T) {
+ t.Parallel()
+
+ flagInMemoryTS, _ := strconv.ParseBool(os.Getenv("INMEMORY_TS"))
+ flagNoSeqTS, _ := strconv.ParseBool(os.Getenv("NO_SEQ_TS"))
+
+ p := gno_integration.NewTestingParams(t, "testdata")
+
+ if coverdir, ok := gno_integration.ResolveCoverageDir(); ok {
+ err := gno_integration.SetupTestscriptsCoverage(&p, coverdir)
+ require.NoError(t, err)
+ }
+
+ // Set up gnoland for testscript
+ err := SetupGnolandTestscript(t, &p)
+ require.NoError(t, err)
+
+ mode := commandKindTesting
+ if flagInMemoryTS {
+ mode = commandKindInMemory
+ }
+
+ origSetup := p.Setup
+ p.Setup = func(env *testscript.Env) error {
+ env.Values[envKeyExecCommand] = mode
+ if origSetup != nil {
+ if err := origSetup(env); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+
+ if flagInMemoryTS && !flagNoSeqTS {
+ testscript.RunT(tSeqShim{t}, p)
+ } else {
+ testscript.Run(t, p)
+ }
+}
+
+type tSeqShim struct{ *testing.T }
+
+// noop Parallel method allows us to run tests sequentially
+func (tSeqShim) Parallel() {}
+
+func (t tSeqShim) Run(name string, f func(testscript.T)) {
+ t.T.Run(name, func(t *testing.T) {
+ f(tSeqShim{t})
+ })
+}
+
+func (t tSeqShim) Verbose() bool {
+ return testing.Verbose()
+}
diff --git a/gno.land/pkg/integration/testing_integration.go b/gno.land/pkg/integration/testing_integration.go
deleted file mode 100644
index 0a181950bb3..00000000000
--- a/gno.land/pkg/integration/testing_integration.go
+++ /dev/null
@@ -1,795 +0,0 @@
-package integration
-
-import (
- "context"
- "errors"
- "flag"
- "fmt"
- "hash/crc32"
- "log/slog"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
-
- "github.com/gnolang/gno/gno.land/pkg/gnoland"
- "github.com/gnolang/gno/gno.land/pkg/gnoland/ugnot"
- "github.com/gnolang/gno/gno.land/pkg/keyscli"
- "github.com/gnolang/gno/gno.land/pkg/log"
- "github.com/gnolang/gno/gno.land/pkg/sdk/vm"
- "github.com/gnolang/gno/gnovm/pkg/gnoenv"
- "github.com/gnolang/gno/gnovm/pkg/gnolang"
- "github.com/gnolang/gno/gnovm/pkg/gnomod"
- "github.com/gnolang/gno/gnovm/pkg/packages"
- "github.com/gnolang/gno/tm2/pkg/bft/node"
- bft "github.com/gnolang/gno/tm2/pkg/bft/types"
- "github.com/gnolang/gno/tm2/pkg/commands"
- "github.com/gnolang/gno/tm2/pkg/crypto"
- "github.com/gnolang/gno/tm2/pkg/crypto/bip39"
- "github.com/gnolang/gno/tm2/pkg/crypto/keys"
- "github.com/gnolang/gno/tm2/pkg/crypto/keys/client"
- "github.com/gnolang/gno/tm2/pkg/db/memdb"
- tm2Log "github.com/gnolang/gno/tm2/pkg/log"
- "github.com/gnolang/gno/tm2/pkg/std"
- "github.com/rogpeppe/go-internal/testscript"
- "go.uber.org/zap/zapcore"
-)
-
-const (
- envKeyGenesis int = iota
- envKeyLogger
- envKeyPkgsLoader
-)
-
-type tSeqShim struct{ *testing.T }
-
-// noop Parallel method allow us to run test sequentially
-func (tSeqShim) Parallel() {}
-
-func (t tSeqShim) Run(name string, f func(testscript.T)) {
- t.T.Run(name, func(t *testing.T) {
- f(tSeqShim{t})
- })
-}
-
-func (t tSeqShim) Verbose() bool {
- return testing.Verbose()
-}
-
-// RunGnolandTestscripts sets up and runs txtar integration tests for gnoland nodes.
-// It prepares an in-memory gnoland node and initializes the necessary environment and custom commands.
-// The function adapts the test setup for use with the testscript package, enabling
-// the execution of gnoland and gnokey commands within txtar scripts.
-//
-// Refer to package documentation in doc.go for more information on commands and example txtar scripts.
-func RunGnolandTestscripts(t *testing.T, txtarDir string) {
- t.Helper()
-
- p := setupGnolandTestScript(t, txtarDir)
- if deadline, ok := t.Deadline(); ok && p.Deadline.IsZero() {
- p.Deadline = deadline
- }
-
- testscript.RunT(tSeqShim{t}, p)
-}
-
-type testNode struct {
- *node.Node
- cfg *gnoland.InMemoryNodeConfig
- nGnoKeyExec uint // Counter for execution of gnokey.
-}
-
-func setupGnolandTestScript(t *testing.T, txtarDir string) testscript.Params {
- t.Helper()
-
- tmpdir := t.TempDir()
-
- // `gnoRootDir` should point to the local location of the gno repository.
- // It serves as the gno equivalent of GOROOT.
- gnoRootDir := gnoenv.RootDir()
-
- // `gnoHomeDir` should be the local directory where gnokey stores keys.
- gnoHomeDir := filepath.Join(tmpdir, "gno")
-
- // Testscripts run concurrently by default, so we need to be prepared for that.
- nodes := map[string]*testNode{}
-
- updateScripts, _ := strconv.ParseBool(os.Getenv("UPDATE_SCRIPTS"))
- persistWorkDir, _ := strconv.ParseBool(os.Getenv("TESTWORK"))
- return testscript.Params{
- UpdateScripts: updateScripts,
- TestWork: persistWorkDir,
- Dir: txtarDir,
- Setup: func(env *testscript.Env) error {
- kb, err := keys.NewKeyBaseFromDir(gnoHomeDir)
- if err != nil {
- return err
- }
-
- // create sessions ID
- var sid string
- {
- works := env.Getenv("WORK")
- sum := crc32.ChecksumIEEE([]byte(works))
- sid = strconv.FormatUint(uint64(sum), 16)
- env.Setenv("SID", sid)
- }
-
- // setup logger
- var logger *slog.Logger
- {
- logger = tm2Log.NewNoopLogger()
- if persistWorkDir || os.Getenv("LOG_PATH_DIR") != "" {
- logname := fmt.Sprintf("txtar-gnoland-%s.log", sid)
- logger, err = getTestingLogger(env, logname)
- if err != nil {
- return fmt.Errorf("unable to setup logger: %w", err)
- }
- }
-
- env.Values[envKeyLogger] = logger
- }
-
- // Track new user balances added via the `adduser`
- // command and packages added with the `loadpkg` command.
- // This genesis will be use when node is started.
-
- genesis := gnoland.DefaultGenState()
- genesis.Balances = LoadDefaultGenesisBalanceFile(t, gnoRootDir)
- genesis.Params = LoadDefaultGenesisParamFile(t, gnoRootDir)
- genesis.Auth.Params.InitialGasPrice = std.GasPrice{Gas: 0, Price: std.Coin{Amount: 0, Denom: "ugnot"}}
- genesis.Txs = []gnoland.TxWithMetadata{}
-
- // test1 must be created outside of the loop below because it is already included in genesis so
- // attempting to recreate results in it getting overwritten and breaking existing tests that
- // rely on its address being static.
- kb.CreateAccount(DefaultAccount_Name, DefaultAccount_Seed, "", "", 0, 0)
- env.Setenv("USER_SEED_"+DefaultAccount_Name, DefaultAccount_Seed)
- env.Setenv("USER_ADDR_"+DefaultAccount_Name, DefaultAccount_Address)
-
- env.Values[envKeyGenesis] = &genesis
- env.Values[envKeyPkgsLoader] = newPkgsLoader()
-
- env.Setenv("GNOROOT", gnoRootDir)
- env.Setenv("GNOHOME", gnoHomeDir)
-
- return nil
- },
- Cmds: map[string]func(ts *testscript.TestScript, neg bool, args []string){
- "gnoland": func(ts *testscript.TestScript, neg bool, args []string) {
- if len(args) == 0 {
- tsValidateError(ts, "gnoland", neg, fmt.Errorf("syntax: gnoland [start|stop|restart]"))
- return
- }
-
- logger := ts.Value(envKeyLogger).(*slog.Logger) // grab logger
- sid := getNodeSID(ts) // grab session id
-
- var cmd string
- cmd, args = args[0], args[1:]
-
- var err error
- switch cmd {
- case "start":
- if nodeIsRunning(nodes, sid) {
- err = fmt.Errorf("node already started")
- break
- }
-
- // parse flags
- fs := flag.NewFlagSet("start", flag.ContinueOnError)
- nonVal := fs.Bool("non-validator", false, "set up node as a non-validator")
- if err := fs.Parse(args); err != nil {
- ts.Fatalf("unable to parse `gnoland start` flags: %s", err)
- }
-
- // get packages
- pkgs := ts.Value(envKeyPkgsLoader).(*pkgsLoader) // grab logger
- creator := crypto.MustAddressFromString(DefaultAccount_Address) // test1
- defaultFee := std.NewFee(50000, std.MustParseCoin(ugnot.ValueString(1000000)))
- // we need to define a new err1 otherwise the out err would be shadowed in the case "start":
- pkgsTxs, loadErr := pkgs.LoadPackages(creator, defaultFee, nil)
-
- if loadErr != nil {
- ts.Fatalf("unable to load packages txs: %s", err)
- }
-
- // Warp up `ts` so we can pass it to other testing method
- t := TSTestingT(ts)
-
- // Generate config and node
- cfg := TestingMinimalNodeConfig(t, gnoRootDir)
- genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState)
- genesis.Txs = append(pkgsTxs, genesis.Txs...)
-
- // setup genesis state
- cfg.Genesis.AppState = *genesis
- if *nonVal {
- // re-create cfg.Genesis.Validators with a throwaway pv, so we start as a
- // non-validator.
- pv := gnoland.NewMockedPrivValidator()
- cfg.Genesis.Validators = []bft.GenesisValidator{
- {
- Address: pv.GetPubKey().Address(),
- PubKey: pv.GetPubKey(),
- Power: 10,
- Name: "none",
- },
- }
- }
- cfg.DB = memdb.NewMemDB() // so it can be reused when restarting.
-
- n, remoteAddr := TestingInMemoryNode(t, logger, cfg)
-
- // Register cleanup
- nodes[sid] = &testNode{Node: n, cfg: cfg}
-
- // Add default environments
- ts.Setenv("RPC_ADDR", remoteAddr)
-
- fmt.Fprintln(ts.Stdout(), "node started successfully")
- case "restart":
- n, ok := nodes[sid]
- if !ok {
- err = fmt.Errorf("node must be started before being restarted")
- break
- }
-
- if stopErr := n.Stop(); stopErr != nil {
- err = fmt.Errorf("error stopping node: %w", stopErr)
- break
- }
-
- // Create new node with same config.
- newNode, newRemoteAddr := TestingInMemoryNode(t, logger, n.cfg)
-
- // Update testNode and environment variables.
- n.Node = newNode
- ts.Setenv("RPC_ADDR", newRemoteAddr)
-
- fmt.Fprintln(ts.Stdout(), "node restarted successfully")
- case "stop":
- n, ok := nodes[sid]
- if !ok {
- err = fmt.Errorf("node not started cannot be stopped")
- break
- }
- if err = n.Stop(); err == nil {
- delete(nodes, sid)
-
- // Unset gnoland environments
- ts.Setenv("RPC_ADDR", "")
- fmt.Fprintln(ts.Stdout(), "node stopped successfully")
- }
- default:
- err = fmt.Errorf("invalid gnoland subcommand: %q", cmd)
- }
-
- tsValidateError(ts, "gnoland "+cmd, neg, err)
- },
- "gnokey": func(ts *testscript.TestScript, neg bool, args []string) {
- logger := ts.Value(envKeyLogger).(*slog.Logger) // grab logger
- sid := ts.Getenv("SID") // grab session id
-
- // Unquote args enclosed in `"` to correctly handle `\n` or similar escapes.
- args, err := unquote(args)
- if err != nil {
- tsValidateError(ts, "gnokey", neg, err)
- }
-
- // Setup IO command
- io := commands.NewTestIO()
- io.SetOut(commands.WriteNopCloser(ts.Stdout()))
- io.SetErr(commands.WriteNopCloser(ts.Stderr()))
- cmd := keyscli.NewRootCmd(io, client.DefaultBaseOptions)
-
- io.SetIn(strings.NewReader("\n")) // Inject empty password to stdin.
- defaultArgs := []string{
- "-home", gnoHomeDir,
- "-insecure-password-stdin=true", // There no use to not have this param by default.
- }
-
- if n, ok := nodes[sid]; ok {
- if raddr := n.Config().RPC.ListenAddress; raddr != "" {
- defaultArgs = append(defaultArgs, "-remote", raddr)
- }
-
- n.nGnoKeyExec++
- headerlog := fmt.Sprintf("%.02d!EXEC_GNOKEY", n.nGnoKeyExec)
-
- // Log the command inside gnoland logger, so we can better scope errors.
- logger.Info(headerlog, "args", strings.Join(args, " "))
- defer logger.Info(headerlog, "delimiter", "END")
- }
-
- // Inject default argument, if duplicate
- // arguments, it should be override by the ones
- // user provided.
- args = append(defaultArgs, args...)
-
- err = cmd.ParseAndRun(context.Background(), args)
- tsValidateError(ts, "gnokey", neg, err)
- },
- // adduser command must be executed before starting the node; it errors out otherwise.
- "adduser": func(ts *testscript.TestScript, neg bool, args []string) {
- if nodeIsRunning(nodes, getNodeSID(ts)) {
- tsValidateError(ts, "adduser", neg, errors.New("adduser must be used before starting node"))
- return
- }
-
- if len(args) == 0 {
- ts.Fatalf("new user name required")
- }
-
- kb, err := keys.NewKeyBaseFromDir(gnoHomeDir)
- if err != nil {
- ts.Fatalf("unable to get keybase")
- }
-
- balance, err := createAccount(ts, kb, args[0])
- if err != nil {
- ts.Fatalf("error creating account %s: %s", args[0], err)
- }
-
- // Add balance to genesis
- genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState)
- genesis.Balances = append(genesis.Balances, balance)
- },
- // adduserfrom commands must be executed before starting the node; it errors out otherwise.
- "adduserfrom": func(ts *testscript.TestScript, neg bool, args []string) {
- if nodeIsRunning(nodes, getNodeSID(ts)) {
- tsValidateError(ts, "adduserfrom", neg, errors.New("adduserfrom must be used before starting node"))
- return
- }
-
- var account, index uint64
- var err error
-
- switch len(args) {
- case 2:
- // expected user input
- // adduserfrom 'username 'menmonic'
- // no need to do anything
-
- case 4:
- // expected user input
- // adduserfrom 'username 'menmonic' 'account' 'index'
-
- // parse 'index' first, then fallghrough to `case 3` to parse 'account'
- index, err = strconv.ParseUint(args[3], 10, 32)
- if err != nil {
- ts.Fatalf("invalid index number %s", args[3])
- }
-
- fallthrough // parse 'account'
- case 3:
- // expected user input
- // adduserfrom 'username 'menmonic' 'account'
-
- account, err = strconv.ParseUint(args[2], 10, 32)
- if err != nil {
- ts.Fatalf("invalid account number %s", args[2])
- }
- default:
- ts.Fatalf("to create account from metadatas, user name and mnemonic are required ( account and index are optional )")
- }
-
- kb, err := keys.NewKeyBaseFromDir(gnoHomeDir)
- if err != nil {
- ts.Fatalf("unable to get keybase")
- }
-
- balance, err := createAccountFrom(ts, kb, args[0], args[1], uint32(account), uint32(index))
- if err != nil {
- ts.Fatalf("error creating wallet %s", err)
- }
-
- // Add balance to genesis
- genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState)
- genesis.Balances = append(genesis.Balances, balance)
-
- fmt.Fprintf(ts.Stdout(), "Added %s(%s) to genesis", args[0], balance.Address)
- },
- // `patchpkg` Patch any loaded files by packages by replacing all occurrences of the
- // first argument with the second.
- // This is mostly use to replace hardcoded address inside txtar file.
- "patchpkg": func(ts *testscript.TestScript, neg bool, args []string) {
- args, err := unquote(args)
- if err != nil {
- tsValidateError(ts, "patchpkg", neg, err)
- }
-
- if len(args) != 2 {
- ts.Fatalf("`patchpkg`: should have exactly 2 arguments")
- }
-
- pkgs := ts.Value(envKeyPkgsLoader).(*pkgsLoader)
- replace, with := args[0], args[1]
- pkgs.SetPatch(replace, with)
- },
- // `loadpkg` load a specific package from the 'examples' or working directory.
- "loadpkg": func(ts *testscript.TestScript, neg bool, args []string) {
- // special dirs
- workDir := ts.Getenv("WORK")
- examplesDir := filepath.Join(gnoRootDir, "examples")
-
- pkgs := ts.Value(envKeyPkgsLoader).(*pkgsLoader)
-
- var path, name string
- switch len(args) {
- case 2:
- name = args[0]
- path = filepath.Clean(args[1])
- case 1:
- path = filepath.Clean(args[0])
- case 0:
- ts.Fatalf("`loadpkg`: no arguments specified")
- default:
- ts.Fatalf("`loadpkg`: too many arguments specified")
- }
-
- // If `all` is specified, fully load 'examples' directory.
- // NOTE: In 99% of cases, this is not needed, and
- // packages should be loaded individually.
- if path == "all" {
- ts.Logf("warning: loading all packages")
- if err := pkgs.LoadAllPackagesFromDir(examplesDir); err != nil {
- ts.Fatalf("unable to load packages from %q: %s", examplesDir, err)
- }
-
- return
- }
-
- if !strings.HasPrefix(path, workDir) {
- path = filepath.Join(examplesDir, path)
- }
-
- if err := pkgs.LoadPackage(examplesDir, path, name); err != nil {
- ts.Fatalf("`loadpkg` unable to load package(s) from %q: %s", args[0], err)
- }
-
- ts.Logf("%q package was added to genesis", args[0])
- },
- },
- }
-}
-
-// `unquote` takes a slice of strings, resulting from splitting a string block by spaces, and
-// processes them. The function handles quoted phrases and escape characters within these strings.
-func unquote(args []string) ([]string, error) {
- const quote = '"'
-
- parts := []string{}
- var inQuote bool
-
- var part strings.Builder
- for _, arg := range args {
- var escaped bool
- for _, c := range arg {
- if escaped {
- // If the character is meant to be escaped, it is processed with Unquote.
- // We use `Unquote` here for two main reasons:
- // 1. It will validate that the escape sequence is correct
- // 2. It converts the escaped string to its corresponding raw character.
- // For example, "\\t" becomes '\t'.
- uc, err := strconv.Unquote(`"\` + string(c) + `"`)
- if err != nil {
- return nil, fmt.Errorf("unhandled escape sequence `\\%c`: %w", c, err)
- }
-
- part.WriteString(uc)
- escaped = false
- continue
- }
-
- // If we are inside a quoted string and encounter an escape character,
- // flag the next character as `escaped`
- if inQuote && c == '\\' {
- escaped = true
- continue
- }
-
- // Detect quote and toggle inQuote state
- if c == quote {
- inQuote = !inQuote
- continue
- }
-
- // Handle regular character
- part.WriteRune(c)
- }
-
- // If we're inside a quote, add a single space.
- // It reflects one or multiple spaces between args in the original string.
- if inQuote {
- part.WriteRune(' ')
- continue
- }
-
- // Finalize part, add to parts, and reset for next part
- parts = append(parts, part.String())
- part.Reset()
- }
-
- // Check if a quote is left open
- if inQuote {
- return nil, errors.New("unfinished quote")
- }
-
- return parts, nil
-}
-
-func getNodeSID(ts *testscript.TestScript) string {
- return ts.Getenv("SID")
-}
-
-func nodeIsRunning(nodes map[string]*testNode, sid string) bool {
- _, ok := nodes[sid]
- return ok
-}
-
-func getTestingLogger(env *testscript.Env, logname string) (*slog.Logger, error) {
- var path string
-
- if logdir := os.Getenv("LOG_PATH_DIR"); logdir != "" {
- if err := os.MkdirAll(logdir, 0o755); err != nil {
- return nil, fmt.Errorf("unable to make log directory %q", logdir)
- }
-
- var err error
- if path, err = filepath.Abs(filepath.Join(logdir, logname)); err != nil {
- return nil, fmt.Errorf("unable to get absolute path of logdir %q", logdir)
- }
- } else if workdir := env.Getenv("WORK"); workdir != "" {
- path = filepath.Join(workdir, logname)
- } else {
- return tm2Log.NewNoopLogger(), nil
- }
-
- f, err := os.Create(path)
- if err != nil {
- return nil, fmt.Errorf("unable to create log file %q: %w", path, err)
- }
-
- env.Defer(func() {
- if err := f.Close(); err != nil {
- panic(fmt.Errorf("unable to close log file %q: %w", path, err))
- }
- })
-
- // Initialize the logger
- logLevel, err := zapcore.ParseLevel(strings.ToLower(os.Getenv("LOG_LEVEL")))
- if err != nil {
- return nil, fmt.Errorf("unable to parse log level, %w", err)
- }
-
- // Build zap logger for testing
- zapLogger := log.NewZapTestingLogger(f, logLevel)
- env.Defer(func() { zapLogger.Sync() })
-
- env.T().Log("starting logger", path)
- return log.ZapLoggerToSlog(zapLogger), nil
-}
-
-func tsValidateError(ts *testscript.TestScript, cmd string, neg bool, err error) {
- if err != nil {
- fmt.Fprintf(ts.Stderr(), "%q error: %+v\n", cmd, err)
- if !neg {
- ts.Fatalf("unexpected %q command failure: %s", cmd, err)
- }
- } else {
- if neg {
- ts.Fatalf("unexpected %q command success", cmd)
- }
- }
-}
-
-type envSetter interface {
- Setenv(key, value string)
-}
-
-// createAccount creates a new account with the given name and adds it to the keybase.
-func createAccount(env envSetter, kb keys.Keybase, accountName string) (gnoland.Balance, error) {
- var balance gnoland.Balance
- entropy, err := bip39.NewEntropy(256)
- if err != nil {
- return balance, fmt.Errorf("error creating entropy: %w", err)
- }
-
- mnemonic, err := bip39.NewMnemonic(entropy)
- if err != nil {
- return balance, fmt.Errorf("error generating mnemonic: %w", err)
- }
-
- var keyInfo keys.Info
- if keyInfo, err = kb.CreateAccount(accountName, mnemonic, "", "", 0, 0); err != nil {
- return balance, fmt.Errorf("unable to create account: %w", err)
- }
-
- address := keyInfo.GetAddress()
- env.Setenv("USER_SEED_"+accountName, mnemonic)
- env.Setenv("USER_ADDR_"+accountName, address.String())
-
- return gnoland.Balance{
- Address: address,
- Amount: std.Coins{std.NewCoin(ugnot.Denom, 10e6)},
- }, nil
-}
-
-// createAccountFrom creates a new account with the given metadata and adds it to the keybase.
-func createAccountFrom(env envSetter, kb keys.Keybase, accountName, mnemonic string, account, index uint32) (gnoland.Balance, error) {
- var balance gnoland.Balance
-
- // check if mnemonic is valid
- if !bip39.IsMnemonicValid(mnemonic) {
- return balance, fmt.Errorf("invalid mnemonic")
- }
-
- keyInfo, err := kb.CreateAccount(accountName, mnemonic, "", "", account, index)
- if err != nil {
- return balance, fmt.Errorf("unable to create account: %w", err)
- }
-
- address := keyInfo.GetAddress()
- env.Setenv("USER_SEED_"+accountName, mnemonic)
- env.Setenv("USER_ADDR_"+accountName, address.String())
-
- return gnoland.Balance{
- Address: address,
- Amount: std.Coins{std.NewCoin(ugnot.Denom, 10e6)},
- }, nil
-}
-
-type pkgsLoader struct {
- pkgs []gnomod.Pkg
- visited map[string]struct{}
-
- // list of occurrences to patchs with the given value
- // XXX: find a better way
- patchs map[string]string
-}
-
-func newPkgsLoader() *pkgsLoader {
- return &pkgsLoader{
- pkgs: make([]gnomod.Pkg, 0),
- visited: make(map[string]struct{}),
- patchs: make(map[string]string),
- }
-}
-
-func (pl *pkgsLoader) List() gnomod.PkgList {
- return pl.pkgs
-}
-
-func (pl *pkgsLoader) SetPatch(replace, with string) {
- pl.patchs[replace] = with
-}
-
-func (pl *pkgsLoader) LoadPackages(creator bft.Address, fee std.Fee, deposit std.Coins) ([]gnoland.TxWithMetadata, error) {
- pkgslist, err := pl.List().Sort() // sorts packages by their dependencies.
- if err != nil {
- return nil, fmt.Errorf("unable to sort packages: %w", err)
- }
-
- txs := make([]gnoland.TxWithMetadata, len(pkgslist))
- for i, pkg := range pkgslist {
- tx, err := gnoland.LoadPackage(pkg, creator, fee, deposit)
- if err != nil {
- return nil, fmt.Errorf("unable to load pkg %q: %w", pkg.Name, err)
- }
-
- // If any replace value is specified, apply them
- if len(pl.patchs) > 0 {
- for _, msg := range tx.Msgs {
- addpkg, ok := msg.(vm.MsgAddPackage)
- if !ok {
- continue
- }
-
- if addpkg.Package == nil {
- continue
- }
-
- for _, file := range addpkg.Package.Files {
- for replace, with := range pl.patchs {
- file.Body = strings.ReplaceAll(file.Body, replace, with)
- }
- }
- }
- }
-
- txs[i] = gnoland.TxWithMetadata{
- Tx: tx,
- }
- }
-
- return txs, nil
-}
-
-func (pl *pkgsLoader) LoadAllPackagesFromDir(path string) error {
- // list all packages from target path
- pkgslist, err := gnomod.ListPkgs(path)
- if err != nil {
- return fmt.Errorf("listing gno packages: %w", err)
- }
-
- for _, pkg := range pkgslist {
- if !pl.exist(pkg) {
- pl.add(pkg)
- }
- }
-
- return nil
-}
-
-func (pl *pkgsLoader) LoadPackage(modroot string, path, name string) error {
- // Initialize a queue with the root package
- queue := []gnomod.Pkg{{Dir: path, Name: name}}
-
- for len(queue) > 0 {
- // Dequeue the first package
- currentPkg := queue[0]
- queue = queue[1:]
-
- if currentPkg.Dir == "" {
- return fmt.Errorf("no path specified for package")
- }
-
- if currentPkg.Name == "" {
- // Load `gno.mod` information
- gnoModPath := filepath.Join(currentPkg.Dir, "gno.mod")
- gm, err := gnomod.ParseGnoMod(gnoModPath)
- if err != nil {
- return fmt.Errorf("unable to load %q: %w", gnoModPath, err)
- }
- gm.Sanitize()
-
- // Override package info with mod infos
- currentPkg.Name = gm.Module.Mod.Path
- currentPkg.Draft = gm.Draft
-
- pkg, err := gnolang.ReadMemPackage(currentPkg.Dir, currentPkg.Name)
- if err != nil {
- return fmt.Errorf("unable to read package at %q: %w", currentPkg.Dir, err)
- }
- imports, err := packages.Imports(pkg, nil)
- if err != nil {
- return fmt.Errorf("unable to load package imports in %q: %w", currentPkg.Dir, err)
- }
- for _, imp := range imports {
- if imp.PkgPath == currentPkg.Name || gnolang.IsStdlib(imp.PkgPath) {
- continue
- }
- currentPkg.Imports = append(currentPkg.Imports, imp.PkgPath)
- }
- }
-
- if currentPkg.Draft {
- continue // Skip draft package
- }
-
- if pl.exist(currentPkg) {
- continue
- }
- pl.add(currentPkg)
-
- // Add requirements to the queue
- for _, pkgPath := range currentPkg.Imports {
- fullPath := filepath.Join(modroot, pkgPath)
- queue = append(queue, gnomod.Pkg{Dir: fullPath})
- }
- }
-
- return nil
-}
-
-func (pl *pkgsLoader) add(pkg gnomod.Pkg) {
- pl.visited[pkg.Name] = struct{}{}
- pl.pkgs = append(pl.pkgs, pkg)
-}
-
-func (pl *pkgsLoader) exist(pkg gnomod.Pkg) (ok bool) {
- _, ok = pl.visited[pkg.Name]
- return
-}
diff --git a/gno.land/pkg/integration/testscript_gnoland.go b/gno.land/pkg/integration/testscript_gnoland.go
new file mode 100644
index 00000000000..9781799ea7d
--- /dev/null
+++ b/gno.land/pkg/integration/testscript_gnoland.go
@@ -0,0 +1,789 @@
+package integration
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "hash/crc32"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/gnolang/gno/gno.land/pkg/gnoland"
+ "github.com/gnolang/gno/gno.land/pkg/gnoland/ugnot"
+ "github.com/gnolang/gno/gno.land/pkg/keyscli"
+ "github.com/gnolang/gno/gnovm/pkg/gnoenv"
+ bft "github.com/gnolang/gno/tm2/pkg/bft/types"
+ "github.com/gnolang/gno/tm2/pkg/commands"
+ "github.com/gnolang/gno/tm2/pkg/crypto"
+ "github.com/gnolang/gno/tm2/pkg/crypto/bip39"
+ "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
+ "github.com/gnolang/gno/tm2/pkg/crypto/hd"
+ "github.com/gnolang/gno/tm2/pkg/crypto/keys"
+ "github.com/gnolang/gno/tm2/pkg/crypto/keys/client"
+ "github.com/gnolang/gno/tm2/pkg/crypto/secp256k1"
+ "github.com/gnolang/gno/tm2/pkg/std"
+ "github.com/rogpeppe/go-internal/testscript"
+ "github.com/stretchr/testify/require"
+)
+
+const nodeMaxLifespan = time.Second * 30
+
+type envKey int
+
+const (
+ envKeyGenesis envKey = iota
+ envKeyLogger
+ envKeyPkgsLoader
+ envKeyPrivValKey
+ envKeyExecCommand
+ envKeyExecBin
+)
+
+type commandkind int
+
+const (
+ // commandKindBin builds and uses an integration binary to run the testscript
+ // in a separate process. This should be used for any external package that
+ // wants to use test scripts.
+ commandKindBin commandkind = iota
+ // commandKindTesting uses the current testing binary to run the testscript
+ // in a separate process. This command cannot be used outside this package.
+ commandKindTesting
+ // commandKindInMemory runs testscripts in memory.
+ commandKindInMemory
+)
+
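+// The execution mode is read from env.Values[envKeyExecCommand] during setup;
+// when it is not set, commandKindBin is used. As an illustration, a test in
+// this package could opt into the in-memory mode by providing its own Setup
+// before calling SetupGnolandTestscript:
+//
+//	p.Setup = func(env *testscript.Env) error {
+//		env.Values[envKeyExecCommand] = commandKindInMemory
+//		return nil
+//	}
+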
+type tNodeProcess struct {
+ NodeProcess
+ cfg *gnoland.InMemoryNodeConfig
+ nGnoKeyExec uint // Counter for execution of gnokey.
+}
+
+// NodesManager manages access to the nodes map with synchronization.
+type NodesManager struct {
+ nodes map[string]*tNodeProcess
+ mu sync.RWMutex
+}
+
+// NewNodesManager creates a new instance of NodesManager.
+func NewNodesManager() *NodesManager {
+ return &NodesManager{
+ nodes: make(map[string]*tNodeProcess),
+ }
+}
+
+func (nm *NodesManager) IsNodeRunning(sid string) bool {
+ nm.mu.RLock()
+ defer nm.mu.RUnlock()
+
+ _, ok := nm.nodes[sid]
+ return ok
+}
+
+// Get retrieves a node by its SID.
+func (nm *NodesManager) Get(sid string) (*tNodeProcess, bool) {
+ nm.mu.RLock()
+ defer nm.mu.RUnlock()
+ node, exists := nm.nodes[sid]
+ return node, exists
+}
+
+// Set adds or updates a node in the map.
+func (nm *NodesManager) Set(sid string, node *tNodeProcess) {
+ nm.mu.Lock()
+ defer nm.mu.Unlock()
+ nm.nodes[sid] = node
+}
+
+// Delete removes a node from the map.
+func (nm *NodesManager) Delete(sid string) {
+ nm.mu.Lock()
+ defer nm.mu.Unlock()
+ delete(nm.nodes, sid)
+}
+
+func SetupGnolandTestscript(t *testing.T, p *testscript.Params) error {
+ t.Helper()
+
+ gnoRootDir := gnoenv.RootDir()
+
+ nodesManager := NewNodesManager()
+
+ defaultPK, err := GeneratePrivKeyFromMnemonic(DefaultAccount_Seed, "", 0, 0)
+ require.NoError(t, err)
+
+ var buildOnce sync.Once
+ var gnolandBin string
+
+ // Store the original setup scripts for potential wrapping
+ origSetup := p.Setup
+ p.Setup = func(env *testscript.Env) error {
+ // If there's an original setup, execute it
+ if origSetup != nil {
+ if err := origSetup(env); err != nil {
+ return err
+ }
+ }
+
+ cmd, isSet := env.Values[envKeyExecCommand].(commandkind)
+ switch {
+ case !isSet:
+ cmd = commandKindBin // fall back to commandKindBin
+ fallthrough
+ case cmd == commandKindBin:
+ buildOnce.Do(func() {
+ t.Logf("building the gnoland integration node")
+ start := time.Now()
+ gnolandBin = buildGnoland(t, gnoRootDir)
+ t.Logf("time to build the node: %v", time.Since(start).String())
+ })
+
+ env.Values[envKeyExecBin] = gnolandBin
+ }
+
+ tmpdir, dbdir := t.TempDir(), t.TempDir()
+ gnoHomeDir := filepath.Join(tmpdir, "gno")
+
+ kb, err := keys.NewKeyBaseFromDir(gnoHomeDir)
+ if err != nil {
+ return err
+ }
+
+ kb.ImportPrivKey(DefaultAccount_Name, defaultPK, "")
+ env.Setenv("USER_SEED_"+DefaultAccount_Name, DefaultAccount_Seed)
+ env.Setenv("USER_ADDR_"+DefaultAccount_Name, DefaultAccount_Address)
+
+ // New private key
+ env.Values[envKeyPrivValKey] = ed25519.GenPrivKey()
+ env.Setenv("GNO_DBDIR", dbdir)
+
+ // Generate node short id
+ var sid string
+ {
+ works := env.Getenv("WORK")
+ sum := crc32.ChecksumIEEE([]byte(works))
+ sid = strconv.FormatUint(uint64(sum), 16)
+ env.Setenv("SID", sid)
+ }
+
+ balanceFile := LoadDefaultGenesisBalanceFile(t, gnoRootDir)
+ genesisParamFile := LoadDefaultGenesisParamFile(t, gnoRootDir)
+
+ // Track new user balances added via the `adduser`
+ // command and packages added with the `loadpkg` command.
+ // This genesis will be used when the node is started.
+ genesis := &gnoland.GnoGenesisState{
+ Balances: balanceFile,
+ Params: genesisParamFile,
+ Txs: []gnoland.TxWithMetadata{},
+ }
+
+ env.Values[envKeyGenesis] = genesis
+ env.Values[envKeyPkgsLoader] = NewPkgsLoader()
+
+ env.Setenv("GNOROOT", gnoRootDir)
+ env.Setenv("GNOHOME", gnoHomeDir)
+
+ env.Defer(func() {
+ // Gracefully stop the node, if any
+ n, exist := nodesManager.Get(sid)
+ if !exist {
+ return
+ }
+
+ if err := n.Stop(); err != nil {
+ err = fmt.Errorf("unable to stop the node gracefully: %w", err)
+ env.T().Fatal(err.Error())
+ }
+ })
+
+ return nil
+ }
+
+ cmds := map[string]func(ts *testscript.TestScript, neg bool, args []string){
+ "gnoland": gnolandCmd(t, nodesManager, gnoRootDir),
+ "gnokey": gnokeyCmd(nodesManager),
+ "adduser": adduserCmd(nodesManager),
+ "adduserfrom": adduserfromCmd(nodesManager),
+ "patchpkg": patchpkgCmd(),
+ "loadpkg": loadpkgCmd(gnoRootDir),
+ }
+
+ // Initialize cmds map if needed
+ if p.Cmds == nil {
+ p.Cmds = make(map[string]func(ts *testscript.TestScript, neg bool, args []string))
+ }
+
+ // Register the gnoland testscript commands
+ for cmd, call := range cmds {
+ if _, exist := p.Cmds[cmd]; exist {
+ panic(fmt.Errorf("unable register %q: command already exist", cmd))
+ }
+
+ p.Cmds[cmd] = call
+ }
+
+ return nil
+}
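+
+// Illustrative use from an external package (hypothetical test name):
+//
+//	func TestMyTestdata(t *testing.T) {
+//		p := testscript.Params{Dir: "testdata"}
+//		if err := integration.SetupGnolandTestscript(t, &p); err != nil {
+//			t.Fatal(err)
+//		}
+//		testscript.Run(t, p)
+//	}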
+
+func gnolandCmd(t *testing.T, nodesManager *NodesManager, gnoRootDir string) func(ts *testscript.TestScript, neg bool, args []string) {
+ t.Helper()
+
+ defaultPK, err := GeneratePrivKeyFromMnemonic(DefaultAccount_Seed, "", 0, 0)
+ require.NoError(t, err)
+
+ return func(ts *testscript.TestScript, neg bool, args []string) {
+ sid := getNodeSID(ts)
+
+ cmd, cmdargs := "", []string{}
+ if len(args) > 0 {
+ cmd, cmdargs = args[0], args[1:]
+ }
+
+ var err error
+ switch cmd {
+ case "":
+ err = errors.New("no command provided")
+ case "start":
+ if nodesManager.IsNodeRunning(sid) {
+ err = fmt.Errorf("node already started")
+ break
+ }
+
+ // XXX: this is a bit hacky; we should consider moving
+ // gnoland into its own package so it can be used
+ // directly, or use the config command for this.
+ fs := flag.NewFlagSet("start", flag.ContinueOnError)
+ nonVal := fs.Bool("non-validator", false, "set up node as a non-validator")
+ if err := fs.Parse(cmdargs); err != nil {
+ ts.Fatalf("unable to parse `gnoland start` flags: %s", err)
+ }
+
+ pkgs := ts.Value(envKeyPkgsLoader).(*PkgsLoader)
+ defaultFee := std.NewFee(50000, std.MustParseCoin(ugnot.ValueString(1000000)))
+ pkgsTxs, err := pkgs.LoadPackages(defaultPK, defaultFee, nil)
+ if err != nil {
+ ts.Fatalf("unable to load packages txs: %s", err)
+ }
+
+ cfg := TestingMinimalNodeConfig(gnoRootDir)
+ genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState)
+ genesis.Txs = append(pkgsTxs, genesis.Txs...)
+
+ cfg.Genesis.AppState = *genesis
+ if *nonVal {
+ pv := gnoland.NewMockedPrivValidator()
+ cfg.Genesis.Validators = []bft.GenesisValidator{
+ {
+ Address: pv.GetPubKey().Address(),
+ PubKey: pv.GetPubKey(),
+ Power: 10,
+ Name: "none",
+ },
+ }
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), nodeMaxLifespan)
+ ts.Defer(cancel)
+
+ dbdir := ts.Getenv("GNO_DBDIR")
+ priv := ts.Value(envKeyPrivValKey).(ed25519.PrivKeyEd25519)
+ nodep := setupNode(ts, ctx, &ProcessNodeConfig{
+ ValidatorKey: priv,
+ DBDir: dbdir,
+ RootDir: gnoRootDir,
+ TMConfig: cfg.TMConfig,
+ Genesis: NewMarshalableGenesisDoc(cfg.Genesis),
+ })
+
+ nodesManager.Set(sid, &tNodeProcess{NodeProcess: nodep, cfg: cfg})
+
+ ts.Setenv("RPC_ADDR", nodep.Address())
+ fmt.Fprintln(ts.Stdout(), "node started successfully")
+
+ case "restart":
+ node, exists := nodesManager.Get(sid)
+ if !exists {
+ err = fmt.Errorf("node must be started before being restarted")
+ break
+ }
+
+ if err := node.Stop(); err != nil {
+ err = fmt.Errorf("unable to stop the node gracefully: %w", err)
+ break
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), nodeMaxLifespan)
+ ts.Defer(cancel)
+
+ priv := ts.Value(envKeyPrivValKey).(ed25519.PrivKeyEd25519)
+ dbdir := ts.Getenv("GNO_DBDIR")
+ nodep := setupNode(ts, ctx, &ProcessNodeConfig{
+ ValidatorKey: priv,
+ DBDir: dbdir,
+ RootDir: gnoRootDir,
+ TMConfig: node.cfg.TMConfig,
+ Genesis: NewMarshalableGenesisDoc(node.cfg.Genesis),
+ })
+
+ ts.Setenv("RPC_ADDR", nodep.Address())
+ nodesManager.Set(sid, &tNodeProcess{NodeProcess: nodep, cfg: node.cfg})
+
+ fmt.Fprintln(ts.Stdout(), "node restarted successfully")
+
+ case "stop":
+ node, exists := nodesManager.Get(sid)
+ if !exists {
+ err = fmt.Errorf("node not started cannot be stopped")
+ break
+ }
+
+ if err := node.Stop(); err != nil {
+ err = fmt.Errorf("unable to stop the node gracefully: %w", err)
+ break
+ }
+
+ fmt.Fprintln(ts.Stdout(), "node stopped successfully")
+ nodesManager.Delete(sid)
+
+ default:
+ err = fmt.Errorf("not supported command: %q", cmd)
+ // XXX: support gnoland other commands
+ }
+
+ tsValidateError(ts, strings.TrimSpace("gnoland "+cmd), neg, err)
+ }
+}
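+
+// A txtar script can then drive the node lifecycle; for example (illustrative,
+// assuming the default account name "test1"):
+//
+//	gnoland start
+//	gnokey query auth/accounts/$USER_ADDR_test1
+//	gnoland restart
+//	gnoland stop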
+
+func gnokeyCmd(nodes *NodesManager) func(ts *testscript.TestScript, neg bool, args []string) {
+ return func(ts *testscript.TestScript, neg bool, args []string) {
+ gnoHomeDir := ts.Getenv("GNOHOME")
+
+ sid := getNodeSID(ts)
+
+ args, err := unquote(args)
+ if err != nil {
+ tsValidateError(ts, "gnokey", neg, err)
+ }
+
+ io := commands.NewTestIO()
+ io.SetOut(commands.WriteNopCloser(ts.Stdout()))
+ io.SetErr(commands.WriteNopCloser(ts.Stderr()))
+ cmd := keyscli.NewRootCmd(io, client.DefaultBaseOptions)
+
+ io.SetIn(strings.NewReader("\n"))
+ defaultArgs := []string{
+ "-home", gnoHomeDir,
+ "-insecure-password-stdin=true",
+ }
+
+ if n, ok := nodes.Get(sid); ok {
+ if raddr := n.Address(); raddr != "" {
+ defaultArgs = append(defaultArgs, "-remote", raddr)
+ }
+
+ n.nGnoKeyExec++
+ }
+
+ args = append(defaultArgs, args...)
+
+ err = cmd.ParseAndRun(context.Background(), args)
+ tsValidateError(ts, "gnokey", neg, err)
+ }
+}
+
+func adduserCmd(nodesManager *NodesManager) func(ts *testscript.TestScript, neg bool, args []string) {
+ return func(ts *testscript.TestScript, neg bool, args []string) {
+ gnoHomeDir := ts.Getenv("GNOHOME")
+
+ sid := getNodeSID(ts)
+ if nodesManager.IsNodeRunning(sid) {
+ tsValidateError(ts, "adduser", neg, errors.New("adduser must be used before starting node"))
+ return
+ }
+
+ if len(args) == 0 {
+ ts.Fatalf("new user name required")
+ }
+
+ kb, err := keys.NewKeyBaseFromDir(gnoHomeDir)
+ if err != nil {
+ ts.Fatalf("unable to get keybase")
+ }
+
+ balance, err := createAccount(ts, kb, args[0])
+ if err != nil {
+ ts.Fatalf("error creating account %s: %s", args[0], err)
+ }
+
+ genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState)
+ genesis.Balances = append(genesis.Balances, balance)
+ }
+}
+
+func adduserfromCmd(nodesManager *NodesManager) func(ts *testscript.TestScript, neg bool, args []string) {
+ return func(ts *testscript.TestScript, neg bool, args []string) {
+ gnoHomeDir := ts.Getenv("GNOHOME")
+
+ sid := getNodeSID(ts)
+ if nodesManager.IsNodeRunning(sid) {
+ tsValidateError(ts, "adduserfrom", neg, errors.New("adduserfrom must be used before starting node"))
+ return
+ }
+
+ var account, index uint64
+ var err error
+
+ switch len(args) {
+ case 2:
+ case 4:
+ index, err = strconv.ParseUint(args[3], 10, 32)
+ if err != nil {
+ ts.Fatalf("invalid index number %s", args[3])
+ }
+ fallthrough
+ case 3:
+ account, err = strconv.ParseUint(args[2], 10, 32)
+ if err != nil {
+ ts.Fatalf("invalid account number %s", args[2])
+ }
+ default:
+ ts.Fatalf("to create account from metadatas, user name and mnemonic are required ( account and index are optional )")
+ }
+
+ kb, err := keys.NewKeyBaseFromDir(gnoHomeDir)
+ if err != nil {
+ ts.Fatalf("unable to get keybase")
+ }
+
+ balance, err := createAccountFrom(ts, kb, args[0], args[1], uint32(account), uint32(index))
+ if err != nil {
+ ts.Fatalf("error creating wallet %s", err)
+ }
+
+ genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState)
+ genesis.Balances = append(genesis.Balances, balance)
+
+ fmt.Fprintf(ts.Stdout(), "Added %s(%s) to genesis", args[0], balance.Address)
+ }
+}
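+
+// Illustrative txtar usage of the account helpers above: `adduser` takes only
+// a key name, while `adduserfrom` takes a name and a mnemonic plus optional
+// BIP44 account and index numbers (placeholder mnemonic shown):
+//
+//	adduser bob
+//	adduserfrom alice 'word1 word2 ... word24' 0 1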
+
+func patchpkgCmd() func(ts *testscript.TestScript, neg bool, args []string) {
+ return func(ts *testscript.TestScript, neg bool, args []string) {
+ args, err := unquote(args)
+ if err != nil {
+ tsValidateError(ts, "patchpkg", neg, err)
+ }
+
+ if len(args) != 2 {
+ ts.Fatalf("`patchpkg`: should have exactly 2 arguments")
+ }
+
+ pkgs := ts.Value(envKeyPkgsLoader).(*PkgsLoader)
+ replace, with := args[0], args[1]
+ pkgs.SetPatch(replace, with)
+ }
+}
+
+func loadpkgCmd(gnoRootDir string) func(ts *testscript.TestScript, neg bool, args []string) {
+ return func(ts *testscript.TestScript, neg bool, args []string) {
+ workDir := ts.Getenv("WORK")
+ examplesDir := filepath.Join(gnoRootDir, "examples")
+
+ pkgs := ts.Value(envKeyPkgsLoader).(*PkgsLoader)
+
+ var path, name string
+ switch len(args) {
+ case 2:
+ name = args[0]
+ path = filepath.Clean(args[1])
+ case 1:
+ path = filepath.Clean(args[0])
+ case 0:
+ ts.Fatalf("`loadpkg`: no arguments specified")
+ default:
+ ts.Fatalf("`loadpkg`: too many arguments specified")
+ }
+
+ if path == "all" {
+ ts.Logf("warning: loading all packages")
+ if err := pkgs.LoadAllPackagesFromDir(examplesDir); err != nil {
+ ts.Fatalf("unable to load packages from %q: %s", examplesDir, err)
+ }
+
+ return
+ }
+
+ if !strings.HasPrefix(path, workDir) {
+ path = filepath.Join(examplesDir, path)
+ }
+
+ if err := pkgs.LoadPackage(examplesDir, path, name); err != nil {
+ ts.Fatalf("`loadpkg` unable to load package(s) from %q: %s", args[0], err)
+ }
+
+ ts.Logf("%q package was added to genesis", args[0])
+ }
+}
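+
+// Illustrative txtar usage: a bare path is resolved under the examples
+// directory unless it points inside $WORK, and `all` preloads every example
+// package:
+//
+//	loadpkg gno.land/p/demo/ufmt
+//	loadpkg foo $WORK/foo
+//	loadpkg all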
+
+type tsLogWriter struct {
+ ts *testscript.TestScript
+}
+
+func (l *tsLogWriter) Write(p []byte) (n int, err error) {
+ l.ts.Logf("%s", string(p))
+ return len(p), nil
+}
+
+func setupNode(ts *testscript.TestScript, ctx context.Context, cfg *ProcessNodeConfig) NodeProcess {
+ pcfg := ProcessConfig{
+ Node: cfg,
+ Stdout: &tsLogWriter{ts},
+ Stderr: ts.Stderr(),
+ }
+
+ // Set up the coverage directory if provided
+ if coverdir := ts.Getenv("GOCOVERDIR"); coverdir != "" {
+ pcfg.CoverDir = coverdir
+ }
+
+ val := ts.Value(envKeyExecCommand)
+
+ switch cmd := val.(commandkind); cmd {
+ case commandKindInMemory:
+ nodep, err := RunInMemoryProcess(ctx, pcfg)
+ if err != nil {
+ ts.Fatalf("unable to start in memory node: %s", err)
+ }
+
+ return nodep
+
+ case commandKindTesting:
+ if !testing.Testing() {
+ ts.Fatalf("unable to invoke testing process while not testing")
+ }
+
+ return runTestingNodeProcess(&testingTS{ts}, ctx, pcfg)
+
+ case commandKindBin:
+ bin := ts.Value(envKeyExecBin).(string)
+ nodep, err := RunNodeProcess(ctx, pcfg, bin)
+ if err != nil {
+ ts.Fatalf("unable to start process node: %s", err)
+ }
+
+ return nodep
+
+ default:
+ ts.Fatalf("unknown command kind: %+v", cmd)
+ }
+
+ return nil
+}
+
+// `unquote` takes a slice of strings, resulting from splitting a string block by spaces, and
+// processes them. The function handles quoted phrases and escape characters within these strings.
+func unquote(args []string) ([]string, error) {
+ const quote = '"'
+
+ parts := []string{}
+ var inQuote bool
+
+ var part strings.Builder
+ for _, arg := range args {
+ var escaped bool
+ for _, c := range arg {
+ if escaped {
+ // If the character is meant to be escaped, it is processed with Unquote.
+ // We use `Unquote` here for two main reasons:
+ // 1. It will validate that the escape sequence is correct
+ // 2. It converts the escaped string to its corresponding raw character.
+ // For example, "\\t" becomes '\t'.
+ uc, err := strconv.Unquote(`"\` + string(c) + `"`)
+ if err != nil {
+ return nil, fmt.Errorf("unhandled escape sequence `\\%c`: %w", c, err)
+ }
+
+ part.WriteString(uc)
+ escaped = false
+ continue
+ }
+
+ // If we are inside a quoted string and encounter an escape character,
+ // flag the next character as `escaped`
+ if inQuote && c == '\\' {
+ escaped = true
+ continue
+ }
+
+ // Detect quote and toggle inQuote state
+ if c == quote {
+ inQuote = !inQuote
+ continue
+ }
+
+ // Handle regular character
+ part.WriteRune(c)
+ }
+
+ // If we're inside a quote, add a single space.
+ // It reflects one or multiple spaces between args in the original string.
+ if inQuote {
+ part.WriteRune(' ')
+ continue
+ }
+
+ // Finalize part, add to parts, and reset for next part
+ parts = append(parts, part.String())
+ part.Reset()
+ }
+
+ // Check if a quote is left open
+ if inQuote {
+ return nil, errors.New("unfinished quote")
+ }
+
+ return parts, nil
+}
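+
+// For example, unquote([]string{`"hello`, `world"`}) returns
+// []string{"hello world"}, and a `\t` inside quotes becomes a tab character.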
+
+func getNodeSID(ts *testscript.TestScript) string {
+ return ts.Getenv("SID")
+}
+
+func tsValidateError(ts *testscript.TestScript, cmd string, neg bool, err error) {
+ if err != nil {
+ fmt.Fprintf(ts.Stderr(), "%q error: %+v\n", cmd, err)
+ if !neg {
+ ts.Fatalf("unexpected %q command failure: %s", cmd, err)
+ }
+ } else {
+ if neg {
+ ts.Fatalf("unexpected %q command success", cmd)
+ }
+ }
+}
+
+type envSetter interface {
+ Setenv(key, value string)
+}
+
+// createAccount creates a new account with the given name and adds it to the keybase.
+func createAccount(env envSetter, kb keys.Keybase, accountName string) (gnoland.Balance, error) {
+ var balance gnoland.Balance
+ entropy, err := bip39.NewEntropy(256)
+ if err != nil {
+ return balance, fmt.Errorf("error creating entropy: %w", err)
+ }
+
+ mnemonic, err := bip39.NewMnemonic(entropy)
+ if err != nil {
+ return balance, fmt.Errorf("error generating mnemonic: %w", err)
+ }
+
+ var keyInfo keys.Info
+ if keyInfo, err = kb.CreateAccount(accountName, mnemonic, "", "", 0, 0); err != nil {
+ return balance, fmt.Errorf("unable to create account: %w", err)
+ }
+
+ address := keyInfo.GetAddress()
+ env.Setenv("USER_SEED_"+accountName, mnemonic)
+ env.Setenv("USER_ADDR_"+accountName, address.String())
+
+ return gnoland.Balance{
+ Address: address,
+ Amount: std.Coins{std.NewCoin(ugnot.Denom, 10e6)},
+ }, nil
+}
+
+// createAccountFrom creates a new account with the given metadata and adds it to the keybase.
+func createAccountFrom(env envSetter, kb keys.Keybase, accountName, mnemonic string, account, index uint32) (gnoland.Balance, error) {
+ var balance gnoland.Balance
+
+ // check if mnemonic is valid
+ if !bip39.IsMnemonicValid(mnemonic) {
+ return balance, fmt.Errorf("invalid mnemonic")
+ }
+
+ keyInfo, err := kb.CreateAccount(accountName, mnemonic, "", "", account, index)
+ if err != nil {
+ return balance, fmt.Errorf("unable to create account: %w", err)
+ }
+
+ address := keyInfo.GetAddress()
+ env.Setenv("USER_SEED_"+accountName, mnemonic)
+ env.Setenv("USER_ADDR_"+accountName, address.String())
+
+ return gnoland.Balance{
+ Address: address,
+ Amount: std.Coins{std.NewCoin(ugnot.Denom, 10e6)},
+ }, nil
+}
+
+func buildGnoland(t *testing.T, rootdir string) string {
+ t.Helper()
+
+ bin := filepath.Join(t.TempDir(), "gnoland-test")
+
+ t.Log("building gnoland integration binary...")
+
+ // Build a fresh gnoland binary in a temp directory
+ gnoArgsBuilder := []string{"build", "-o", bin}
+
+ // Forward `-covermode` settings if set
+ if coverMode := testing.CoverMode(); coverMode != "" {
+ gnoArgsBuilder = append(gnoArgsBuilder,
+ "-covermode", coverMode,
+ )
+ }
+
+ // Append the path to the gnoland integration process source
+ gnoArgsBuilder = append(gnoArgsBuilder, filepath.Join(rootdir,
+ "gno.land", "pkg", "integration", "process"))
+
+ t.Logf("build command: %s", strings.Join(gnoArgsBuilder, " "))
+
+ cmd := exec.Command("go", gnoArgsBuilder...)
+
+ var buff bytes.Buffer
+ cmd.Stderr, cmd.Stdout = &buff, &buff
+ defer buff.Reset()
+
+ if err := cmd.Run(); err != nil {
+ require.FailNowf(t, "unable to build binary", "%q\n%s",
+ err.Error(), buff.String())
+ }
+
+ return bin
+}
+
+// GeneratePrivKeyFromMnemonic generates a crypto.PrivKey from a mnemonic.
+func GeneratePrivKeyFromMnemonic(mnemonic, bip39Passphrase string, account, index uint32) (crypto.PrivKey, error) {
+ // Generate Seed from Mnemonic
+ seed, err := bip39.NewSeedWithErrorChecking(mnemonic, bip39Passphrase)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate seed: %w", err)
+ }
+
+ // Derive Private Key
+ coinType := crypto.CoinType // ensure this is set correctly in your context
+ hdPath := hd.NewFundraiserParams(account, coinType, index)
+ masterPriv, ch := hd.ComputeMastersFromSeed(seed)
+ derivedPriv, err := hd.DerivePrivateKeyForPath(masterPriv, ch, hdPath.String())
+ if err != nil {
+ return nil, fmt.Errorf("failed to derive private key: %w", err)
+ }
+
+ // Convert to secp256k1 private key
+ privKey := secp256k1.PrivKeySecp256k1(derivedPriv)
+ return privKey, nil
+}
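+
+// Note: hd.NewFundraiserParams builds the BIP44 fundraiser derivation path,
+// i.e. m/44'/<coin type>'/<account>'/0/<index>.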
diff --git a/gno.land/pkg/integration/integration_test.go b/gno.land/pkg/integration/testscript_gnoland_test.go
similarity index 93%
rename from gno.land/pkg/integration/integration_test.go
rename to gno.land/pkg/integration/testscript_gnoland_test.go
index 99a3e6c7eca..2c301064969 100644
--- a/gno.land/pkg/integration/integration_test.go
+++ b/gno.land/pkg/integration/testscript_gnoland_test.go
@@ -8,12 +8,6 @@ import (
"github.com/stretchr/testify/require"
)
-func TestTestdata(t *testing.T) {
- t.Parallel()
-
- RunGnolandTestscripts(t, "testdata")
-}
-
func TestUnquote(t *testing.T) {
t.Parallel()
diff --git a/gno.land/pkg/integration/testing.go b/gno.land/pkg/integration/testscript_testing.go
similarity index 95%
rename from gno.land/pkg/integration/testing.go
rename to gno.land/pkg/integration/testscript_testing.go
index 0cd3152d888..9eed180dd8b 100644
--- a/gno.land/pkg/integration/testing.go
+++ b/gno.land/pkg/integration/testscript_testing.go
@@ -2,6 +2,7 @@ package integration
import (
"errors"
+ "testing"
"github.com/rogpeppe/go-internal/testscript"
"github.com/stretchr/testify/assert"
@@ -16,6 +17,7 @@ var errFailNow = errors.New("fail now!") //nolint:stylecheck
var (
_ require.TestingT = (*testingTS)(nil)
_ assert.TestingT = (*testingTS)(nil)
+ _ TestingTS = &testing.T{}
)
type TestingTS = require.TestingT
diff --git a/gnovm/cmd/gno/testdata_test.go b/gnovm/cmd/gno/testdata_test.go
index 6b1bbd1d459..c5cb0def04e 100644
--- a/gnovm/cmd/gno/testdata_test.go
+++ b/gnovm/cmd/gno/testdata_test.go
@@ -3,7 +3,6 @@ package main
import (
"os"
"path/filepath"
- "strconv"
"testing"
"github.com/gnolang/gno/gnovm/pkg/integration"
@@ -18,25 +17,23 @@ func Test_Scripts(t *testing.T) {
testdirs, err := os.ReadDir(testdata)
require.NoError(t, err)
+ homeDir, buildDir := t.TempDir(), t.TempDir()
for _, dir := range testdirs {
if !dir.IsDir() {
continue
}
name := dir.Name()
+ t.Logf("testing: %s", name)
t.Run(name, func(t *testing.T) {
- updateScripts, _ := strconv.ParseBool(os.Getenv("UPDATE_SCRIPTS"))
- p := testscript.Params{
- UpdateScripts: updateScripts,
- Dir: filepath.Join(testdata, name),
- }
-
+ testdir := filepath.Join(testdata, name)
+ p := integration.NewTestingParams(t, testdir)
if coverdir, ok := integration.ResolveCoverageDir(); ok {
err := integration.SetupTestscriptsCoverage(&p, coverdir)
require.NoError(t, err)
}
- err := integration.SetupGno(&p, t.TempDir())
+ err := integration.SetupGno(&p, homeDir, buildDir)
require.NoError(t, err)
testscript.Run(t, p)
diff --git a/gnovm/pkg/gnolang/op_bench_test.go b/gnovm/pkg/gnolang/op_bench_test.go
new file mode 100644
index 00000000000..5874f980285
--- /dev/null
+++ b/gnovm/pkg/gnolang/op_bench_test.go
@@ -0,0 +1,70 @@
+package gnolang
+
+import (
+ "testing"
+
+ "github.com/gnolang/gno/tm2/pkg/overflow"
+)
+
+func BenchmarkOpAdd(b *testing.B) {
+ m := NewMachine("bench", nil)
+ x := TypedValue{T: IntType}
+ x.SetInt(4)
+ y := TypedValue{T: IntType}
+ y.SetInt(3)
+
+ b.ResetTimer()
+
+ for range b.N {
+ m.PushOp(OpHalt)
+ m.PushExpr(&BinaryExpr{})
+ m.PushValue(x)
+ m.PushValue(y)
+ m.PushOp(OpAdd)
+ m.Run()
+ }
+}
+
+//go:noinline
+func AddNoOverflow(x, y int) int { return x + y }
+
+func BenchmarkAddNoOverflow(b *testing.B) {
+ x, y := 4, 3
+ c := 0
+ for range b.N {
+ c = AddNoOverflow(x, y)
+ }
+ if c != 7 {
+ b.Error("invalid result")
+ }
+}
+
+func BenchmarkAddOverflow(b *testing.B) {
+ x, y := 4, 3
+ c := 0
+ for range b.N {
+ c = overflow.Addp(x, y)
+ }
+ if c != 7 {
+ b.Error("invalid result")
+ }
+}
+
+func TestOpAdd1(t *testing.T) {
+ m := NewMachine("test", nil)
+ a := TypedValue{T: IntType}
+ a.SetInt(4)
+ b := TypedValue{T: IntType}
+ b.SetInt(3)
+ t.Log("a:", a, "b:", b)
+
+ start := m.NumValues
+ m.PushOp(OpHalt)
+ m.PushExpr(&BinaryExpr{})
+ m.PushValue(a)
+ m.PushValue(b)
+ m.PushOp(OpAdd)
+ m.Run()
+ res := m.ReapValues(start)
+ t.Log("res:", res)
+}
diff --git a/gnovm/pkg/gnolang/op_binary.go b/gnovm/pkg/gnolang/op_binary.go
index 6d26fa7ce54..0f66da5e685 100644
--- a/gnovm/pkg/gnolang/op_binary.go
+++ b/gnovm/pkg/gnolang/op_binary.go
@@ -6,6 +6,7 @@ import (
"math/big"
"github.com/cockroachdb/apd/v3"
+ "github.com/gnolang/gno/tm2/pkg/overflow"
)
// ----------------------------------------
@@ -183,7 +184,9 @@ func (m *Machine) doOpAdd() {
}
// add rv to lv.
- addAssign(m.Alloc, lv, rv)
+ if err := addAssign(m.Alloc, lv, rv); err != nil {
+ panic(err)
+ }
}
func (m *Machine) doOpSub() {
@@ -197,7 +200,9 @@ func (m *Machine) doOpSub() {
}
// sub rv from lv.
- subAssign(lv, rv)
+ if err := subAssign(lv, rv); err != nil {
+ panic(err)
+ }
}
func (m *Machine) doOpBor() {
@@ -253,8 +258,7 @@ func (m *Machine) doOpQuo() {
}
// lv / rv
- err := quoAssign(lv, rv)
- if err != nil {
+ if err := quoAssign(lv, rv); err != nil {
panic(err)
}
}
@@ -270,8 +274,7 @@ func (m *Machine) doOpRem() {
}
// lv % rv
- err := remAssign(lv, rv)
- if err != nil {
+ if err := remAssign(lv, rv); err != nil {
panic(err)
}
}
@@ -683,23 +686,38 @@ func isGeq(lv, rv *TypedValue) bool {
}
}
-// for doOpAdd and doOpAddAssign.
-func addAssign(alloc *Allocator, lv, rv *TypedValue) {
+// addAssign adds rv to lv and stores the result in lv.
+// It returns an exception in case of overflow on signed integers.
+// The assignment is performed even in case of exception.
+func addAssign(alloc *Allocator, lv, rv *TypedValue) *Exception {
// set the result in lv.
// NOTE this block is replicated in op_assign.go
+ ok := true
switch baseOf(lv.T) {
case StringType, UntypedStringType:
lv.V = alloc.NewString(lv.GetString() + rv.GetString())
+ // Signed integers may overflow, which triggers an exception.
case IntType:
- lv.SetInt(lv.GetInt() + rv.GetInt())
+ var r int
+ r, ok = overflow.Add(lv.GetInt(), rv.GetInt())
+ lv.SetInt(r)
case Int8Type:
- lv.SetInt8(lv.GetInt8() + rv.GetInt8())
+ var r int8
+ r, ok = overflow.Add8(lv.GetInt8(), rv.GetInt8())
+ lv.SetInt8(r)
case Int16Type:
- lv.SetInt16(lv.GetInt16() + rv.GetInt16())
+ var r int16
+ r, ok = overflow.Add16(lv.GetInt16(), rv.GetInt16())
+ lv.SetInt16(r)
case Int32Type, UntypedRuneType:
- lv.SetInt32(lv.GetInt32() + rv.GetInt32())
+ var r int32
+ r, ok = overflow.Add32(lv.GetInt32(), rv.GetInt32())
+ lv.SetInt32(r)
case Int64Type:
- lv.SetInt64(lv.GetInt64() + rv.GetInt64())
+ var r int64
+ r, ok = overflow.Add64(lv.GetInt64(), rv.GetInt64())
+ lv.SetInt64(r)
+ // Unsigned integers do not overflow, they just wrap.
case UintType:
lv.SetUint(lv.GetUint() + rv.GetUint())
case Uint8Type:
@@ -739,23 +757,42 @@ func addAssign(alloc *Allocator, lv, rv *TypedValue) {
lv.T,
))
}
+ if !ok {
+ return &Exception{Value: typedString("addition overflow")}
+ }
+ return nil
}
-// for doOpSub and doOpSubAssign.
-func subAssign(lv, rv *TypedValue) {
+// subAssign subtracts rv from lv and stores the result in lv.
+// It returns an exception in case of overflow on signed integers.
+// The subtraction is performed even in case of exception.
+func subAssign(lv, rv *TypedValue) *Exception {
// set the result in lv.
// NOTE this block is replicated in op_assign.go
+ ok := true
switch baseOf(lv.T) {
+ // Signed integers may overflow, which triggers an exception.
case IntType:
- lv.SetInt(lv.GetInt() - rv.GetInt())
+ var r int
+ r, ok = overflow.Sub(lv.GetInt(), rv.GetInt())
+ lv.SetInt(r)
case Int8Type:
- lv.SetInt8(lv.GetInt8() - rv.GetInt8())
+ var r int8
+ r, ok = overflow.Sub8(lv.GetInt8(), rv.GetInt8())
+ lv.SetInt8(r)
case Int16Type:
- lv.SetInt16(lv.GetInt16() - rv.GetInt16())
+ var r int16
+ r, ok = overflow.Sub16(lv.GetInt16(), rv.GetInt16())
+ lv.SetInt16(r)
case Int32Type, UntypedRuneType:
- lv.SetInt32(lv.GetInt32() - rv.GetInt32())
+ var r int32
+ r, ok = overflow.Sub32(lv.GetInt32(), rv.GetInt32())
+ lv.SetInt32(r)
case Int64Type:
- lv.SetInt64(lv.GetInt64() - rv.GetInt64())
+ var r int64
+ r, ok = overflow.Sub64(lv.GetInt64(), rv.GetInt64())
+ lv.SetInt64(r)
+ // Unsigned integers do not overflow, they just wrap.
case UintType:
lv.SetUint(lv.GetUint() - rv.GetUint())
case Uint8Type:
@@ -795,23 +832,39 @@ func subAssign(lv, rv *TypedValue) {
lv.T,
))
}
+ if !ok {
+ return &Exception{Value: typedString("subtraction overflow")}
+ }
+ return nil
}
// for doOpMul and doOpMulAssign.
-func mulAssign(lv, rv *TypedValue) {
+func mulAssign(lv, rv *TypedValue) *Exception {
// set the result in lv.
// NOTE this block is replicated in op_assign.go
+ ok := true
switch baseOf(lv.T) {
+ // Signed integers may overflow, which triggers an exception.
case IntType:
- lv.SetInt(lv.GetInt() * rv.GetInt())
+ var r int
+ r, ok = overflow.Mul(lv.GetInt(), rv.GetInt())
+ lv.SetInt(r)
case Int8Type:
- lv.SetInt8(lv.GetInt8() * rv.GetInt8())
+ var r int8
+ r, ok = overflow.Mul8(lv.GetInt8(), rv.GetInt8())
+ lv.SetInt8(r)
case Int16Type:
- lv.SetInt16(lv.GetInt16() * rv.GetInt16())
+ var r int16
+ r, ok = overflow.Mul16(lv.GetInt16(), rv.GetInt16())
+ lv.SetInt16(r)
case Int32Type, UntypedRuneType:
- lv.SetInt32(lv.GetInt32() * rv.GetInt32())
+ var r int32
+ r, ok = overflow.Mul32(lv.GetInt32(), rv.GetInt32())
+ lv.SetInt32(r)
case Int64Type:
- lv.SetInt64(lv.GetInt64() * rv.GetInt64())
+ var r int64
+ r, ok = overflow.Mul64(lv.GetInt64(), rv.GetInt64())
+ lv.SetInt64(r)
case UintType:
lv.SetUint(lv.GetUint() * rv.GetUint())
case Uint8Type:
@@ -849,96 +902,105 @@ func mulAssign(lv, rv *TypedValue) {
lv.T,
))
}
+ if !ok {
+ return &Exception{Value: typedString("multiplication overflow")}
+ }
+ return nil
}
// for doOpQuo and doOpQuoAssign.
func quoAssign(lv, rv *TypedValue) *Exception {
- expt := &Exception{
- Value: typedString("division by zero"),
- }
-
// set the result in lv.
// NOTE this block is replicated in op_assign.go
+ ok := true
switch baseOf(lv.T) {
+ // Signed integers may overflow or cause a division by 0, which triggers an exception.
case IntType:
- if rv.GetInt() == 0 {
- return expt
- }
- lv.SetInt(lv.GetInt() / rv.GetInt())
+ var q int
+ q, _, ok = overflow.Quotient(lv.GetInt(), rv.GetInt())
+ lv.SetInt(q)
case Int8Type:
- if rv.GetInt8() == 0 {
- return expt
- }
- lv.SetInt8(lv.GetInt8() / rv.GetInt8())
+ var q int8
+ q, _, ok = overflow.Quotient8(lv.GetInt8(), rv.GetInt8())
+ lv.SetInt8(q)
case Int16Type:
- if rv.GetInt16() == 0 {
- return expt
- }
- lv.SetInt16(lv.GetInt16() / rv.GetInt16())
+ var q int16
+ q, _, ok = overflow.Quotient16(lv.GetInt16(), rv.GetInt16())
+ lv.SetInt16(q)
case Int32Type, UntypedRuneType:
- if rv.GetInt32() == 0 {
- return expt
- }
- lv.SetInt32(lv.GetInt32() / rv.GetInt32())
+ var q int32
+ q, _, ok = overflow.Quotient32(lv.GetInt32(), rv.GetInt32())
+ lv.SetInt32(q)
case Int64Type:
- if rv.GetInt64() == 0 {
- return expt
- }
- lv.SetInt64(lv.GetInt64() / rv.GetInt64())
+ var q int64
+ q, _, ok = overflow.Quotient64(lv.GetInt64(), rv.GetInt64())
+ lv.SetInt64(q)
+ // Unsigned integers do not cause overflow, but a division by 0 may still occur.
case UintType:
- if rv.GetUint() == 0 {
- return expt
+ y := rv.GetUint()
+ ok = y != 0
+ if ok {
+ lv.SetUint(lv.GetUint() / y)
}
- lv.SetUint(lv.GetUint() / rv.GetUint())
case Uint8Type:
- if rv.GetUint8() == 0 {
- return expt
+ y := rv.GetUint8()
+ ok = y != 0
+ if ok {
+ lv.SetUint8(lv.GetUint8() / y)
}
- lv.SetUint8(lv.GetUint8() / rv.GetUint8())
case DataByteType:
- if rv.GetUint8() == 0 {
- return expt
+ y := rv.GetUint8()
+ ok = y != 0
+ if ok {
+ lv.SetDataByte(lv.GetDataByte() / y)
}
- lv.SetDataByte(lv.GetDataByte() / rv.GetUint8())
case Uint16Type:
- if rv.GetUint16() == 0 {
- return expt
+ y := rv.GetUint16()
+ ok = y != 0
+ if ok {
+ lv.SetUint16(lv.GetUint16() / y)
}
- lv.SetUint16(lv.GetUint16() / rv.GetUint16())
case Uint32Type:
- if rv.GetUint32() == 0 {
- return expt
+ y := rv.GetUint32()
+ ok = y != 0
+ if ok {
+ lv.SetUint32(lv.GetUint32() / y)
}
- lv.SetUint32(lv.GetUint32() / rv.GetUint32())
case Uint64Type:
- if rv.GetUint64() == 0 {
- return expt
+ y := rv.GetUint64()
+ ok = y != 0
+ if ok {
+ lv.SetUint64(lv.GetUint64() / y)
}
- lv.SetUint64(lv.GetUint64() / rv.GetUint64())
+ // XXX Handling float overflows is more complex.
case Float32Type:
// NOTE: gno doesn't fuse *+.
- if rv.GetFloat32() == 0 {
- return expt
+ y := rv.GetFloat32()
+ ok = y != 0
+ if ok {
+ lv.SetFloat32(lv.GetFloat32() / y)
}
- lv.SetFloat32(lv.GetFloat32() / rv.GetFloat32())
// XXX FOR DETERMINISM, PANIC IF NAN.
case Float64Type:
// NOTE: gno doesn't fuse *+.
- if rv.GetFloat64() == 0 {
- return expt
+ y := rv.GetFloat64()
+ ok = y != 0
+ if ok {
+ lv.SetFloat64(lv.GetFloat64() / y)
}
- lv.SetFloat64(lv.GetFloat64() / rv.GetFloat64())
// XXX FOR DETERMINISM, PANIC IF NAN.
case BigintType, UntypedBigintType:
if rv.GetBigInt().Sign() == 0 {
- return expt
+ ok = false
+ break
}
lb := lv.GetBigInt()
lb = big.NewInt(0).Quo(lb, rv.GetBigInt())
lv.V = BigintValue{V: lb}
case BigdecType, UntypedBigdecType:
if rv.GetBigDec().Cmp(apd.New(0, 0)) == 0 {
- return expt
+ ok = false
+ break
}
lb := lv.GetBigDec()
rb := rv.GetBigDec()
@@ -955,81 +1017,83 @@ func quoAssign(lv, rv *TypedValue) *Exception {
))
}
+ if !ok {
+ return &Exception{Value: typedString("division by zero or overflow")}
+ }
return nil
}
// for doOpRem and doOpRemAssign.
func remAssign(lv, rv *TypedValue) *Exception {
- expt := &Exception{
- Value: typedString("division by zero"),
- }
-
// set the result in lv.
// NOTE this block is replicated in op_assign.go
+ ok := true
switch baseOf(lv.T) {
+ // Signed integers may overflow or cause a division by 0, which triggers an exception.
case IntType:
- if rv.GetInt() == 0 {
- return expt
- }
- lv.SetInt(lv.GetInt() % rv.GetInt())
+ var r int
+ _, r, ok = overflow.Quotient(lv.GetInt(), rv.GetInt())
+ lv.SetInt(r)
case Int8Type:
- if rv.GetInt8() == 0 {
- return expt
- }
- lv.SetInt8(lv.GetInt8() % rv.GetInt8())
+ var r int8
+ _, r, ok = overflow.Quotient8(lv.GetInt8(), rv.GetInt8())
+ lv.SetInt8(r)
case Int16Type:
- if rv.GetInt16() == 0 {
- return expt
- }
- lv.SetInt16(lv.GetInt16() % rv.GetInt16())
+ var r int16
+ _, r, ok = overflow.Quotient16(lv.GetInt16(), rv.GetInt16())
+ lv.SetInt16(r)
case Int32Type, UntypedRuneType:
- if rv.GetInt32() == 0 {
- return expt
- }
- lv.SetInt32(lv.GetInt32() % rv.GetInt32())
+ var r int32
+ _, r, ok = overflow.Quotient32(lv.GetInt32(), rv.GetInt32())
+ lv.SetInt32(r)
case Int64Type:
- if rv.GetInt64() == 0 {
- return expt
- }
- lv.SetInt64(lv.GetInt64() % rv.GetInt64())
+ var r int64
+ _, r, ok = overflow.Quotient64(lv.GetInt64(), rv.GetInt64())
+ lv.SetInt64(r)
+ // Unsigned integers do not cause overflow, but a division by 0 may still occur.
case UintType:
- if rv.GetUint() == 0 {
- return expt
+ y := rv.GetUint()
+ ok = y != 0
+ if ok {
+ lv.SetUint(lv.GetUint() % y)
}
- lv.SetUint(lv.GetUint() % rv.GetUint())
case Uint8Type:
- if rv.GetUint8() == 0 {
- return expt
+ y := rv.GetUint8()
+ ok = y != 0
+ if ok {
+ lv.SetUint8(lv.GetUint8() % y)
}
- lv.SetUint8(lv.GetUint8() % rv.GetUint8())
case DataByteType:
- if rv.GetUint8() == 0 {
- return expt
+ y := rv.GetUint8()
+ ok = y != 0
+ if ok {
+ lv.SetDataByte(lv.GetDataByte() % y)
}
- lv.SetDataByte(lv.GetDataByte() % rv.GetUint8())
case Uint16Type:
- if rv.GetUint16() == 0 {
- return expt
+ y := rv.GetUint16()
+ ok = y != 0
+ if ok {
+ lv.SetUint16(lv.GetUint16() % y)
}
- lv.SetUint16(lv.GetUint16() % rv.GetUint16())
case Uint32Type:
- if rv.GetUint32() == 0 {
- return expt
+ y := rv.GetUint32()
+ ok = y != 0
+ if ok {
+ lv.SetUint32(lv.GetUint32() % y)
}
- lv.SetUint32(lv.GetUint32() % rv.GetUint32())
case Uint64Type:
- if rv.GetUint64() == 0 {
- return expt
+ y := rv.GetUint64()
+ ok = y != 0
+ if ok {
+ lv.SetUint64(lv.GetUint64() % y)
}
- lv.SetUint64(lv.GetUint64() % rv.GetUint64())
case BigintType, UntypedBigintType:
- if rv.GetBigInt().Sign() == 0 {
- return expt
+ ok = rv.GetBigInt().Sign() != 0
+ if ok {
+ lb := lv.GetBigInt()
+ lb = big.NewInt(0).Rem(lb, rv.GetBigInt())
+ lv.V = BigintValue{V: lb}
}
-
- lb := lv.GetBigInt()
- lb = big.NewInt(0).Rem(lb, rv.GetBigInt())
- lv.V = BigintValue{V: lb}
default:
panic(fmt.Sprintf(
"operators %% and %%= not defined for %s",
@@ -1037,6 +1101,9 @@ func remAssign(lv, rv *TypedValue) *Exception {
))
}
+ if !ok {
+ return &Exception{Value: typedString("division by zero or overflow")}
+ }
return nil
}
diff --git a/gnovm/pkg/gnolang/op_inc_dec.go b/gnovm/pkg/gnolang/op_inc_dec.go
index 7a8a885bcf0..1e68e195596 100644
--- a/gnovm/pkg/gnolang/op_inc_dec.go
+++ b/gnovm/pkg/gnolang/op_inc_dec.go
@@ -5,6 +5,7 @@ import (
"math/big"
"github.com/cockroachdb/apd/v3"
+ "github.com/gnolang/gno/tm2/pkg/overflow"
)
func (m *Machine) doOpInc() {
@@ -31,16 +32,18 @@ func (m *Machine) doOpInc() {
// because it could be a type alias
// type num int
switch baseOf(lv.T) {
+ // Signed integers may overflow, which triggers a panic.
case IntType:
- lv.SetInt(lv.GetInt() + 1)
+ lv.SetInt(overflow.Addp(lv.GetInt(), 1))
case Int8Type:
- lv.SetInt8(lv.GetInt8() + 1)
+ lv.SetInt8(overflow.Add8p(lv.GetInt8(), 1))
case Int16Type:
- lv.SetInt16(lv.GetInt16() + 1)
+ lv.SetInt16(overflow.Add16p(lv.GetInt16(), 1))
case Int32Type:
- lv.SetInt32(lv.GetInt32() + 1)
+ lv.SetInt32(overflow.Add32p(lv.GetInt32(), 1))
case Int64Type:
- lv.SetInt64(lv.GetInt64() + 1)
+ lv.SetInt64(overflow.Add64p(lv.GetInt64(), 1))
+ // Unsigned integers do not overflow, they just wrap.
case UintType:
lv.SetUint(lv.GetUint() + 1)
case Uint8Type:
@@ -101,16 +104,18 @@ func (m *Machine) doOpDec() {
}
}
switch baseOf(lv.T) {
+ // Signed integers may overflow, which triggers a panic.
case IntType:
- lv.SetInt(lv.GetInt() - 1)
+ lv.SetInt(overflow.Subp(lv.GetInt(), 1))
case Int8Type:
- lv.SetInt8(lv.GetInt8() - 1)
+ lv.SetInt8(overflow.Sub8p(lv.GetInt8(), 1))
case Int16Type:
- lv.SetInt16(lv.GetInt16() - 1)
+ lv.SetInt16(overflow.Sub16p(lv.GetInt16(), 1))
case Int32Type:
- lv.SetInt32(lv.GetInt32() - 1)
+ lv.SetInt32(overflow.Sub32p(lv.GetInt32(), 1))
case Int64Type:
- lv.SetInt64(lv.GetInt64() - 1)
+ lv.SetInt64(overflow.Sub64p(lv.GetInt64(), 1))
+ // Unsigned integers do not overflow, they just wrap.
case UintType:
lv.SetUint(lv.GetUint() - 1)
case Uint8Type:
diff --git a/gnovm/pkg/integration/testscript.go b/gnovm/pkg/integration/testscript.go
new file mode 100644
index 00000000000..5b7c5c81a44
--- /dev/null
+++ b/gnovm/pkg/integration/testscript.go
@@ -0,0 +1,36 @@
+package integration
+
+import (
+ "os"
+ "strconv"
+ "testing"
+
+ "github.com/rogpeppe/go-internal/testscript"
+)
+
+// NewTestingParams sets up and initializes base testscript params for testing.
+func NewTestingParams(t *testing.T, testdir string) testscript.Params {
+ t.Helper()
+
+ var params testscript.Params
+ params.Dir = testdir
+
+ params.UpdateScripts, _ = strconv.ParseBool(os.Getenv("UPDATE_SCRIPTS"))
+ params.TestWork, _ = strconv.ParseBool(os.Getenv("TESTWORK"))
+ if deadline, ok := t.Deadline(); ok && params.Deadline.IsZero() {
+ params.Deadline = deadline
+ }
+
+ // Expose the params to the scripts through environment variables
+ params.Setup = func(env *testscript.Env) error {
+ // Set the UPDATE_SCRIPTS environment variable
+ env.Setenv("UPDATE_SCRIPTS", strconv.FormatBool(params.UpdateScripts))
+
+ // Set the TESTWORK environment variable
+ env.Setenv("TESTWORK", strconv.FormatBool(params.TestWork))
+
+ return nil
+ }
+
+ return params
+}
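+
+// Typical usage mirrors gnovm/cmd/gno/testdata_test.go:
+//
+//	p := integration.NewTestingParams(t, testdir)
+//	testscript.Run(t, p)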
diff --git a/gnovm/pkg/integration/coverage.go b/gnovm/pkg/integration/testscript_coverage.go
similarity index 100%
rename from gnovm/pkg/integration/coverage.go
rename to gnovm/pkg/integration/testscript_coverage.go
diff --git a/gnovm/pkg/integration/gno.go b/gnovm/pkg/integration/testscript_gno.go
similarity index 87%
rename from gnovm/pkg/integration/gno.go
rename to gnovm/pkg/integration/testscript_gno.go
index a389b6a9b24..03a3fcd6056 100644
--- a/gnovm/pkg/integration/gno.go
+++ b/gnovm/pkg/integration/testscript_gno.go
@@ -17,7 +17,7 @@ import (
// If the `gno` binary doesn't exist, it's built using the `go build` command into the specified buildDir.
// The function also includes the `gno` command in `p.Cmds` and wraps the environment in p.Setup
// to correctly set up the environment variables needed for the `gno` command.
-func SetupGno(p *testscript.Params, buildDir string) error {
+func SetupGno(p *testscript.Params, homeDir, buildDir string) error {
// Try to fetch `GNOROOT` from the environment variables
gnoroot := gnoenv.RootDir()
@@ -62,18 +62,10 @@ func SetupGno(p *testscript.Params, buildDir string) error {
// Set the GNOROOT environment variable
env.Setenv("GNOROOT", gnoroot)
- // Create a temporary home directory because certain commands require access to $HOME/.cache/go-build
- home, err := os.MkdirTemp("", "gno")
- if err != nil {
- return fmt.Errorf("unable to create temporary home directory: %w", err)
- }
- env.Setenv("HOME", home)
+ env.Setenv("HOME", homeDir)
// Avoids go command printing errors relating to lack of go.mod.
env.Setenv("GO111MODULE", "off")
- // Cleanup home folder
- env.Defer(func() { os.RemoveAll(home) })
-
return nil
}
diff --git a/gnovm/stdlibs/generated.go b/gnovm/stdlibs/generated.go
index c1198e5f351..d5ab052028f 100644
--- a/gnovm/stdlibs/generated.go
+++ b/gnovm/stdlibs/generated.go
@@ -995,7 +995,6 @@ var initOrder = [...]string{
"hash",
"hash/adler32",
"html",
- "math/overflow",
"math/rand",
"path",
"sort",
diff --git a/gnovm/stdlibs/math/const_test.gno b/gnovm/stdlibs/math/const_test.gno
index b892a12898b..fbe59d61878 100644
--- a/gnovm/stdlibs/math/const_test.gno
+++ b/gnovm/stdlibs/math/const_test.gno
@@ -31,19 +31,76 @@ func TestMaxUint(t *testing.T) {
}
func TestMaxInt(t *testing.T) {
- if v := int(math.MaxInt); v+1 != math.MinInt {
- t.Errorf("MaxInt should wrap around to MinInt: %d", v+1)
+ defer func() {
+ if r := recover(); r != nil {
+ if r != "addition overflow" {
+ panic(r)
+ }
+ }
+ }()
+ v := int(math.MaxInt)
+ if v+1 == math.MinInt {
+ t.Errorf("int should overflow")
}
- if v := int8(math.MaxInt8); v+1 != math.MinInt8 {
- t.Errorf("MaxInt8 should wrap around to MinInt8: %d", v+1)
+ t.Errorf("expected panic did not occur")
+}
+
+func TestMaxInt8(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ if r != "addition overflow" {
+ panic(r)
+ }
+ }
+ }()
+ v := int8(math.MaxInt8)
+ if v+1 == math.MinInt8 {
+ t.Errorf("int8 should overflow")
}
- if v := int16(math.MaxInt16); v+1 != math.MinInt16 {
- t.Errorf("MaxInt16 should wrap around to MinInt16: %d", v+1)
+ t.Errorf("expected panic did not occur")
+}
+
+func TestMaxInt16(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ if r != "addition overflow" {
+ panic(r)
+ }
+ }
+ }()
+ v := int16(math.MaxInt16)
+ if v+1 == math.MinInt16 {
+ t.Errorf("int16 should overflow")
}
- if v := int32(math.MaxInt32); v+1 != math.MinInt32 {
- t.Errorf("MaxInt32 should wrap around to MinInt32: %d", v+1)
+ t.Errorf("expected panic did not occur")
+}
+
+func TestMaxInt32(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ if r != "addition overflow" {
+ panic(r)
+ }
+ }
+ }()
+ v := int32(math.MaxInt32)
+ if v+1 == math.MinInt32 {
+ t.Errorf("int32 should overflow")
}
- if v := int64(math.MaxInt64); v+1 != math.MinInt64 {
- t.Errorf("MaxInt64 should wrap around to MinInt64: %d", v+1)
+ t.Errorf("expected panic did not occur")
+}
+
+func TestMaxInt64(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ if r != "addition overflow" {
+ panic(r)
+ }
+ }
+ }()
+ v := int64(math.MaxInt64)
+ if v+1 == math.MinInt64 {
+ t.Errorf("int64 should overflow")
}
+ t.Errorf("expected panic did not occur")
}
diff --git a/gnovm/stdlibs/math/overflow/overflow.gno b/gnovm/stdlibs/math/overflow/overflow.gno
deleted file mode 100644
index 0bc2e03a522..00000000000
--- a/gnovm/stdlibs/math/overflow/overflow.gno
+++ /dev/null
@@ -1,501 +0,0 @@
-// This is modified from https://github.com/JohnCGriffin/overflow (MIT).
-// NOTE: there was a bug with the original Quotient* functions, and
-// testing method. These have been fixed here, and tests ported to
-// tests/files/maths_int*.go respectively.
-// Note: moved over from p/demo/maths.
-
-/*
-Package overflow offers overflow-checked integer arithmetic operations
-for int, int32, and int64. Each of the operations returns a
-result,bool combination. This was prompted by the need to know when
-to flow into higher precision types from the math.big library.
-
-For instance, assuing a 64 bit machine:
-
-10 + 20 -> 30
-int(math.MaxInt64) + 1 -> -9223372036854775808
-
-whereas
-
-overflow.Add(10,20) -> (30, true)
-overflow.Add(math.MaxInt64,1) -> (0, false)
-
-Add, Sub, Mul, Div are for int. Add64, Add32, etc. are specifically sized.
-
-If anybody wishes an unsigned version, submit a pull request for code
-and new tests.
-*/
-package overflow
-
-import "math"
-
-//go:generate ./overflow_template.sh
-
-func _is64Bit() bool {
- maxU32 := uint(math.MaxUint32)
- return ((maxU32 << 1) >> 1) == maxU32
-}
-
-/********** PARTIAL TEST COVERAGE FROM HERE DOWN *************
-
-The only way that I could see to do this is a combination of
-my normal 64 bit system and a GopherJS running on Node. My
-understanding is that its ints are 32 bit.
-
-So, FEEL FREE to carefully review the code visually.
-
-*************************************************************/
-
-// Unspecified size, i.e. normal signed int
-
-// Add sums two ints, returning the result and a boolean status.
-func Add(a, b int) (int, bool) {
- if _is64Bit() {
- r64, ok := Add64(int64(a), int64(b))
- return int(r64), ok
- }
- r32, ok := Add32(int32(a), int32(b))
- return int(r32), ok
-}
-
-// Sub returns the difference of two ints and a boolean status.
-func Sub(a, b int) (int, bool) {
- if _is64Bit() {
- r64, ok := Sub64(int64(a), int64(b))
- return int(r64), ok
- }
- r32, ok := Sub32(int32(a), int32(b))
- return int(r32), ok
-}
-
-// Mul returns the product of two ints and a boolean status.
-func Mul(a, b int) (int, bool) {
- if _is64Bit() {
- r64, ok := Mul64(int64(a), int64(b))
- return int(r64), ok
- }
- r32, ok := Mul32(int32(a), int32(b))
- return int(r32), ok
-}
-
-// Div returns the quotient of two ints and a boolean status
-func Div(a, b int) (int, bool) {
- if _is64Bit() {
- r64, ok := Div64(int64(a), int64(b))
- return int(r64), ok
- }
- r32, ok := Div32(int32(a), int32(b))
- return int(r32), ok
-}
-
-// Quo returns the quotient, remainder and status of two ints
-func Quo(a, b int) (int, int, bool) {
- if _is64Bit() {
- q64, r64, ok := Quo64(int64(a), int64(b))
- return int(q64), int(r64), ok
- }
- q32, r32, ok := Quo32(int32(a), int32(b))
- return int(q32), int(r32), ok
-}
-
-/************* Panic versions for int ****************/
-
-// Addp returns the sum of two ints, panicking on overflow
-func Addp(a, b int) int {
- r, ok := Add(a, b)
- if !ok {
- panic("addition overflow")
- }
- return r
-}
-
-// Subp returns the difference of two ints, panicking on overflow.
-func Subp(a, b int) int {
- r, ok := Sub(a, b)
- if !ok {
- panic("subtraction overflow")
- }
- return r
-}
-
-// Mulp returns the product of two ints, panicking on overflow.
-func Mulp(a, b int) int {
- r, ok := Mul(a, b)
- if !ok {
- panic("multiplication overflow")
- }
- return r
-}
-
-// Divp returns the quotient of two ints, panicking on overflow.
-func Divp(a, b int) int {
- r, ok := Div(a, b)
- if !ok {
- panic("division failure")
- }
- return r
-}
-
-//----------------------------------------
-// This is generated code, created by overflow_template.sh executed
-// by "go generate"
-
-// Add8 performs + operation on two int8 operands
-// returning a result and status
-func Add8(a, b int8) (int8, bool) {
- c := a + b
- if (c > a) == (b > 0) {
- return c, true
- }
- return c, false
-}
-
-// Add8p is the unchecked panicking version of Add8
-func Add8p(a, b int8) int8 {
- r, ok := Add8(a, b)
- if !ok {
- panic("addition overflow")
- }
- return r
-}
-
-// Sub8 performs - operation on two int8 operands
-// returning a result and status
-func Sub8(a, b int8) (int8, bool) {
- c := a - b
- if (c < a) == (b > 0) {
- return c, true
- }
- return c, false
-}
-
-// Sub8p is the unchecked panicking version of Sub8
-func Sub8p(a, b int8) int8 {
- r, ok := Sub8(a, b)
- if !ok {
- panic("subtraction overflow")
- }
- return r
-}
-
-// Mul8 performs * operation on two int8 operands
-// returning a result and status
-func Mul8(a, b int8) (int8, bool) {
- if a == 0 || b == 0 {
- return 0, true
- }
- c := a * b
- if (c < 0) == ((a < 0) != (b < 0)) {
- if c/b == a {
- return c, true
- }
- }
- return c, false
-}
-
-// Mul8p is the unchecked panicking version of Mul8
-func Mul8p(a, b int8) int8 {
- r, ok := Mul8(a, b)
- if !ok {
- panic("multiplication overflow")
- }
- return r
-}
-
-// Div8 performs / operation on two int8 operands
-// returning a result and status
-func Div8(a, b int8) (int8, bool) {
- q, _, ok := Quo8(a, b)
- return q, ok
-}
-
-// Div8p is the unchecked panicking version of Div8
-func Div8p(a, b int8) int8 {
- r, ok := Div8(a, b)
- if !ok {
- panic("division failure")
- }
- return r
-}
-
-// Quo8 performs / and % operations on two int8 operands
-// returning a quotient, a remainder and status
-func Quo8(a, b int8) (int8, int8, bool) {
- if b == 0 {
- return 0, 0, false
- } else if b == -1 && a == int8(math.MinInt8) {
- return 0, 0, false
- }
- c := a / b
- return c, a % b, true
-}
-
-// Add16 performs + operation on two int16 operands
-// returning a result and status
-func Add16(a, b int16) (int16, bool) {
- c := a + b
- if (c > a) == (b > 0) {
- return c, true
- }
- return c, false
-}
-
-// Add16p is the unchecked panicking version of Add16
-func Add16p(a, b int16) int16 {
- r, ok := Add16(a, b)
- if !ok {
- panic("addition overflow")
- }
- return r
-}
-
-// Sub16 performs - operation on two int16 operands
-// returning a result and status
-func Sub16(a, b int16) (int16, bool) {
- c := a - b
- if (c < a) == (b > 0) {
- return c, true
- }
- return c, false
-}
-
-// Sub16p is the unchecked panicking version of Sub16
-func Sub16p(a, b int16) int16 {
- r, ok := Sub16(a, b)
- if !ok {
- panic("subtraction overflow")
- }
- return r
-}
-
-// Mul16 performs * operation on two int16 operands
-// returning a result and status
-func Mul16(a, b int16) (int16, bool) {
- if a == 0 || b == 0 {
- return 0, true
- }
- c := a * b
- if (c < 0) == ((a < 0) != (b < 0)) {
- if c/b == a {
- return c, true
- }
- }
- return c, false
-}
-
-// Mul16p is the unchecked panicking version of Mul16
-func Mul16p(a, b int16) int16 {
- r, ok := Mul16(a, b)
- if !ok {
- panic("multiplication overflow")
- }
- return r
-}
-
-// Div16 performs / operation on two int16 operands
-// returning a result and status
-func Div16(a, b int16) (int16, bool) {
- q, _, ok := Quo16(a, b)
- return q, ok
-}
-
-// Div16p is the unchecked panicking version of Div16
-func Div16p(a, b int16) int16 {
- r, ok := Div16(a, b)
- if !ok {
- panic("division failure")
- }
- return r
-}
-
-// Quo16 performs / and % operations on two int16 operands
-// returning a quotient, a remainder and status
-func Quo16(a, b int16) (int16, int16, bool) {
- if b == 0 {
- return 0, 0, false
- } else if b == -1 && a == int16(math.MinInt16) {
- return 0, 0, false
- }
- c := a / b
- return c, a % b, true
-}
-
-// Add32 performs + operation on two int32 operands
-// returning a result and status
-func Add32(a, b int32) (int32, bool) {
- c := a + b
- if (c > a) == (b > 0) {
- return c, true
- }
- return c, false
-}
-
-// Add32p is the unchecked panicking version of Add32
-func Add32p(a, b int32) int32 {
- r, ok := Add32(a, b)
- if !ok {
- panic("addition overflow")
- }
- return r
-}
-
-// Sub32 performs - operation on two int32 operands
-// returning a result and status
-func Sub32(a, b int32) (int32, bool) {
- c := a - b
- if (c < a) == (b > 0) {
- return c, true
- }
- return c, false
-}
-
-// Sub32p is the unchecked panicking version of Sub32
-func Sub32p(a, b int32) int32 {
- r, ok := Sub32(a, b)
- if !ok {
- panic("subtraction overflow")
- }
- return r
-}
-
-// Mul32 performs * operation on two int32 operands
-// returning a result and status
-func Mul32(a, b int32) (int32, bool) {
- if a == 0 || b == 0 {
- return 0, true
- }
- c := a * b
- if (c < 0) == ((a < 0) != (b < 0)) {
- if c/b == a {
- return c, true
- }
- }
- return c, false
-}
-
-// Mul32p is the unchecked panicking version of Mul32
-func Mul32p(a, b int32) int32 {
- r, ok := Mul32(a, b)
- if !ok {
- panic("multiplication overflow")
- }
- return r
-}
-
-// Div32 performs / operation on two int32 operands
-// returning a result and status
-func Div32(a, b int32) (int32, bool) {
- q, _, ok := Quo32(a, b)
- return q, ok
-}
-
-// Div32p is the unchecked panicking version of Div32
-func Div32p(a, b int32) int32 {
- r, ok := Div32(a, b)
- if !ok {
- panic("division failure")
- }
- return r
-}
-
-// Quo32 performs / and % operations on two int32 operands
-// returning a quotient, a remainder and status
-func Quo32(a, b int32) (int32, int32, bool) {
- if b == 0 {
- return 0, 0, false
- } else if b == -1 && a == int32(math.MinInt32) {
- return 0, 0, false
- }
- c := a / b
- return c, a % b, true
-}
-
-// Add64 performs + operation on two int64 operands
-// returning a result and status
-func Add64(a, b int64) (int64, bool) {
- c := a + b
- if (c > a) == (b > 0) {
- return c, true
- }
- return c, false
-}
-
-// Add64p is the unchecked panicking version of Add64
-func Add64p(a, b int64) int64 {
- r, ok := Add64(a, b)
- if !ok {
- panic("addition overflow")
- }
- return r
-}
-
-// Sub64 performs - operation on two int64 operands
-// returning a result and status
-func Sub64(a, b int64) (int64, bool) {
- c := a - b
- if (c < a) == (b > 0) {
- return c, true
- }
- return c, false
-}
-
-// Sub64p is the unchecked panicking version of Sub64
-func Sub64p(a, b int64) int64 {
- r, ok := Sub64(a, b)
- if !ok {
- panic("subtraction overflow")
- }
- return r
-}
-
-// Mul64 performs * operation on two int64 operands
-// returning a result and status
-func Mul64(a, b int64) (int64, bool) {
- if a == 0 || b == 0 {
- return 0, true
- }
- c := a * b
- if (c < 0) == ((a < 0) != (b < 0)) {
- if c/b == a {
- return c, true
- }
- }
- return c, false
-}
-
-// Mul64p is the unchecked panicking version of Mul64
-func Mul64p(a, b int64) int64 {
- r, ok := Mul64(a, b)
- if !ok {
- panic("multiplication overflow")
- }
- return r
-}
-
-// Div64 performs / operation on two int64 operands
-// returning a result and status
-func Div64(a, b int64) (int64, bool) {
- q, _, ok := Quo64(a, b)
- return q, ok
-}
-
-// Div64p is the unchecked panicking version of Div64
-func Div64p(a, b int64) int64 {
- r, ok := Div64(a, b)
- if !ok {
- panic("division failure")
- }
- return r
-}
-
-// Quo64 performs / and % operations on two int64 operands
-// returning a quotient, a remainder and status
-func Quo64(a, b int64) (int64, int64, bool) {
- if b == 0 {
- return 0, 0, false
- } else if b == -1 && a == math.MinInt64 {
- return 0, 0, false
- }
- c := a / b
- return c, a % b, true
-}
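
The removed helpers all rely on the same post-hoc direction check: Go signed integer arithmetic wraps, and the wrapped result is valid exactly when it moved in the direction implied by the second operand's sign. A minimal standalone sketch of the addition case (illustrative plain Go, not part of this patch):

```go
package main

import (
	"fmt"
	"math"
)

// addInt8Checked mirrors the removed Add8: after the wrapping addition,
// the result is in range exactly when it moved in the same direction as
// the sign of b.
func addInt8Checked(a, b int8) (int8, bool) {
	c := a + b
	return c, (c > a) == (b > 0)
}

func main() {
	fmt.Println(addInt8Checked(100, 27))          // 127 true
	fmt.Println(addInt8Checked(100, 28))          // -128 false (overflow)
	fmt.Println(addInt8Checked(math.MinInt8, -1)) // 127 false  (underflow)
}
```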
diff --git a/gnovm/stdlibs/math/overflow/overflow_test.gno b/gnovm/stdlibs/math/overflow/overflow_test.gno
deleted file mode 100644
index b7881aec480..00000000000
--- a/gnovm/stdlibs/math/overflow/overflow_test.gno
+++ /dev/null
@@ -1,200 +0,0 @@
-package overflow
-
-import (
- "math"
- "testing"
-)
-
-// sample all possibilities of 8 bit numbers
-// by checking against 64 bit numbers
-
-func TestAlgorithms(t *testing.T) {
- errors := 0
-
- for a64 := int64(math.MinInt8); a64 <= int64(math.MaxInt8); a64++ {
- for b64 := int64(math.MinInt8); b64 <= int64(math.MaxInt8) && errors < 10; b64++ {
-
- a8 := int8(a64)
- b8 := int8(b64)
-
- if int64(a8) != a64 || int64(b8) != b64 {
- t.Fatal("LOGIC FAILURE IN TEST")
- }
-
- // ADDITION
- {
- r64 := a64 + b64
-
- // now the verification
- result, ok := Add8(a8, b8)
- if ok && int64(result) != r64 {
- t.Errorf("failed to fail on %v + %v = %v instead of %v\n",
- a8, b8, result, r64)
- errors++
- }
- if !ok && int64(result) == r64 {
- t.Fail()
- errors++
- }
- }
-
- // SUBTRACTION
- {
- r64 := a64 - b64
-
- // now the verification
- result, ok := Sub8(a8, b8)
- if ok && int64(result) != r64 {
- t.Errorf("failed to fail on %v - %v = %v instead of %v\n",
- a8, b8, result, r64)
- }
- if !ok && int64(result) == r64 {
- t.Fail()
- errors++
- }
- }
-
- // MULTIPLICATION
- {
- r64 := a64 * b64
-
- // now the verification
- result, ok := Mul8(a8, b8)
- if ok && int64(result) != r64 {
- t.Errorf("failed to fail on %v * %v = %v instead of %v\n",
- a8, b8, result, r64)
- errors++
- }
- if !ok && int64(result) == r64 {
- t.Fail()
- errors++
- }
- }
-
- // DIVISION
- if b8 != 0 {
- r64 := a64 / b64
- rem64 := a64 % b64
-
- // now the verification
- result, rem, ok := Quo8(a8, b8)
- if ok && int64(result) != r64 {
- t.Errorf("failed to fail on %v / %v = %v instead of %v\n",
- a8, b8, result, r64)
- errors++
- }
- if ok && int64(rem) != rem64 {
- t.Errorf("failed to fail on %v %% %v = %v instead of %v\n",
- a8, b8, rem, rem64)
- errors++
- }
- }
- }
- }
-}
-
-func TestQuotient(t *testing.T) {
- q, r, ok := Quo(100, 3)
- if r != 1 || q != 33 || !ok {
- t.Errorf("expected 100/3 => 33, r=1")
- }
- if _, _, ok = Quo(1, 0); ok {
- t.Error("unexpected lack of failure")
- }
-}
-
-func TestLong(t *testing.T) {
- if testing.Short() {
- t.Skip()
- }
-
- ctr := int64(0)
-
- for a64 := int64(math.MinInt16); a64 <= int64(math.MaxInt16); a64++ {
- for b64 := int64(math.MinInt16); b64 <= int64(math.MaxInt16); b64++ {
- a16 := int16(a64)
- b16 := int16(b64)
- if int64(a16) != a64 || int64(b16) != b64 {
- panic("LOGIC FAILURE IN TEST")
- }
- ctr++
-
- // ADDITION
- {
- r64 := a64 + b64
-
- // now the verification
- result, ok := Add16(a16, b16)
- if int64(math.MinInt16) <= r64 && r64 <= int64(math.MaxInt16) {
- if !ok || int64(result) != r64 {
- println("add", a16, b16, result, r64)
- panic("incorrect result for non-overflow")
- }
- } else {
- if ok {
- println("add", a16, b16, result, r64)
- panic("incorrect ok result")
- }
- }
- }
-
- // SUBTRACTION
- {
- r64 := a64 - b64
-
- // now the verification
- result, ok := Sub16(a16, b16)
- if int64(math.MinInt16) <= r64 && r64 <= int64(math.MaxInt16) {
- if !ok || int64(result) != r64 {
- println("sub", a16, b16, result, r64)
- panic("incorrect result for non-overflow")
- }
- } else {
- if ok {
- println("sub", a16, b16, result, r64)
- panic("incorrect ok result")
- }
- }
- }
-
- // MULTIPLICATION
- {
- r64 := a64 * b64
-
- // now the verification
- result, ok := Mul16(a16, b16)
- if int64(math.MinInt16) <= r64 && r64 <= int64(math.MaxInt16) {
- if !ok || int64(result) != r64 {
- println("mul", a16, b16, result, r64)
- panic("incorrect result for non-overflow")
- }
- } else {
- if ok {
- println("mul", a16, b16, result, r64)
- panic("incorrect ok result")
- }
- }
- }
-
- // DIVISION
- if b16 != 0 {
- r64 := a64 / b64
-
- // now the verification
- result, _, ok := Quo16(a16, b16)
- if int64(math.MinInt16) <= r64 && r64 <= int64(math.MaxInt16) {
- if !ok || int64(result) != r64 {
- println("quo", a16, b16, result, r64)
- panic("incorrect result for non-overflow")
- }
- } else {
- if ok {
- println("quo", a16, b16, result, r64)
- panic("incorrect ok result")
- }
- }
- }
- }
- }
- println("done", ctr)
-}
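
The removed test's approach — exhaustively checking every narrow-width pair against 64-bit arithmetic as ground truth — generalizes to any replacement checks. A sketch of the same pattern for Mul8, assuming the removed package were still importable (illustration only):

```go
package overflow

import (
	"math"
	"testing"
)

// TestMul8Exhaustive checks every int8 pair against 64-bit arithmetic,
// the same ground-truth strategy the removed test used.
func TestMul8Exhaustive(t *testing.T) {
	for a := int64(math.MinInt8); a <= math.MaxInt8; a++ {
		for b := int64(math.MinInt8); b <= math.MaxInt8; b++ {
			want := a * b
			got, ok := Mul8(int8(a), int8(b))
			representable := want >= math.MinInt8 && want <= math.MaxInt8
			if ok != representable || (ok && int64(got) != want) {
				t.Fatalf("Mul8(%d, %d) = %d, %v; want %d (representable: %v)",
					a, b, got, ok, want, representable)
			}
		}
	}
}
```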
diff --git a/gnovm/stdlibs/std/coins.gno b/gnovm/stdlibs/std/coins.gno
index 47e88e238d2..679674e443e 100644
--- a/gnovm/stdlibs/std/coins.gno
+++ b/gnovm/stdlibs/std/coins.gno
@@ -1,9 +1,6 @@
package std
-import (
- "math/overflow"
- "strconv"
-)
+import "strconv"
// NOTE: this is selectively copied over from tm2/pkgs/std/coin.go
@@ -56,13 +53,7 @@ func (c Coin) IsEqual(other Coin) bool {
// An invalid result panics.
func (c Coin) Add(other Coin) Coin {
mustMatchDenominations(c.Denom, other.Denom)
-
- sum, ok := overflow.Add64(c.Amount, other.Amount)
- if !ok {
- panic("coin add overflow/underflow: " + strconv.Itoa(int(c.Amount)) + " +/- " + strconv.Itoa(int(other.Amount)))
- }
-
- c.Amount = sum
+ c.Amount += other.Amount
return c
}
@@ -72,13 +63,7 @@ func (c Coin) Add(other Coin) Coin {
// An invalid result panics.
func (c Coin) Sub(other Coin) Coin {
mustMatchDenominations(c.Denom, other.Denom)
-
- dff, ok := overflow.Sub64(c.Amount, other.Amount)
- if !ok {
- panic("coin sub overflow/underflow: " + strconv.Itoa(int(c.Amount)) + " +/- " + strconv.Itoa(int(other.Amount)))
- }
- c.Amount = dff
-
+ c.Amount -= other.Amount
return c
}
@@ -113,10 +98,7 @@ func NewCoins(coins ...Coin) Coins {
for _, coin := range coins {
if currentAmount, exists := coinMap[coin.Denom]; exists {
- var ok bool
- if coinMap[coin.Denom], ok = overflow.Add64(currentAmount, coin.Amount); !ok {
- panic("coin sub overflow/underflow: " + strconv.Itoa(int(currentAmount)) + " +/- " + strconv.Itoa(int(coin.Amount)))
- }
+ coinMap[coin.Denom] = currentAmount + coin.Amount
} else {
coinMap[coin.Denom] = coin.Amount
}
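
NewCoins still aggregates duplicate denominations; only the checked addition is gone. A small standalone sketch of the merge semantics after this change (plain Go; the type and helper names are illustrative, not the std API):

```go
package main

import "fmt"

// Coin is a simplified stand-in for std.Coin.
type Coin struct {
	Denom  string
	Amount int64
}

// mergeCoins mirrors the patched NewCoins loop: amounts for the same
// denomination are summed with plain, unchecked addition.
func mergeCoins(coins ...Coin) map[string]int64 {
	m := make(map[string]int64)
	for _, c := range coins {
		m[c.Denom] += c.Amount
	}
	return m
}

func main() {
	fmt.Println(mergeCoins(Coin{"ugnot", 100}, Coin{"ugnot", 250}, Coin{"foo", 1}))
	// map[foo:1 ugnot:350]
}
```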
diff --git a/gnovm/tests/files/overflow0.gno b/gnovm/tests/files/overflow0.gno
new file mode 100644
index 00000000000..1313f064322
--- /dev/null
+++ b/gnovm/tests/files/overflow0.gno
@@ -0,0 +1,10 @@
+package main
+
+func main() {
+ var a, b, c int8 = -1<<7, -1, 0
+ c = a / b // overflow: -128 instead of 128
+ println(c)
+}
+
+// Error:
+// division by zero or overflow
diff --git a/gnovm/tests/files/overflow1.gno b/gnovm/tests/files/overflow1.gno
new file mode 100644
index 00000000000..a416e9a3498
--- /dev/null
+++ b/gnovm/tests/files/overflow1.gno
@@ -0,0 +1,10 @@
+package main
+
+func main() {
+ var a, b, c int16 = -1<<15, -1, 0
+ c = a / b // overflow: -32768 instead of 32768
+ println(c)
+}
+
+// Error:
+// division by zero or overflow
diff --git a/gnovm/tests/files/overflow2.gno b/gnovm/tests/files/overflow2.gno
new file mode 100644
index 00000000000..353729bcdf2
--- /dev/null
+++ b/gnovm/tests/files/overflow2.gno
@@ -0,0 +1,10 @@
+package main
+
+func main() {
+ var a, b, c int32 = -1<<31, -1, 0
+ c = a / b // overflow: -2147483648 instead of 2147483648
+ println(c)
+}
+
+// Error:
+// division by zero or overflow
diff --git a/gnovm/tests/files/overflow3.gno b/gnovm/tests/files/overflow3.gno
new file mode 100644
index 00000000000..a09c59dfb03
--- /dev/null
+++ b/gnovm/tests/files/overflow3.gno
@@ -0,0 +1,10 @@
+package main
+
+func main() {
+ var a, b, c int64 = -1<<63, -1, 0
+ c = a / b // overflow: -9223372036854775808 instead of 9223372036854775808
+ println(c)
+}
+
+// Error:
+// division by zero or overflow
diff --git a/gnovm/tests/files/overflow4.gno b/gnovm/tests/files/overflow4.gno
new file mode 100644
index 00000000000..26b05567b07
--- /dev/null
+++ b/gnovm/tests/files/overflow4.gno
@@ -0,0 +1,10 @@
+package main
+
+func main() {
+ var a, b, c int = -1<<63, -1, 0
+ c = a / b // overflow: -9223372036854775808 instead of 9223372036854775808
+ println(c)
+}
+
+// Error:
+// division by zero or overflow
diff --git a/gnovm/tests/files/overflow5.gno b/gnovm/tests/files/overflow5.gno
new file mode 100644
index 00000000000..ef7f976eb24
--- /dev/null
+++ b/gnovm/tests/files/overflow5.gno
@@ -0,0 +1,10 @@
+package main
+
+func main() {
+ var a, b, c int = -5, 7, 0
+ c = a % b // 0 quotient triggers a false negative in gnolang/overflow
+ println(c)
+}
+
+// Output:
+// -5
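
Together these filetests pin down the only two failure modes of signed integer division — a zero divisor, and MinInt / -1, whose true result does not fit the type — plus one case that must not be flagged: -5 % 7 has quotient 0 and is perfectly valid. A standalone sketch of the equivalent check, mirroring the removed Quo64 helper:

```go
package main

import (
	"fmt"
	"math"
)

// quoChecked mirrors the removed Quo64: signed division fails only for a
// zero divisor or for math.MinInt64 / -1, whose true result (2^63) is not
// representable as an int64.
func quoChecked(a, b int64) (q, r int64, ok bool) {
	if b == 0 || (b == -1 && a == math.MinInt64) {
		return 0, 0, false
	}
	return a / b, a % b, true
}

func main() {
	fmt.Println(quoChecked(-5, 7))             // 0 -5 true   (valid, as in overflow5.gno)
	fmt.Println(quoChecked(math.MinInt64, -1)) // 0 0 false   (overflow, as in overflow3.gno)
	fmt.Println(quoChecked(1, 0))              // 0 0 false   (division by zero)
}
```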
diff --git a/gnovm/tests/files/recover14.gno b/gnovm/tests/files/recover14.gno
index 30a34ab291a..3c96404fcbe 100644
--- a/gnovm/tests/files/recover14.gno
+++ b/gnovm/tests/files/recover14.gno
@@ -12,4 +12,4 @@ func main() {
}
// Output:
-// recover: division by zero
+// recover: division by zero or overflow
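
recover14.gno itself is not reproduced in this diff, only its expected output; a filetest exercising the updated message would look roughly like the following (a guess at the shape, not the actual file contents):

```go
package main

func main() {
	defer func() {
		r := recover()
		println("recover:", r)
	}()

	a, b := 1, 0
	println(a / b) // both b == 0 and MinInt / -1 now report the combined message
}

// Output:
// recover: division by zero or overflow
```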
diff --git a/go.mod b/go.mod
index bf7c2df94b6..280ca3ae602 100644
--- a/go.mod
+++ b/go.mod
@@ -23,9 +23,11 @@ require (
github.com/rogpeppe/go-internal v1.12.0
github.com/rs/cors v1.11.1
github.com/rs/xid v1.6.0
+ github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b
github.com/stretchr/testify v1.9.0
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/yuin/goldmark v1.7.2
+ github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
go.etcd.io/bbolt v1.3.11
go.opentelemetry.io/otel v1.29.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0
diff --git a/go.sum b/go.sum
index 917270fd5a6..9c4d20dbad6 100644
--- a/go.sum
+++ b/go.sum
@@ -3,8 +3,10 @@ dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
+github.com/alecthomas/chroma/v2 v2.2.0/go.mod h1:vf4zrexSH54oEjJ7EdB65tGNHmH3pGZmVkgTP5RHvAs=
github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E=
github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I=
+github.com/alecthomas/repr v0.0.0-20220113201626-b1b626ac65ae/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8=
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
@@ -51,6 +53,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeC
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
+github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
+github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
@@ -131,6 +135,8 @@ github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b h1:oV47z+jotrLVvhiLRNzACVe7/qZ8DcRlMlDucR/FARo=
+github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b/go.mod h1:JprPCeMgYyLKJoAy9nxpVScm7NwFSwpibdrUKm4kcw0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -142,8 +148,11 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
+github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/goldmark v1.7.2 h1:NjGd7lO7zrUn/A7eKwn5PEOt4ONYGqpxSEeZuduvgxc=
github.com/yuin/goldmark v1.7.2/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
+github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ=
+github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I=
github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U=
github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw=
diff --git a/misc/autocounterd/go.mod b/misc/autocounterd/go.mod
index 30a6f23b458..82e8b0081ce 100644
--- a/misc/autocounterd/go.mod
+++ b/misc/autocounterd/go.mod
@@ -26,6 +26,7 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rs/xid v1.6.0 // indirect
+ github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b // indirect
github.com/stretchr/testify v1.9.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/zondax/hid v0.9.2 // indirect
@@ -43,6 +44,7 @@ require (
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/net v0.28.0 // indirect
+ golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.24.0 // indirect
golang.org/x/term v0.23.0 // indirect
golang.org/x/text v0.17.0 // indirect
diff --git a/misc/autocounterd/go.sum b/misc/autocounterd/go.sum
index 28959bf214e..bd88dd5d08c 100644
--- a/misc/autocounterd/go.sum
+++ b/misc/autocounterd/go.sum
@@ -120,6 +120,8 @@ github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b h1:oV47z+jotrLVvhiLRNzACVe7/qZ8DcRlMlDucR/FARo=
+github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b/go.mod h1:JprPCeMgYyLKJoAy9nxpVScm7NwFSwpibdrUKm4kcw0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -155,10 +157,6 @@ go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeX
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
-go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
-go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
-go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -176,6 +174,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/misc/genstd/util.go b/misc/genstd/util.go
index 025fe4b673e..13e90836f36 100644
--- a/misc/genstd/util.go
+++ b/misc/genstd/util.go
@@ -70,7 +70,8 @@ func findDirs() (gitRoot string, relPath string, err error) {
}
p := wd
for {
- if s, e := os.Stat(filepath.Join(p, ".git")); e == nil && s.IsDir() {
+ // .git is normally a directory, or a file in case of a git worktree.
+ if _, e := os.Stat(filepath.Join(p, ".git")); e == nil {
// make relPath relative to the git root
rp := strings.TrimPrefix(wd, p+string(filepath.Separator))
// normalize separator to /
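
The relaxed check matters for linked git worktrees, where the checkout root contains a .git file (pointing at the real git dir) rather than a .git directory. A self-contained sketch of what the loop now accepts (paths are examples only):

```go
package main

import (
	"os"
	"path/filepath"
)

// isRepoRoot reports whether p looks like a git checkout root. In a main
// checkout ".git" is a directory; in a linked worktree it is a regular file
// containing a line such as "gitdir: /home/user/gno/.git/worktrees/feature",
// which is why only existence is checked.
func isRepoRoot(p string) bool {
	_, err := os.Stat(filepath.Join(p, ".git"))
	return err == nil
}

func main() {
	println(isRepoRoot("."))
}
```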
diff --git a/misc/loop/go.mod b/misc/loop/go.mod
index af7783e57bb..fc2c5daac59 100644
--- a/misc/loop/go.mod
+++ b/misc/loop/go.mod
@@ -52,6 +52,7 @@ require (
github.com/prometheus/procfs v0.11.1 // indirect
github.com/rs/cors v1.11.1 // indirect
github.com/rs/xid v1.6.0 // indirect
+ github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b // indirect
github.com/stretchr/testify v1.9.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
go.etcd.io/bbolt v1.3.11 // indirect
@@ -68,6 +69,7 @@ require (
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/net v0.28.0 // indirect
+ golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.24.0 // indirect
golang.org/x/term v0.23.0 // indirect
golang.org/x/text v0.17.0 // indirect
diff --git a/misc/loop/go.sum b/misc/loop/go.sum
index 0d235f2cfb1..1ed786fb82d 100644
--- a/misc/loop/go.sum
+++ b/misc/loop/go.sum
@@ -162,6 +162,8 @@ github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b h1:oV47z+jotrLVvhiLRNzACVe7/qZ8DcRlMlDucR/FARo=
+github.com/sig-0/insertion-queue v0.0.0-20241004125609-6b3ca841346b/go.mod h1:JprPCeMgYyLKJoAy9nxpVScm7NwFSwpibdrUKm4kcw0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
diff --git a/tm2/pkg/bft/blockchain/pool.go b/tm2/pkg/bft/blockchain/pool.go
index b610a0c0e7a..5fdd23af910 100644
--- a/tm2/pkg/bft/blockchain/pool.go
+++ b/tm2/pkg/bft/blockchain/pool.go
@@ -12,7 +12,7 @@ import (
"github.com/gnolang/gno/tm2/pkg/bft/types"
"github.com/gnolang/gno/tm2/pkg/flow"
"github.com/gnolang/gno/tm2/pkg/log"
- "github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/gnolang/gno/tm2/pkg/service"
)
@@ -69,7 +69,7 @@ type BlockPool struct {
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
// peers
- peers map[p2p.ID]*bpPeer
+ peers map[p2pTypes.ID]*bpPeer
maxPeerHeight int64 // the biggest reported height
// atomic
@@ -83,7 +83,7 @@ type BlockPool struct {
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
bp := &BlockPool{
- peers: make(map[p2p.ID]*bpPeer),
+ peers: make(map[p2pTypes.ID]*bpPeer),
requesters: make(map[int64]*bpRequester),
height: start,
@@ -226,13 +226,13 @@ func (pool *BlockPool) PopRequest() {
// RedoRequest invalidates the block at pool.height,
// Remove the peer and redo request from others.
// Returns the ID of the removed peer.
-func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
+func (pool *BlockPool) RedoRequest(height int64) p2pTypes.ID {
pool.mtx.Lock()
defer pool.mtx.Unlock()
request := pool.requesters[height]
peerID := request.getPeerID()
- if peerID != p2p.ID("") {
+ if peerID != p2pTypes.ID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
}
@@ -241,7 +241,7 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
-func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) {
+func (pool *BlockPool) AddBlock(peerID p2pTypes.ID, block *types.Block, blockSize int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -278,7 +278,7 @@ func (pool *BlockPool) MaxPeerHeight() int64 {
}
// SetPeerHeight sets the peer's alleged blockchain height.
-func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
+func (pool *BlockPool) SetPeerHeight(peerID p2pTypes.ID, height int64) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -298,14 +298,14 @@ func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, function is a no-op.
-func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
+func (pool *BlockPool) RemovePeer(peerID p2pTypes.ID) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
pool.removePeer(peerID)
}
-func (pool *BlockPool) removePeer(peerID p2p.ID) {
+func (pool *BlockPool) removePeer(peerID p2pTypes.ID) {
for _, requester := range pool.requesters {
if requester.getPeerID() == peerID {
requester.redo(peerID)
@@ -386,14 +386,14 @@ func (pool *BlockPool) requestersLen() int64 {
return int64(len(pool.requesters))
}
-func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) {
+func (pool *BlockPool) sendRequest(height int64, peerID p2pTypes.ID) {
if !pool.IsRunning() {
return
}
pool.requestsCh <- BlockRequest{height, peerID}
}
-func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
+func (pool *BlockPool) sendError(err error, peerID p2pTypes.ID) {
if !pool.IsRunning() {
return
}
@@ -424,7 +424,7 @@ func (pool *BlockPool) debug() string {
type bpPeer struct {
pool *BlockPool
- id p2p.ID
+ id p2pTypes.ID
recvMonitor *flow.Monitor
height int64
@@ -435,7 +435,7 @@ type bpPeer struct {
logger *slog.Logger
}
-func newBPPeer(pool *BlockPool, peerID p2p.ID, height int64) *bpPeer {
+func newBPPeer(pool *BlockPool, peerID p2pTypes.ID, height int64) *bpPeer {
peer := &bpPeer{
pool: pool,
id: peerID,
@@ -499,10 +499,10 @@ type bpRequester struct {
pool *BlockPool
height int64
gotBlockCh chan struct{}
- redoCh chan p2p.ID // redo may send multitime, add peerId to identify repeat
+ redoCh chan p2pTypes.ID // redo may be sent multiple times; peerID identifies repeated requests
mtx sync.Mutex
- peerID p2p.ID
+ peerID p2pTypes.ID
block *types.Block
}
@@ -511,7 +511,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
pool: pool,
height: height,
gotBlockCh: make(chan struct{}, 1),
- redoCh: make(chan p2p.ID, 1),
+ redoCh: make(chan p2pTypes.ID, 1),
peerID: "",
block: nil,
@@ -526,7 +526,7 @@ func (bpr *bpRequester) OnStart() error {
}
// Returns true if the peer matches and block doesn't already exist.
-func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
+func (bpr *bpRequester) setBlock(block *types.Block, peerID p2pTypes.ID) bool {
bpr.mtx.Lock()
if bpr.block != nil || bpr.peerID != peerID {
bpr.mtx.Unlock()
@@ -548,7 +548,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
return bpr.block
}
-func (bpr *bpRequester) getPeerID() p2p.ID {
+func (bpr *bpRequester) getPeerID() p2pTypes.ID {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
return bpr.peerID
@@ -570,7 +570,7 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
-func (bpr *bpRequester) redo(peerID p2p.ID) {
+func (bpr *bpRequester) redo(peerID p2pTypes.ID) {
select {
case bpr.redoCh <- peerID:
default:
@@ -631,5 +631,5 @@ OUTER_LOOP:
// delivering the block
type BlockRequest struct {
Height int64
- PeerID p2p.ID
+ PeerID p2pTypes.ID
}
diff --git a/tm2/pkg/bft/blockchain/pool_test.go b/tm2/pkg/bft/blockchain/pool_test.go
index a4d5636d5e3..ee58d672e75 100644
--- a/tm2/pkg/bft/blockchain/pool_test.go
+++ b/tm2/pkg/bft/blockchain/pool_test.go
@@ -5,12 +5,12 @@ import (
"testing"
"time"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/gnolang/gno/tm2/pkg/bft/types"
"github.com/gnolang/gno/tm2/pkg/log"
- "github.com/gnolang/gno/tm2/pkg/p2p"
"github.com/gnolang/gno/tm2/pkg/random"
)
@@ -19,7 +19,7 @@ func init() {
}
type testPeer struct {
- id p2p.ID
+ id p2pTypes.ID
height int64
inputChan chan inputData // make sure each peer's data is sequential
}
@@ -47,7 +47,7 @@ func (p testPeer) simulateInput(input inputData) {
// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}
-type testPeers map[p2p.ID]testPeer
+type testPeers map[p2pTypes.ID]testPeer
func (ps testPeers) start() {
for _, v := range ps {
@@ -64,7 +64,7 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
peers := make(testPeers, numPeers)
for i := 0; i < numPeers; i++ {
- peerID := p2p.ID(random.RandStr(12))
+ peerID := p2pTypes.ID(random.RandStr(12))
height := minHeight + random.RandInt63n(maxHeight-minHeight)
peers[peerID] = testPeer{peerID, height, make(chan inputData, 10)}
}
@@ -172,7 +172,7 @@ func TestBlockPoolTimeout(t *testing.T) {
// Pull from channels
counter := 0
- timedOut := map[p2p.ID]struct{}{}
+ timedOut := map[p2pTypes.ID]struct{}{}
for {
select {
case err := <-errorsCh:
@@ -195,7 +195,7 @@ func TestBlockPoolRemovePeer(t *testing.T) {
peers := make(testPeers, 10)
for i := 0; i < 10; i++ {
- peerID := p2p.ID(fmt.Sprintf("%d", i+1))
+ peerID := p2pTypes.ID(fmt.Sprintf("%d", i+1))
height := int64(i + 1)
peers[peerID] = testPeer{peerID, height, make(chan inputData)}
}
@@ -215,10 +215,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
assert.EqualValues(t, 10, pool.MaxPeerHeight())
// remove not-existing peer
- assert.NotPanics(t, func() { pool.RemovePeer(p2p.ID("Superman")) })
+ assert.NotPanics(t, func() { pool.RemovePeer(p2pTypes.ID("Superman")) })
// remove peer with biggest height
- pool.RemovePeer(p2p.ID("10"))
+ pool.RemovePeer(p2pTypes.ID("10"))
assert.EqualValues(t, 9, pool.MaxPeerHeight())
// remove all peers
diff --git a/tm2/pkg/bft/blockchain/reactor.go b/tm2/pkg/bft/blockchain/reactor.go
index 09e1225b717..417e96ad383 100644
--- a/tm2/pkg/bft/blockchain/reactor.go
+++ b/tm2/pkg/bft/blockchain/reactor.go
@@ -12,6 +12,7 @@ import (
"github.com/gnolang/gno/tm2/pkg/bft/store"
"github.com/gnolang/gno/tm2/pkg/bft/types"
"github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
)
const (
@@ -37,15 +38,14 @@ const (
bcBlockResponseMessageFieldKeySize
)
-type consensusReactor interface {
- // for when we switch from blockchain reactor and fast sync to
- // the consensus machine
- SwitchToConsensus(sm.State, int)
-}
+// SwitchToConsensusFn is a callback method that is meant to
+// stop the syncing process as soon as the latest known height is reached,
+// and start the consensus process for the validator node
+type SwitchToConsensusFn func(sm.State, int)
type peerError struct {
err error
- peerID p2p.ID
+ peerID p2pTypes.ID
}
func (e peerError) Error() string {
@@ -66,11 +66,17 @@ type BlockchainReactor struct {
requestsCh <-chan BlockRequest
errorsCh <-chan peerError
+
+ switchToConsensusFn SwitchToConsensusFn
}
// NewBlockchainReactor returns new reactor instance.
-func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
+func NewBlockchainReactor(
+ state sm.State,
+ blockExec *sm.BlockExecutor,
+ store *store.BlockStore,
fastSync bool,
+ switchToConsensusFn SwitchToConsensusFn,
) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
@@ -89,13 +95,14 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *st
)
bcR := &BlockchainReactor{
- initialState: state,
- blockExec: blockExec,
- store: store,
- pool: pool,
- fastSync: fastSync,
- requestsCh: requestsCh,
- errorsCh: errorsCh,
+ initialState: state,
+ blockExec: blockExec,
+ store: store,
+ pool: pool,
+ fastSync: fastSync,
+ requestsCh: requestsCh,
+ errorsCh: errorsCh,
+ switchToConsensusFn: switchToConsensusFn,
}
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
return bcR
@@ -138,7 +145,7 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
}
// AddPeer implements Reactor by sending our state to peer.
-func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
+func (bcR *BlockchainReactor) AddPeer(peer p2p.PeerConn) {
msgBytes := amino.MustMarshalAny(&bcStatusResponseMessage{bcR.store.Height()})
peer.Send(BlockchainChannel, msgBytes)
// it's OK if send fails. will try later in poolRoutine
@@ -148,7 +155,7 @@ func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
}
// RemovePeer implements Reactor by removing peer from the pool.
-func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
+func (bcR *BlockchainReactor) RemovePeer(peer p2p.PeerConn, reason interface{}) {
bcR.pool.RemovePeer(peer.ID())
}
@@ -157,7 +164,7 @@ func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// According to the Tendermint spec, if all nodes are honest,
// no node should be requesting for a block that's non-existent.
func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
- src p2p.Peer,
+ src p2p.PeerConn,
) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
@@ -172,7 +179,7 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
}
// Receive implements Reactor by handling 4 types of messages (look below).
-func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
+func (bcR *BlockchainReactor) Receive(chID byte, src p2p.PeerConn, msgBytes []byte) {
msg, err := decodeMsg(msgBytes)
if err != nil {
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
@@ -257,16 +264,13 @@ FOR_LOOP:
select {
case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
- outbound, inbound, _ := bcR.Switch.NumPeers()
- bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
- "outbound", outbound, "inbound", inbound)
+
+ bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters)
if bcR.pool.IsCaughtUp() {
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
bcR.pool.Stop()
- conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
- if ok {
- conR.SwitchToConsensus(state, blocksSynced)
- }
+
+ bcR.switchToConsensusFn(state, blocksSynced)
// else {
// should only happen during testing
// }
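
With the consensusReactor interface lookup removed, whoever constructs both reactors must wire the hand-off explicitly. A hedged fragment of that wiring (variable names are illustrative; the node-setup side is outside this part of the diff):

```go
// conR is the consensus reactor created elsewhere during node setup.
bcReactor := blockchain.NewBlockchainReactor(
	state.Copy(),
	blockExec,
	blockStore,
	fastSync,
	func(st sm.State, blocksSynced int) {
		// Invoked by the blockchain reactor once its pool reports IsCaughtUp.
		conR.SwitchToConsensus(st, blocksSynced)
	},
)
```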
diff --git a/tm2/pkg/bft/blockchain/reactor_test.go b/tm2/pkg/bft/blockchain/reactor_test.go
index a40dbc6376b..1bc2df59055 100644
--- a/tm2/pkg/bft/blockchain/reactor_test.go
+++ b/tm2/pkg/bft/blockchain/reactor_test.go
@@ -1,14 +1,13 @@
package blockchain
import (
+ "context"
"log/slog"
"os"
"sort"
"testing"
"time"
- "github.com/stretchr/testify/assert"
-
abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types"
"github.com/gnolang/gno/tm2/pkg/bft/appconn"
cfg "github.com/gnolang/gno/tm2/pkg/bft/config"
@@ -20,9 +19,12 @@ import (
tmtime "github.com/gnolang/gno/tm2/pkg/bft/types/time"
"github.com/gnolang/gno/tm2/pkg/db/memdb"
"github.com/gnolang/gno/tm2/pkg/errors"
+ p2pTesting "github.com/gnolang/gno/tm2/pkg/internal/p2p"
"github.com/gnolang/gno/tm2/pkg/log"
"github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/gnolang/gno/tm2/pkg/testutils"
+ "github.com/stretchr/testify/assert"
)
var config *cfg.Config
@@ -110,7 +112,7 @@ func newBlockchainReactor(logger *slog.Logger, genDoc *types.GenesisDoc, privVal
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
}
- bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
+ bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync, nil)
bcReactor.SetLogger(logger.With("module", "blockchain"))
return BlockchainReactorPair{bcReactor, proxyApp}
@@ -125,15 +127,35 @@ func TestNoBlockResponse(t *testing.T) {
maxBlockHeight := int64(65)
- reactorPairs := make([]BlockchainReactorPair, 2)
+ var (
+ reactorPairs = make([]BlockchainReactorPair, 2)
+ options = make(map[int][]p2p.SwitchOption)
+ )
- reactorPairs[0] = newBlockchainReactor(log.NewTestingLogger(t), genDoc, privVals, maxBlockHeight)
- reactorPairs[1] = newBlockchainReactor(log.NewTestingLogger(t), genDoc, privVals, 0)
+ for i := range reactorPairs {
+ height := int64(0)
+ if i == 0 {
+ height = maxBlockHeight
+ }
- p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
- s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
- return s
- }, p2p.Connect2Switches)
+ reactorPairs[i] = newBlockchainReactor(log.NewTestingLogger(t), genDoc, privVals, height)
+
+ options[i] = []p2p.SwitchOption{
+ p2p.WithReactor("BLOCKCHAIN", reactorPairs[i].reactor),
+ }
+ }
+
+ ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancelFn()
+
+ testingCfg := p2pTesting.TestingConfig{
+ Count: 2,
+ P2PCfg: config.P2P,
+ SwitchOptions: options,
+ Channels: []byte{BlockchainChannel},
+ }
+
+ p2pTesting.MakeConnectedPeers(t, ctx, testingCfg)
defer func() {
for _, r := range reactorPairs {
@@ -194,17 +216,35 @@ func TestFlappyBadBlockStopsPeer(t *testing.T) {
otherChain.app.Stop()
}()
- reactorPairs := make([]BlockchainReactorPair, 4)
+ var (
+ reactorPairs = make([]BlockchainReactorPair, 4)
+ options = make(map[int][]p2p.SwitchOption)
+ )
+
+ for i := range reactorPairs {
+ height := int64(0)
+ if i == 0 {
+ height = maxBlockHeight
+ }
+
+ reactorPairs[i] = newBlockchainReactor(log.NewNoopLogger(), genDoc, privVals, height)
- reactorPairs[0] = newBlockchainReactor(log.NewNoopLogger(), genDoc, privVals, maxBlockHeight)
- reactorPairs[1] = newBlockchainReactor(log.NewNoopLogger(), genDoc, privVals, 0)
- reactorPairs[2] = newBlockchainReactor(log.NewNoopLogger(), genDoc, privVals, 0)
- reactorPairs[3] = newBlockchainReactor(log.NewNoopLogger(), genDoc, privVals, 0)
+ options[i] = []p2p.SwitchOption{
+ p2p.WithReactor("BLOCKCHAIN", reactorPairs[i].reactor),
+ }
+ }
- switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
- s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
- return s
- }, p2p.Connect2Switches)
+ ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancelFn()
+
+ testingCfg := p2pTesting.TestingConfig{
+ Count: 4,
+ P2PCfg: config.P2P,
+ SwitchOptions: options,
+ Channels: []byte{BlockchainChannel},
+ }
+
+ switches, transports := p2pTesting.MakeConnectedPeers(t, ctx, testingCfg)
defer func() {
for _, r := range reactorPairs {
@@ -222,7 +262,7 @@ func TestFlappyBadBlockStopsPeer(t *testing.T) {
}
// at this time, reactors[0-3] is the newest
- assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size())
+ assert.Equal(t, 3, len(reactorPairs[1].reactor.Switch.Peers().List()))
// mark reactorPairs[3] is an invalid peer
reactorPairs[3].reactor.store = otherChain.reactor.store
@@ -230,24 +270,41 @@ func TestFlappyBadBlockStopsPeer(t *testing.T) {
lastReactorPair := newBlockchainReactor(log.NewNoopLogger(), genDoc, privVals, 0)
reactorPairs = append(reactorPairs, lastReactorPair)
- switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
- s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
- return s
- }, p2p.Connect2Switches)...)
+ persistentPeers := make([]*p2pTypes.NetAddress, 0, len(transports))
- for i := 0; i < len(reactorPairs)-1; i++ {
- p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
+ for _, tr := range transports {
+ addr := tr.NetAddress()
+ persistentPeers = append(persistentPeers, &addr)
}
+ for i, opt := range options {
+ opt = append(opt, p2p.WithPersistentPeers(persistentPeers))
+
+ options[i] = opt
+ }
+
+ ctx, cancelFn = context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancelFn()
+
+ testingCfg = p2pTesting.TestingConfig{
+ Count: 1,
+ P2PCfg: config.P2P,
+ SwitchOptions: options,
+ Channels: []byte{BlockchainChannel},
+ }
+
+ sw, _ := p2pTesting.MakeConnectedPeers(t, ctx, testingCfg)
+ switches = append(switches, sw...)
+
for {
- if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
+ if lastReactorPair.reactor.pool.IsCaughtUp() || len(lastReactorPair.reactor.Switch.Peers().List()) == 0 {
break
}
time.Sleep(1 * time.Second)
}
- assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
+ assert.True(t, len(lastReactorPair.reactor.Switch.Peers().List()) < len(reactorPairs)-1)
}
func TestBcBlockRequestMessageValidateBasic(t *testing.T) {
diff --git a/tm2/pkg/bft/config/config.go b/tm2/pkg/bft/config/config.go
index f9e9a0cd899..1a01686f4bd 100644
--- a/tm2/pkg/bft/config/config.go
+++ b/tm2/pkg/bft/config/config.go
@@ -6,6 +6,7 @@ import (
"path/filepath"
"regexp"
"slices"
+ "time"
"dario.cat/mergo"
abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types"
@@ -17,6 +18,7 @@ import (
"github.com/gnolang/gno/tm2/pkg/errors"
osm "github.com/gnolang/gno/tm2/pkg/os"
p2p "github.com/gnolang/gno/tm2/pkg/p2p/config"
+ sdk "github.com/gnolang/gno/tm2/pkg/sdk/config"
telemetry "github.com/gnolang/gno/tm2/pkg/telemetry/config"
)
@@ -54,6 +56,7 @@ type Config struct {
Consensus *cns.ConsensusConfig `json:"consensus" toml:"consensus" comment:"##### consensus configuration options #####"`
TxEventStore *eventstore.Config `json:"tx_event_store" toml:"tx_event_store" comment:"##### event store #####"`
Telemetry *telemetry.Config `json:"telemetry" toml:"telemetry" comment:"##### node telemetry #####"`
+ Application *sdk.AppConfig `json:"application" toml:"application" comment:"##### app settings #####"`
}
// DefaultConfig returns a default configuration for a Tendermint node
@@ -66,6 +69,7 @@ func DefaultConfig() *Config {
Consensus: cns.DefaultConsensusConfig(),
TxEventStore: eventstore.DefaultEventStoreConfig(),
Telemetry: telemetry.DefaultTelemetryConfig(),
+ Application: sdk.DefaultAppConfig(),
}
}
@@ -163,16 +167,26 @@ func LoadOrMakeConfigWithOptions(root string, opts ...Option) (*Config, error) {
return cfg, nil
}
+// testP2PConfig returns a configuration for testing the peer-to-peer layer
+func testP2PConfig() *p2p.P2PConfig {
+ cfg := p2p.DefaultP2PConfig()
+ cfg.ListenAddress = "tcp://0.0.0.0:26656"
+ cfg.FlushThrottleTimeout = 10 * time.Millisecond
+
+ return cfg
+}
+
// TestConfig returns a configuration that can be used for testing
func TestConfig() *Config {
return &Config{
BaseConfig: testBaseConfig(),
RPC: rpc.TestRPCConfig(),
- P2P: p2p.TestP2PConfig(),
+ P2P: testP2PConfig(),
Mempool: mem.TestMempoolConfig(),
Consensus: cns.TestConsensusConfig(),
TxEventStore: eventstore.DefaultEventStoreConfig(),
Telemetry: telemetry.DefaultTelemetryConfig(),
+ Application: sdk.DefaultAppConfig(),
}
}
@@ -228,6 +242,9 @@ func (cfg *Config) ValidateBasic() error {
if err := cfg.Consensus.ValidateBasic(); err != nil {
return errors.Wrap(err, "Error in [consensus] section")
}
+ if err := cfg.Application.ValidateBasic(); err != nil {
+ return errors.Wrap(err, "Error in [application] section")
+ }
return nil
}
@@ -318,10 +335,6 @@ type BaseConfig struct {
// TCP or UNIX socket address for the profiling server to listen on
ProfListenAddress string `toml:"prof_laddr" comment:"TCP or UNIX socket address for the profiling server to listen on"`
-
- // If true, query the ABCI app on connecting to a new peer
- // so the app can decide if we should keep the connection or not
- FilterPeers bool `toml:"filter_peers" comment:"If true, query the ABCI app on connecting to a new peer\n so the app can decide if we should keep the connection or not"` // false
}
// DefaultBaseConfig returns a default base configuration for a Tendermint node
@@ -335,7 +348,6 @@ func DefaultBaseConfig() BaseConfig {
ABCI: SocketABCI,
ProfListenAddress: "",
FastSyncMode: true,
- FilterPeers: false,
DBBackend: db.GoLevelDBBackend.String(),
DBPath: DefaultDBDir,
}
@@ -372,6 +384,10 @@ func (cfg BaseConfig) NodeKeyFile() string {
// DBDir returns the full path to the database directory
func (cfg BaseConfig) DBDir() string {
+ if filepath.IsAbs(cfg.DBPath) {
+ return cfg.DBPath
+ }
+
return filepath.Join(cfg.RootDir, cfg.DBPath)
}
diff --git a/tm2/pkg/bft/config/config_test.go b/tm2/pkg/bft/config/config_test.go
index 77f7c0d5e16..ea37e6e1763 100644
--- a/tm2/pkg/bft/config/config_test.go
+++ b/tm2/pkg/bft/config/config_test.go
@@ -185,3 +185,28 @@ func TestConfig_ValidateBaseConfig(t *testing.T) {
assert.ErrorIs(t, c.BaseConfig.ValidateBasic(), errInvalidProfListenAddress)
})
}
+
+func TestConfig_DBDir(t *testing.T) {
+ t.Parallel()
+
+ t.Run("DB path is absolute", func(t *testing.T) {
+ t.Parallel()
+
+ c := DefaultConfig()
+ c.RootDir = "/root"
+ c.DBPath = "/abs/path"
+
+ assert.Equal(t, c.DBPath, c.DBDir())
+ assert.NotEqual(t, filepath.Join(c.RootDir, c.DBPath), c.DBDir())
+ })
+
+ t.Run("DB path is relative", func(t *testing.T) {
+ t.Parallel()
+
+ c := DefaultConfig()
+ c.RootDir = "/root"
+ c.DBPath = "relative/path"
+
+ assert.Equal(t, filepath.Join(c.RootDir, c.DBPath), c.DBDir())
+ })
+}
diff --git a/tm2/pkg/bft/consensus/reactor.go b/tm2/pkg/bft/consensus/reactor.go
index aee695114f8..f39f012b289 100644
--- a/tm2/pkg/bft/consensus/reactor.go
+++ b/tm2/pkg/bft/consensus/reactor.go
@@ -35,7 +35,7 @@ const (
// ConsensusReactor defines a reactor for the consensus service.
type ConsensusReactor struct {
- p2p.BaseReactor // BaseService + p2p.Switch
+ p2p.BaseReactor // BaseService + p2p.MultiplexSwitch
conS *ConsensusState
@@ -157,7 +157,7 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
}
// InitPeer implements Reactor by creating a state for the peer.
-func (conR *ConsensusReactor) InitPeer(peer p2p.Peer) p2p.Peer {
+func (conR *ConsensusReactor) InitPeer(peer p2p.PeerConn) p2p.PeerConn {
peerState := NewPeerState(peer).SetLogger(conR.Logger)
peer.Set(types.PeerStateKey, peerState)
return peer
@@ -165,7 +165,7 @@ func (conR *ConsensusReactor) InitPeer(peer p2p.Peer) p2p.Peer {
// AddPeer implements Reactor by spawning multiple gossiping goroutines for the
// peer.
-func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
+func (conR *ConsensusReactor) AddPeer(peer p2p.PeerConn) {
if !conR.IsRunning() {
return
}
@@ -187,7 +187,7 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
}
// RemovePeer is a noop.
-func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
+func (conR *ConsensusReactor) RemovePeer(peer p2p.PeerConn, reason interface{}) {
if !conR.IsRunning() {
return
}
@@ -205,7 +205,7 @@ func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes are ordered by the receiveRoutine
// NOTE: blocks on consensus state for proposals, block parts, and votes
-func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
+func (conR *ConsensusReactor) Receive(chID byte, src p2p.PeerConn, msgBytes []byte) {
if !conR.IsRunning() {
conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
return
@@ -417,7 +417,7 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
conR.Switch.Broadcast(StateChannel, amino.MustMarshalAny(msg))
/*
// TODO: Make this broadcast more selective.
- for _, peer := range conR.Switch.Peers().List() {
+ for _, peer := range conR.MultiplexSwitch.Peers().List() {
ps, ok := peer.Get(PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("Peer %v has no state", peer))
@@ -446,13 +446,13 @@ func makeRoundStepMessage(event cstypes.EventNewRoundStep) (nrsMsg *NewRoundStep
return
}
-func (conR *ConsensusReactor) sendNewRoundStepMessage(peer p2p.Peer) {
+func (conR *ConsensusReactor) sendNewRoundStepMessage(peer p2p.PeerConn) {
rs := conR.conS.GetRoundState()
nrsMsg := makeRoundStepMessage(rs.EventNewRoundStep())
peer.Send(StateChannel, amino.MustMarshalAny(nrsMsg))
}
-func (conR *ConsensusReactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
+func (conR *ConsensusReactor) gossipDataRoutine(peer p2p.PeerConn, ps *PeerState) {
logger := conR.Logger.With("peer", peer)
OUTER_LOOP:
@@ -547,7 +547,7 @@ OUTER_LOOP:
}
func (conR *ConsensusReactor) gossipDataForCatchup(logger *slog.Logger, rs *cstypes.RoundState,
- prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer,
+ prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.PeerConn,
) {
if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
// Ensure that the peer's PartSetHeader is correct
@@ -589,7 +589,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger *slog.Logger, rs *csty
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}
-func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
+func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.PeerConn, ps *PeerState) {
logger := conR.Logger.With("peer", peer)
// Simple hack to throttle logs upon sleep.
@@ -715,7 +715,7 @@ func (conR *ConsensusReactor) gossipVotesForHeight(logger *slog.Logger, rs *csty
// NOTE: `queryMaj23Routine` has a simple crude design since it only comes
// into play for liveness when there's a signature DDoS attack happening.
-func (conR *ConsensusReactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
+func (conR *ConsensusReactor) queryMaj23Routine(peer p2p.PeerConn, ps *PeerState) {
logger := conR.Logger.With("peer", peer)
OUTER_LOOP:
@@ -826,12 +826,12 @@ func (conR *ConsensusReactor) peerStatsRoutine() {
case *VoteMessage:
if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 {
// TODO: peer metrics.
- // conR.Switch.MarkPeerAsGood(peer)
+ // conR.MultiplexSwitch.MarkPeerAsGood(peer)
}
case *BlockPartMessage:
if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 {
// TODO: peer metrics.
- // conR.Switch.MarkPeerAsGood(peer)
+ // conR.MultiplexSwitch.MarkPeerAsGood(peer)
}
}
case <-conR.conS.Quit():
@@ -878,7 +878,7 @@ var (
// NOTE: PeerStateExposed gets dumped with rpc/core/consensus.go.
// Be mindful of what you Expose.
type PeerState struct {
- peer p2p.Peer
+ peer p2p.PeerConn
logger *slog.Logger
mtx sync.Mutex // NOTE: Modify below using setters, never directly.
@@ -886,7 +886,7 @@ type PeerState struct {
}
// NewPeerState returns a new PeerState for the given Peer
-func NewPeerState(peer p2p.Peer) *PeerState {
+func NewPeerState(peer p2p.PeerConn) *PeerState {
return &PeerState{
peer: peer,
logger: log.NewNoopLogger(),
diff --git a/tm2/pkg/bft/consensus/reactor_test.go b/tm2/pkg/bft/consensus/reactor_test.go
index 42f944b7481..0e1d6249783 100644
--- a/tm2/pkg/bft/consensus/reactor_test.go
+++ b/tm2/pkg/bft/consensus/reactor_test.go
@@ -1,14 +1,13 @@
package consensus
import (
+ "context"
"fmt"
"log/slog"
"sync"
"testing"
"time"
- "github.com/stretchr/testify/assert"
-
"github.com/gnolang/gno/tm2/pkg/amino"
"github.com/gnolang/gno/tm2/pkg/bft/abci/example/kvstore"
cfg "github.com/gnolang/gno/tm2/pkg/bft/config"
@@ -18,27 +17,39 @@ import (
"github.com/gnolang/gno/tm2/pkg/bitarray"
"github.com/gnolang/gno/tm2/pkg/crypto/tmhash"
"github.com/gnolang/gno/tm2/pkg/events"
+ p2pTesting "github.com/gnolang/gno/tm2/pkg/internal/p2p"
"github.com/gnolang/gno/tm2/pkg/log"
osm "github.com/gnolang/gno/tm2/pkg/os"
"github.com/gnolang/gno/tm2/pkg/p2p"
- "github.com/gnolang/gno/tm2/pkg/p2p/mock"
"github.com/gnolang/gno/tm2/pkg/testutils"
+ "github.com/stretchr/testify/assert"
)
// ----------------------------------------------
// in-process testnets
-func startConsensusNet(css []*ConsensusState, n int) ([]*ConsensusReactor, []<-chan events.Event, []events.EventSwitch, []*p2p.Switch) {
+func startConsensusNet(
+ t *testing.T,
+ css []*ConsensusState,
+ n int,
+) ([]*ConsensusReactor, []<-chan events.Event, []events.EventSwitch, []*p2p.MultiplexSwitch) {
+ t.Helper()
+
reactors := make([]*ConsensusReactor, n)
blocksSubs := make([]<-chan events.Event, 0)
eventSwitches := make([]events.EventSwitch, n)
- p2pSwitches := ([]*p2p.Switch)(nil)
+ p2pSwitches := ([]*p2p.MultiplexSwitch)(nil)
+ options := make(map[int][]p2p.SwitchOption)
for i := 0; i < n; i++ {
/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
if err != nil { t.Fatal(err)}*/
reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states
reactors[i].SetLogger(css[i].Logger)
+ options[i] = []p2p.SwitchOption{
+ p2p.WithReactor("CONSENSUS", reactors[i]),
+ }
+
// evsw is already started with the cs
eventSwitches[i] = css[i].evsw
reactors[i].SetEventSwitch(eventSwitches[i])
@@ -51,11 +62,22 @@ func startConsensusNet(css []*ConsensusState, n int) ([]*ConsensusReactor, []<-c
}
}
// make connected switches and start all reactors
- p2pSwitches = p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
- s.AddReactor("CONSENSUS", reactors[i])
- s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
- return s
- }, p2p.Connect2Switches)
+ ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancelFn()
+
+ testingCfg := p2pTesting.TestingConfig{
+ P2PCfg: config.P2P,
+ Count: n,
+ SwitchOptions: options,
+ Channels: []byte{
+ StateChannel,
+ DataChannel,
+ VoteChannel,
+ VoteSetBitsChannel,
+ },
+ }
+
+ p2pSwitches, _ = p2pTesting.MakeConnectedPeers(t, ctx, testingCfg)
// now that everyone is connected, start the state machines
// If we started the state machines before everyone was connected,
@@ -68,11 +90,15 @@ func startConsensusNet(css []*ConsensusState, n int) ([]*ConsensusReactor, []<-c
return reactors, blocksSubs, eventSwitches, p2pSwitches
}
-func stopConsensusNet(logger *slog.Logger, reactors []*ConsensusReactor, eventSwitches []events.EventSwitch, p2pSwitches []*p2p.Switch) {
+func stopConsensusNet(
+ logger *slog.Logger,
+ reactors []*ConsensusReactor,
+ eventSwitches []events.EventSwitch,
+ p2pSwitches []*p2p.MultiplexSwitch,
+) {
logger.Info("stopConsensusNet", "n", len(reactors))
- for i, r := range reactors {
+ for i := range reactors {
logger.Info("stopConsensusNet: Stopping ConsensusReactor", "i", i)
- r.Switch.Stop()
}
for i, b := range eventSwitches {
logger.Info("stopConsensusNet: Stopping evsw", "i", i)
@@ -92,7 +118,7 @@ func TestReactorBasic(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
- reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, N)
+ reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(t, css, N)
defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(j int) {
@@ -112,7 +138,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
c.Consensus.CreateEmptyBlocks = false
})
defer cleanup()
- reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, N)
+ reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(t, css, N)
defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)
// send a tx
@@ -132,12 +158,12 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
- reactors, _, eventSwitches, p2pSwitches := startConsensusNet(css, N)
+ reactors, _, eventSwitches, p2pSwitches := startConsensusNet(t, css, N)
defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)
var (
reactor = reactors[0]
- peer = mock.NewPeer(nil)
+ peer = p2pTesting.NewPeer(t)
msg = amino.MustMarshalAny(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
)
@@ -156,12 +182,12 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
- reactors, _, eventSwitches, p2pSwitches := startConsensusNet(css, N)
+ reactors, _, eventSwitches, p2pSwitches := startConsensusNet(t, css, N)
defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)
var (
reactor = reactors[0]
- peer = mock.NewPeer(nil)
+ peer = p2pTesting.NewPeer(t)
msg = amino.MustMarshalAny(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
)
@@ -182,7 +208,7 @@ func TestFlappyReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
- reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, N)
+ reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(t, css, N)
defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)
// wait till everyone makes the first new block
@@ -210,7 +236,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
defer cleanup()
- reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, nVals)
+ reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(t, css, nVals)
defer stopConsensusNet(logger, reactors, eventSwitches, p2pSwitches)
// map of active validators
@@ -276,7 +302,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
logger := log.NewTestingLogger(t)
- reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, nPeers)
+ reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(t, css, nPeers)
defer stopConsensusNet(logger, reactors, eventSwitches, p2pSwitches)
// map of active validators
@@ -375,7 +401,7 @@ func TestReactorWithTimeoutCommit(t *testing.T) {
css[i].config.SkipTimeoutCommit = false
}
- reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, N-1)
+ reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(t, css, N-1)
defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)
// wait till everyone makes the first new block
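
For reference, a condensed sketch of the new test wiring, assuming only the `p2pTesting.TestingConfig` fields and the `MakeConnectedPeers` call shown above; `startSingleConsensusPeer` and its arguments are illustrative.

```go
// startSingleConsensusPeer is a sketch (illustrative name) of spinning up a
// single connected switch for one consensus reactor, mirroring the pattern
// used by startConsensusNet above.
func startSingleConsensusPeer(t *testing.T, reactor *ConsensusReactor) []*p2p.MultiplexSwitch {
	t.Helper()

	ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancelFn()

	cfg := p2pTesting.TestingConfig{
		P2PCfg: config.P2P,
		Count:  1,
		SwitchOptions: map[int][]p2p.SwitchOption{
			0: {p2p.WithReactor("CONSENSUS", reactor)},
		},
		Channels: []byte{
			StateChannel,
			DataChannel,
			VoteChannel,
			VoteSetBitsChannel,
		},
	}

	// MakeConnectedPeers builds, starts, and inter-connects the switches;
	// the second return value is unused here, as in startConsensusNet.
	switches, _ := p2pTesting.MakeConnectedPeers(t, ctx, cfg)

	return switches
}
```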
diff --git a/tm2/pkg/bft/consensus/state.go b/tm2/pkg/bft/consensus/state.go
index 8b2653813e3..d9c78ec1bdf 100644
--- a/tm2/pkg/bft/consensus/state.go
+++ b/tm2/pkg/bft/consensus/state.go
@@ -23,7 +23,7 @@ import (
"github.com/gnolang/gno/tm2/pkg/errors"
"github.com/gnolang/gno/tm2/pkg/events"
osm "github.com/gnolang/gno/tm2/pkg/os"
- "github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/gnolang/gno/tm2/pkg/service"
"github.com/gnolang/gno/tm2/pkg/telemetry"
"github.com/gnolang/gno/tm2/pkg/telemetry/metrics"
@@ -53,7 +53,7 @@ type newRoundStepInfo struct {
// msgs from the reactor which may update the state
type msgInfo struct {
Msg ConsensusMessage `json:"msg"`
- PeerID p2p.ID `json:"peer_key"`
+ PeerID p2pTypes.ID `json:"peer_key"`
}
// WAL message.
@@ -399,7 +399,7 @@ func (cs *ConsensusState) OpenWAL(walFile string) (walm.WAL, error) {
// TODO: should these return anything or let callers just use events?
// AddVote inputs a vote.
-func (cs *ConsensusState) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
+func (cs *ConsensusState) AddVote(vote *types.Vote, peerID p2pTypes.ID) (added bool, err error) {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
} else {
@@ -411,7 +411,7 @@ func (cs *ConsensusState) AddVote(vote *types.Vote, peerID p2p.ID) (added bool,
}
// SetProposal inputs a proposal.
-func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {
+func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerID p2pTypes.ID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
} else {
@@ -423,7 +423,7 @@ func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerID p2p.ID) e
}
// AddProposalBlockPart inputs a part of the proposal block.
-func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *types.Part, peerID p2p.ID) error {
+func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *types.Part, peerID p2pTypes.ID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
} else {
@@ -435,7 +435,7 @@ func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *ty
}
// SetProposalAndBlock inputs the proposal and all block parts.
-func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerID p2p.ID) error {
+func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerID p2pTypes.ID) error {
if err := cs.SetProposal(proposal, peerID); err != nil {
return err
}
@@ -1444,7 +1444,7 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block.
-func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) {
+func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2pTypes.ID) (added bool, err error) {
height, round, part := msg.Height, msg.Round, msg.Part
// Blocks might be reused, so round mismatch is OK
@@ -1514,7 +1514,7 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p
}
// Attempt to add the vote. if its a duplicate signature, dupeout the validator
-func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
+func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2pTypes.ID) (bool, error) {
added, err := cs.addVote(vote, peerID)
if err != nil {
// If the vote height is off, we'll just ignore it,
@@ -1547,7 +1547,7 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err
// -----------------------------------------------------------------------------
-func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
+func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2pTypes.ID) (added bool, err error) {
cs.Logger.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "valIndex", vote.ValidatorIndex, "csHeight", cs.Height)
// A precommit for the previous height?
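
A small sketch of how a caller feeds votes in after the ID type change; only the `AddVote` signature and the empty-ID convention from the hunks above are assumed, and `forwardVote` is an illustrative name.

```go
// forwardVote (illustrative name) relays a vote received from a peer into
// the consensus state. A non-empty p2pTypes.ID routes it through the peer
// message queue; the empty ID "" stays reserved for votes this node
// produced itself.
func forwardVote(cs *ConsensusState, vote *types.Vote, peer p2p.PeerConn) error {
	_, err := cs.AddVote(vote, peer.ID())
	return err
}
```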
diff --git a/tm2/pkg/bft/consensus/state_test.go b/tm2/pkg/bft/consensus/state_test.go
index 201cf8906b3..4f4b3e3eb05 100644
--- a/tm2/pkg/bft/consensus/state_test.go
+++ b/tm2/pkg/bft/consensus/state_test.go
@@ -1733,7 +1733,7 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
// create dummy peer
cs, _ := randConsensusState(1)
- peer := p2pmock.NewPeer(nil)
+ peer := p2pmock.Peer{}
// 1) new block part
parts := types.NewPartSetFromData(random.RandBytes(100), 10)
@@ -1777,7 +1777,7 @@ func TestStateOutputVoteStats(t *testing.T) {
cs, vss := randConsensusState(2)
// create dummy peer
- peer := p2pmock.NewPeer(nil)
+ peer := p2pmock.Peer{}
vote := signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{})
diff --git a/tm2/pkg/bft/consensus/types/height_vote_set.go b/tm2/pkg/bft/consensus/types/height_vote_set.go
index b81937ebd1e..7f3d52022ad 100644
--- a/tm2/pkg/bft/consensus/types/height_vote_set.go
+++ b/tm2/pkg/bft/consensus/types/height_vote_set.go
@@ -8,7 +8,7 @@ import (
"github.com/gnolang/gno/tm2/pkg/amino"
"github.com/gnolang/gno/tm2/pkg/bft/types"
- "github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
)
type RoundVoteSet struct {
@@ -39,9 +39,9 @@ type HeightVoteSet struct {
valSet *types.ValidatorSet
mtx sync.Mutex
- round int // max tracked round
- roundVoteSets map[int]RoundVoteSet // keys: [0...round]
- peerCatchupRounds map[p2p.ID][]int // keys: peer.ID; values: at most 2 rounds
+ round int // max tracked round
+ roundVoteSets map[int]RoundVoteSet // keys: [0...round]
+ peerCatchupRounds map[p2pTypes.ID][]int // keys: peer.ID; values: at most 2 rounds
}
func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet {
@@ -59,7 +59,7 @@ func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) {
hvs.height = height
hvs.valSet = valSet
hvs.roundVoteSets = make(map[int]RoundVoteSet)
- hvs.peerCatchupRounds = make(map[p2p.ID][]int)
+ hvs.peerCatchupRounds = make(map[p2pTypes.ID][]int)
hvs.addRound(0)
hvs.round = 0
@@ -108,7 +108,7 @@ func (hvs *HeightVoteSet) addRound(round int) {
// Duplicate votes return added=false, err=nil.
// By convention, peerID is "" if origin is self.
-func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
+func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2pTypes.ID) (added bool, err error) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(vote.Type) {
@@ -176,7 +176,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ types.SignedMsgType) *type
// NOTE: if there are too many peers, or too much peer churn,
// this can cause memory issues.
// TODO: implement ability to remove peers too
-func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ types.SignedMsgType, peerID p2p.ID, blockID types.BlockID) error {
+func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ types.SignedMsgType, peerID p2pTypes.ID, blockID types.BlockID) error {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(type_) {
diff --git a/tm2/pkg/bft/mempool/reactor.go b/tm2/pkg/bft/mempool/reactor.go
index 3ef85b80a21..cf253999fb3 100644
--- a/tm2/pkg/bft/mempool/reactor.go
+++ b/tm2/pkg/bft/mempool/reactor.go
@@ -13,6 +13,7 @@ import (
"github.com/gnolang/gno/tm2/pkg/bft/types"
"github.com/gnolang/gno/tm2/pkg/clist"
"github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
)
const (
@@ -39,25 +40,25 @@ type Reactor struct {
type mempoolIDs struct {
mtx sync.RWMutex
- peerMap map[p2p.ID]uint16
+ peerMap map[p2pTypes.ID]uint16
nextID uint16 // assumes that a node will never have over 65536 active peers
- activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
+ activeIDs map[uint16]struct{} // used to check if a given mempoolID key is used, the value doesn't matter
}
// Reserve searches for the next unused ID and assigns it to the
// peer.
-func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
+func (ids *mempoolIDs) ReserveForPeer(id p2pTypes.ID) {
ids.mtx.Lock()
defer ids.mtx.Unlock()
- curID := ids.nextPeerID()
- ids.peerMap[peer.ID()] = curID
+ curID := ids.nextMempoolPeerID()
+ ids.peerMap[id] = curID
ids.activeIDs[curID] = struct{}{}
}
-// nextPeerID returns the next unused peer ID to use.
+// nextMempoolPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
-func (ids *mempoolIDs) nextPeerID() uint16 {
+func (ids *mempoolIDs) nextMempoolPeerID() uint16 {
if len(ids.activeIDs) == maxActiveIDs {
panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
}
@@ -73,28 +74,28 @@ func (ids *mempoolIDs) nextPeerID() uint16 {
}
// Reclaim returns the ID reserved for the peer back to unused pool.
-func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
+func (ids *mempoolIDs) Reclaim(id p2pTypes.ID) {
ids.mtx.Lock()
defer ids.mtx.Unlock()
- removedID, ok := ids.peerMap[peer.ID()]
+ removedID, ok := ids.peerMap[id]
if ok {
delete(ids.activeIDs, removedID)
- delete(ids.peerMap, peer.ID())
+ delete(ids.peerMap, id)
}
}
// GetForPeer returns an ID reserved for the peer.
-func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
+func (ids *mempoolIDs) GetForPeer(id p2pTypes.ID) uint16 {
ids.mtx.RLock()
defer ids.mtx.RUnlock()
- return ids.peerMap[peer.ID()]
+ return ids.peerMap[id]
}
func newMempoolIDs() *mempoolIDs {
return &mempoolIDs{
- peerMap: make(map[p2p.ID]uint16),
+ peerMap: make(map[p2pTypes.ID]uint16),
activeIDs: map[uint16]struct{}{0: {}},
nextID: 1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
}
@@ -138,20 +139,20 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
-func (memR *Reactor) AddPeer(peer p2p.Peer) {
- memR.ids.ReserveForPeer(peer)
+func (memR *Reactor) AddPeer(peer p2p.PeerConn) {
+ memR.ids.ReserveForPeer(peer.ID())
go memR.broadcastTxRoutine(peer)
}
// RemovePeer implements Reactor.
-func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
- memR.ids.Reclaim(peer)
+func (memR *Reactor) RemovePeer(peer p2p.PeerConn, reason interface{}) {
+ memR.ids.Reclaim(peer.ID())
// broadcast routine checks if peer is gone and returns
}
// Receive implements Reactor.
// It adds any received transactions to the mempool.
-func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
+func (memR *Reactor) Receive(chID byte, src p2p.PeerConn, msgBytes []byte) {
msg, err := memR.decodeMsg(msgBytes)
if err != nil {
memR.Logger.Error("Error decoding mempool message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
@@ -162,8 +163,8 @@ func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
switch msg := msg.(type) {
case *TxMessage:
- peerID := memR.ids.GetForPeer(src)
- err := memR.mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{SenderID: peerID})
+ mempoolID := memR.ids.GetForPeer(src.ID())
+ err := memR.mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{SenderID: mempoolID})
if err != nil {
memR.Logger.Info("Could not check tx", "tx", txID(msg.Tx), "err", err)
}
@@ -179,12 +180,12 @@ type PeerState interface {
}
// Send new mempool txs to peer.
-func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
+func (memR *Reactor) broadcastTxRoutine(peer p2p.PeerConn) {
if !memR.config.Broadcast {
return
}
- peerID := memR.ids.GetForPeer(peer)
+ mempoolID := memR.ids.GetForPeer(peer.ID())
var next *clist.CElement
for {
// In case of both next.NextWaitChan() and peer.Quit() are variable at the same time
@@ -213,7 +214,7 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
if !ok {
// Peer does not have a state yet. We set it in the consensus reactor, but
- // when we add peer in Switch, the order we call reactors#AddPeer is
+ // when we add a peer in MultiplexSwitch, the order in which we call reactors#AddPeer is
// different every time due to us using a map. Sometimes other reactors
// will be initialized before the consensus reactor. We should wait a few
// milliseconds and retry.
@@ -226,7 +227,7 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
}
// ensure peer hasn't already sent us this tx
- if _, ok := memTx.senders.Load(peerID); !ok {
+ if _, ok := memTx.senders.Load(mempoolID); !ok {
// send memTx
msg := &TxMessage{Tx: memTx.tx}
success := peer.Send(MempoolChannel, amino.MustMarshalAny(msg))
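
A compact sketch of the reworked bookkeeping, using only the `mempoolIDs` methods and the `p2pTypes.GenerateNodeKey` call that appear in this change; `trackPeerSketch` is an illustrative name.

```go
// trackPeerSketch (illustrative name): peers are identified purely by their
// p2pTypes.ID now, so no full peer object is needed to reserve or look up a
// mempool sender ID.
func trackPeerSketch() {
	ids := newMempoolIDs()

	id := p2pTypes.GenerateNodeKey().ID()

	ids.ReserveForPeer(id)         // called from AddPeer
	senderID := ids.GetForPeer(id) // used as TxInfo.SenderID in Receive
	ids.Reclaim(id)                // called from RemovePeer

	_ = senderID
}
```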
diff --git a/tm2/pkg/bft/mempool/reactor_test.go b/tm2/pkg/bft/mempool/reactor_test.go
index e7a3c43a6b9..2d20fb252e2 100644
--- a/tm2/pkg/bft/mempool/reactor_test.go
+++ b/tm2/pkg/bft/mempool/reactor_test.go
@@ -1,26 +1,36 @@
package mempool
import (
- "net"
+ "context"
+ "fmt"
"sync"
"testing"
"time"
"github.com/fortytw2/leaktest"
- "github.com/stretchr/testify/assert"
-
"github.com/gnolang/gno/tm2/pkg/bft/abci/example/kvstore"
memcfg "github.com/gnolang/gno/tm2/pkg/bft/mempool/config"
"github.com/gnolang/gno/tm2/pkg/bft/proxy"
"github.com/gnolang/gno/tm2/pkg/bft/types"
"github.com/gnolang/gno/tm2/pkg/errors"
+ p2pTesting "github.com/gnolang/gno/tm2/pkg/internal/p2p"
"github.com/gnolang/gno/tm2/pkg/log"
"github.com/gnolang/gno/tm2/pkg/p2p"
p2pcfg "github.com/gnolang/gno/tm2/pkg/p2p/config"
- "github.com/gnolang/gno/tm2/pkg/p2p/mock"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/gnolang/gno/tm2/pkg/testutils"
+ "github.com/stretchr/testify/assert"
)
+// testP2PConfig returns a configuration for testing the peer-to-peer layer
+func testP2PConfig() *p2pcfg.P2PConfig {
+ cfg := p2pcfg.DefaultP2PConfig()
+ cfg.ListenAddress = "tcp://0.0.0.0:26656"
+ cfg.FlushThrottleTimeout = 10 * time.Millisecond
+
+ return cfg
+}
+
type peerState struct {
height int64
}
@@ -30,65 +40,108 @@ func (ps peerState) GetHeight() int64 {
}
// connect N mempool reactors through N switches
-func makeAndConnectReactors(mconfig *memcfg.MempoolConfig, pconfig *p2pcfg.P2PConfig, n int) []*Reactor {
- reactors := make([]*Reactor, n)
- logger := log.NewNoopLogger()
+func makeAndConnectReactors(t *testing.T, mconfig *memcfg.MempoolConfig, pconfig *p2pcfg.P2PConfig, n int) []*Reactor {
+ t.Helper()
+
+ var (
+ reactors = make([]*Reactor, n)
+ logger = log.NewNoopLogger()
+ options = make(map[int][]p2p.SwitchOption)
+ )
+
for i := 0; i < n; i++ {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
- reactors[i] = NewReactor(mconfig, mempool) // so we dont start the consensus states
- reactors[i].SetLogger(logger.With("validator", i))
+ reactor := NewReactor(mconfig, mempool) // so we don't start the consensus states
+ reactor.SetLogger(logger.With("validator", i))
+
+ options[i] = []p2p.SwitchOption{
+ p2p.WithReactor("MEMPOOL", reactor),
+ }
+
+ reactors[i] = reactor
+ }
+
+ // "Simulate" the networking layer
+ ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancelFn()
+
+ cfg := p2pTesting.TestingConfig{
+ Count: n,
+ P2PCfg: pconfig,
+ SwitchOptions: options,
+ Channels: []byte{MempoolChannel},
}
- p2p.MakeConnectedSwitches(pconfig, n, func(i int, s *p2p.Switch) *p2p.Switch {
- s.AddReactor("MEMPOOL", reactors[i])
- return s
- }, p2p.Connect2Switches)
+ p2pTesting.MakeConnectedPeers(t, ctx, cfg)
+
return reactors
}
-func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
+func waitForTxsOnReactors(
+ t *testing.T,
+ txs types.Txs,
+ reactors []*Reactor,
+) {
t.Helper()
- // wait for the txs in all mempools
- wg := new(sync.WaitGroup)
+ ctx, cancelFn := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancelFn()
+
+ // Wait for the txs to propagate in all mempools
+ var wg sync.WaitGroup
+
for i, reactor := range reactors {
wg.Add(1)
+
go func(r *Reactor, reactorIndex int) {
defer wg.Done()
- waitForTxsOnReactor(t, txs, r, reactorIndex)
+
+ reapedTxs := waitForTxsOnReactor(t, ctx, len(txs), r)
+
+ for i, tx := range txs {
+ assert.Equalf(t, tx, reapedTxs[i],
+ fmt.Sprintf(
+ "txs at index %d on reactor %d don't match: %v vs %v",
+ i, reactorIndex,
+ tx,
+ reapedTxs[i],
+ ),
+ )
+ }
}(reactor, i)
}
- done := make(chan struct{})
- go func() {
- wg.Wait()
- close(done)
- }()
-
- timer := time.After(timeout)
- select {
- case <-timer:
- t.Fatal("Timed out waiting for txs")
- case <-done:
- }
+ wg.Wait()
}
-func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
+func waitForTxsOnReactor(
+ t *testing.T,
+ ctx context.Context,
+ expectedLength int,
+ reactor *Reactor,
+) types.Txs {
t.Helper()
- mempool := reactor.mempool
- for mempool.Size() < len(txs) {
- time.Sleep(time.Millisecond * 100)
- }
-
- reapedTxs := mempool.ReapMaxTxs(len(txs))
- for i, tx := range txs {
- assert.Equalf(t, tx, reapedTxs[i],
- "txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
+ var (
+ mempool = reactor.mempool
+ ticker = time.NewTicker(100 * time.Millisecond)
+ )
+
+ for {
+ select {
+ case <-ctx.Done():
+ t.Fatal("timed out waiting for txs")
+ case <-ticker.C:
+ if mempool.Size() < expectedLength {
+ continue
+ }
+
+ return mempool.ReapMaxTxs(expectedLength)
+ }
}
}
@@ -100,32 +153,29 @@ func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
assert.Zero(t, reactor.mempool.Size())
}
-const (
- numTxs = 1000
- timeout = 120 * time.Second // ridiculously high because CircleCI is slow
-)
-
func TestReactorBroadcastTxMessage(t *testing.T) {
t.Parallel()
mconfig := memcfg.TestMempoolConfig()
- pconfig := p2pcfg.TestP2PConfig()
+ pconfig := testP2PConfig()
const N = 4
- reactors := makeAndConnectReactors(mconfig, pconfig, N)
- defer func() {
+ reactors := makeAndConnectReactors(t, mconfig, pconfig, N)
+ t.Cleanup(func() {
for _, r := range reactors {
- r.Stop()
+ assert.NoError(t, r.Stop())
}
- }()
+ })
+
for _, r := range reactors {
for _, peer := range r.Switch.Peers().List() {
+ fmt.Printf("Setting peer %s\n", peer.ID())
peer.Set(types.PeerStateKey, peerState{1})
}
}
// send a bunch of txs to the first reactor's mempool
// and wait for them all to be received in the others
- txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID, true)
+ txs := checkTxs(t, reactors[0].mempool, 1000, UnknownPeerID, true)
waitForTxsOnReactors(t, txs, reactors)
}
@@ -133,9 +183,9 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
t.Parallel()
mconfig := memcfg.TestMempoolConfig()
- pconfig := p2pcfg.TestP2PConfig()
+ pconfig := testP2PConfig()
const N = 2
- reactors := makeAndConnectReactors(mconfig, pconfig, N)
+ reactors := makeAndConnectReactors(t, mconfig, pconfig, N)
defer func() {
for _, r := range reactors {
r.Stop()
@@ -144,7 +194,7 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
// send a bunch of txs to the first reactor's mempool, claiming it came from peer
// ensure peer gets no txs
- checkTxs(t, reactors[0].mempool, numTxs, 1, true)
+ checkTxs(t, reactors[0].mempool, 1000, 1, true)
ensureNoTxs(t, reactors[1], 100*time.Millisecond)
}
@@ -158,9 +208,9 @@ func TestFlappyBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
}
mconfig := memcfg.TestMempoolConfig()
- pconfig := p2pcfg.TestP2PConfig()
+ pconfig := testP2PConfig()
const N = 2
- reactors := makeAndConnectReactors(mconfig, pconfig, N)
+ reactors := makeAndConnectReactors(t, mconfig, pconfig, N)
defer func() {
for _, r := range reactors {
r.Stop()
@@ -186,9 +236,9 @@ func TestFlappyBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
}
mconfig := memcfg.TestMempoolConfig()
- pconfig := p2pcfg.TestP2PConfig()
+ pconfig := testP2PConfig()
const N = 2
- reactors := makeAndConnectReactors(mconfig, pconfig, N)
+ reactors := makeAndConnectReactors(t, mconfig, pconfig, N)
// stop reactors
for _, r := range reactors {
@@ -205,15 +255,15 @@ func TestMempoolIDsBasic(t *testing.T) {
ids := newMempoolIDs()
- peer := mock.NewPeer(net.IP{127, 0, 0, 1})
+ id := p2pTypes.GenerateNodeKey().ID()
- ids.ReserveForPeer(peer)
- assert.EqualValues(t, 1, ids.GetForPeer(peer))
- ids.Reclaim(peer)
+ ids.ReserveForPeer(id)
+ assert.EqualValues(t, 1, ids.GetForPeer(id))
+ ids.Reclaim(id)
- ids.ReserveForPeer(peer)
- assert.EqualValues(t, 2, ids.GetForPeer(peer))
- ids.Reclaim(peer)
+ ids.ReserveForPeer(id)
+ assert.EqualValues(t, 2, ids.GetForPeer(id))
+ ids.Reclaim(id)
}
func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
@@ -227,12 +277,13 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
ids := newMempoolIDs()
for i := 0; i < maxActiveIDs-1; i++ {
- peer := mock.NewPeer(net.IP{127, 0, 0, 1})
- ids.ReserveForPeer(peer)
+ id := p2pTypes.GenerateNodeKey().ID()
+ ids.ReserveForPeer(id)
}
assert.Panics(t, func() {
- peer := mock.NewPeer(net.IP{127, 0, 0, 1})
- ids.ReserveForPeer(peer)
+ id := p2pTypes.GenerateNodeKey().ID()
+
+ ids.ReserveForPeer(id)
})
}
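
The timeout handling above follows a context-plus-ticker polling pattern; below is a generic sketch of that pattern. `pollUntil` is illustrative and does not exist in the package.

```go
// pollUntil (illustrative name) polls a condition until it holds or the
// context expires, the same shape used by waitForTxsOnReactor above.
func pollUntil(t *testing.T, ctx context.Context, cond func() bool) {
	t.Helper()

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop() // release the ticker once the helper returns

	for {
		select {
		case <-ctx.Done():
			t.Fatal("timed out waiting for condition")
		case <-ticker.C:
			if cond() {
				return
			}
		}
	}
}
```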
diff --git a/tm2/pkg/bft/node/node.go b/tm2/pkg/bft/node/node.go
index e29de3dd1ae..c1afb2996fa 100644
--- a/tm2/pkg/bft/node/node.go
+++ b/tm2/pkg/bft/node/node.go
@@ -12,12 +12,16 @@ import (
"sync"
"time"
+ goErrors "errors"
+
"github.com/gnolang/gno/tm2/pkg/bft/appconn"
"github.com/gnolang/gno/tm2/pkg/bft/state/eventstore/file"
+ "github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/discovery"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/rs/cors"
"github.com/gnolang/gno/tm2/pkg/amino"
- abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types"
bc "github.com/gnolang/gno/tm2/pkg/bft/blockchain"
cfg "github.com/gnolang/gno/tm2/pkg/bft/config"
cs "github.com/gnolang/gno/tm2/pkg/bft/consensus"
@@ -43,6 +47,23 @@ import (
verset "github.com/gnolang/gno/tm2/pkg/versionset"
)
+// Reactors are hooks for the p2p module,
+// to alert of connecting / disconnecting peers
+const (
+ mempoolReactorName = "MEMPOOL"
+ blockchainReactorName = "BLOCKCHAIN"
+ consensusReactorName = "CONSENSUS"
+ discoveryReactorName = "DISCOVERY"
+)
+
+const (
+ mempoolModuleName = "mempool"
+ blockchainModuleName = "blockchain"
+ consensusModuleName = "consensus"
+ p2pModuleName = "p2p"
+ discoveryModuleName = "discovery"
+)
+
// ------------------------------------------------------------------------------
// DBContext specifies config information for loading a new DB.
@@ -87,7 +108,7 @@ func DefaultNewNode(
logger *slog.Logger,
) (*Node, error) {
// Generate node PrivKey
- nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
+ nodeKey, err := p2pTypes.LoadOrGenNodeKey(config.NodeKeyFile())
if err != nil {
return nil, err
}
@@ -118,30 +139,6 @@ func DefaultNewNode(
// Option sets a parameter for the node.
type Option func(*Node)
-// CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
-// the node's Switch.
-//
-// WARNING: using any name from the below list of the existing reactors will
-// result in replacing it with the custom one.
-//
-// - MEMPOOL
-// - BLOCKCHAIN
-// - CONSENSUS
-// - EVIDENCE
-// - PEX
-func CustomReactors(reactors map[string]p2p.Reactor) Option {
- return func(n *Node) {
- for name, reactor := range reactors {
- if existingReactor := n.sw.Reactor(name); existingReactor != nil {
- n.sw.Logger.Info("Replacing existing reactor with a custom one",
- "name", name, "existing", existingReactor, "custom", reactor)
- n.sw.RemoveReactor(name, existingReactor)
- }
- n.sw.AddReactor(name, reactor)
- }
- }
-}
-
// ------------------------------------------------------------------------------
// Node is the highest level interface to a full Tendermint node.
@@ -155,11 +152,12 @@ type Node struct {
privValidator types.PrivValidator // local node's validator key
// network
- transport *p2p.MultiplexTransport
- sw *p2p.Switch // p2p connections
- nodeInfo p2p.NodeInfo
- nodeKey *p2p.NodeKey // our node privkey
- isListening bool
+ transport *p2p.MultiplexTransport
+ sw *p2p.MultiplexSwitch // p2p connections
+ discoveryReactor *discovery.Reactor // discovery reactor
+ nodeInfo p2pTypes.NodeInfo
+ nodeKey *p2pTypes.NodeKey // our node privkey
+ isListening bool
// services
evsw events.EventSwitch
@@ -279,7 +277,7 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp appconn.AppConn
state.ConsensusParams.Block.MaxTxBytes,
mempl.WithPreCheck(sm.TxPreCheck(state)),
)
- mempoolLogger := logger.With("module", "mempool")
+ mempoolLogger := logger.With("module", mempoolModuleName)
mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
mempoolReactor.SetLogger(mempoolLogger)
@@ -289,16 +287,23 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp appconn.AppConn
return mempoolReactor, mempool
}
-func createBlockchainReactor(config *cfg.Config,
+func createBlockchainReactor(
state sm.State,
blockExec *sm.BlockExecutor,
blockStore *store.BlockStore,
fastSync bool,
+ switchToConsensusFn bc.SwitchToConsensusFn,
logger *slog.Logger,
) (bcReactor p2p.Reactor, err error) {
- bcReactor = bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
+ bcReactor = bc.NewBlockchainReactor(
+ state.Copy(),
+ blockExec,
+ blockStore,
+ fastSync,
+ switchToConsensusFn,
+ )
- bcReactor.SetLogger(logger.With("module", "blockchain"))
+ bcReactor.SetLogger(logger.With("module", blockchainModuleName))
return bcReactor, nil
}
@@ -331,93 +336,15 @@ func createConsensusReactor(config *cfg.Config,
return consensusReactor, consensusState
}
-func createTransport(config *cfg.Config, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, proxyApp appconn.AppConns) (*p2p.MultiplexTransport, []p2p.PeerFilterFunc) {
- var (
- mConnConfig = p2p.MConnConfig(config.P2P)
- transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
- connFilters = []p2p.ConnFilterFunc{}
- peerFilters = []p2p.PeerFilterFunc{}
- )
-
- if !config.P2P.AllowDuplicateIP {
- connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
- }
-
- // Filter peers by addr or pubkey with an ABCI query.
- // If the query return code is OK, add peer.
- if config.FilterPeers {
- connFilters = append(
- connFilters,
- // ABCI query for address filtering.
- func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
- res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
- Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
- })
- if err != nil {
- return err
- }
- if res.IsErr() {
- return fmt.Errorf("error querying abci app: %v", res)
- }
-
- return nil
- },
- )
-
- peerFilters = append(
- peerFilters,
- // ABCI query for ID filtering.
- func(_ p2p.IPeerSet, p p2p.Peer) error {
- res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
- Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
- })
- if err != nil {
- return err
- }
- if res.IsErr() {
- return fmt.Errorf("error querying abci app: %v", res)
- }
-
- return nil
- },
- )
- }
-
- p2p.MultiplexTransportConnFilters(connFilters...)(transport)
- return transport, peerFilters
-}
-
-func createSwitch(config *cfg.Config,
- transport *p2p.MultiplexTransport,
- peerFilters []p2p.PeerFilterFunc,
- mempoolReactor *mempl.Reactor,
- bcReactor p2p.Reactor,
- consensusReactor *cs.ConsensusReactor,
- nodeInfo p2p.NodeInfo,
- nodeKey *p2p.NodeKey,
- p2pLogger *slog.Logger,
-) *p2p.Switch {
- sw := p2p.NewSwitch(
- config.P2P,
- transport,
- p2p.SwitchPeerFilters(peerFilters...),
- )
- sw.SetLogger(p2pLogger)
- sw.AddReactor("MEMPOOL", mempoolReactor)
- sw.AddReactor("BLOCKCHAIN", bcReactor)
- sw.AddReactor("CONSENSUS", consensusReactor)
-
- sw.SetNodeInfo(nodeInfo)
- sw.SetNodeKey(nodeKey)
-
- p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
- return sw
+type nodeReactor struct {
+ name string
+ reactor p2p.Reactor
}
// NewNode returns a new, ready to go, Tendermint Node.
func NewNode(config *cfg.Config,
privValidator types.PrivValidator,
- nodeKey *p2p.NodeKey,
+ nodeKey *p2pTypes.NodeKey,
clientCreator appconn.ClientCreator,
genesisDocProvider GenesisDocProvider,
dbProvider DBProvider,
@@ -463,7 +390,7 @@ func NewNode(config *cfg.Config,
// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
// and replays any blocks as necessary to sync tendermint with the app.
- consensusLogger := logger.With("module", "consensus")
+ consensusLogger := logger.With("module", consensusModuleName)
if err := doHandshake(stateDB, state, blockStore, genDoc, evsw, proxyApp, consensusLogger); err != nil {
return nil, err
}
@@ -506,38 +433,103 @@ func NewNode(config *cfg.Config,
mempool,
)
- // Make BlockchainReactor
- bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync, logger)
- if err != nil {
- return nil, errors.Wrap(err, "could not create blockchain reactor")
- }
-
// Make ConsensusReactor
consensusReactor, consensusState := createConsensusReactor(
config, state, blockExec, blockStore, mempool,
privValidator, fastSync, evsw, consensusLogger,
)
+ // Make BlockchainReactor
+ bcReactor, err := createBlockchainReactor(
+ state,
+ blockExec,
+ blockStore,
+ fastSync,
+ consensusReactor.SwitchToConsensus,
+ logger,
+ )
+ if err != nil {
+ return nil, errors.Wrap(err, "could not create blockchain reactor")
+ }
+
+ reactors := []nodeReactor{
+ {
+ mempoolReactorName, mempoolReactor,
+ },
+ {
+ blockchainReactorName, bcReactor,
+ },
+ {
+ consensusReactorName, consensusReactor,
+ },
+ }
+
nodeInfo, err := makeNodeInfo(config, nodeKey, txEventStore, genDoc, state)
if err != nil {
return nil, errors.Wrap(err, "error making NodeInfo")
}
- // Setup Transport.
- transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
+ p2pLogger := logger.With("module", p2pModuleName)
- // Setup Switch.
- p2pLogger := logger.With("module", "p2p")
- sw := createSwitch(
- config, transport, peerFilters, mempoolReactor, bcReactor,
- consensusReactor, nodeInfo, nodeKey, p2pLogger,
+ // Setup the multiplex transport, used by the P2P switch
+ transport := p2p.NewMultiplexTransport(
+ nodeInfo,
+ *nodeKey,
+ conn.MConfigFromP2P(config.P2P),
+ p2pLogger.With("transport", "multiplex"),
)
- err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
- if err != nil {
- return nil, errors.Wrap(err, "could not add peers from persistent_peers field")
+ var discoveryReactor *discovery.Reactor
+
+ if config.P2P.PeerExchange {
+ discoveryReactor = discovery.NewReactor()
+
+ discoveryReactor.SetLogger(logger.With("module", discoveryModuleName))
+
+ reactors = append(reactors, nodeReactor{
+ name: discoveryReactorName,
+ reactor: discoveryReactor,
+ })
+ }
+
+ // Setup MultiplexSwitch.
+ peerAddrs, errs := p2pTypes.NewNetAddressFromStrings(
+ splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "),
+ )
+ for _, err = range errs {
+ p2pLogger.Error("invalid persistent peer address", "err", err)
+ }
+
+ // Parse the private peer IDs
+ privatePeerIDs, errs := p2pTypes.NewIDFromStrings(
+ splitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " "),
+ )
+ for _, err = range errs {
+ p2pLogger.Error("invalid private peer ID", "err", err)
+ }
+
+ // Prepare the misc switch options
+ opts := []p2p.SwitchOption{
+ p2p.WithPersistentPeers(peerAddrs),
+ p2p.WithPrivatePeers(privatePeerIDs),
+ p2p.WithMaxInboundPeers(config.P2P.MaxNumInboundPeers),
+ p2p.WithMaxOutboundPeers(config.P2P.MaxNumOutboundPeers),
+ }
+
+ // Prepare the reactor switch options
+ for _, r := range reactors {
+ opts = append(opts, p2p.WithReactor(r.name, r.reactor))
}
+ sw := p2p.NewMultiplexSwitch(
+ transport,
+ opts...,
+ )
+
+ sw.SetLogger(p2pLogger)
+
+ p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
+
if config.ProfListenAddress != "" {
server := &http.Server{
Addr: config.ProfListenAddress,
@@ -554,10 +546,11 @@ func NewNode(config *cfg.Config,
genesisDoc: genDoc,
privValidator: privValidator,
- transport: transport,
- sw: sw,
- nodeInfo: nodeInfo,
- nodeKey: nodeKey,
+ transport: transport,
+ sw: sw,
+ discoveryReactor: discoveryReactor,
+ nodeInfo: nodeInfo,
+ nodeKey: nodeKey,
evsw: evsw,
stateDB: stateDB,
@@ -611,10 +604,16 @@ func (n *Node) OnStart() error {
}
// Start the transport.
- addr, err := p2p.NewNetAddressFromString(p2p.NetAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
+ lAddr := n.config.P2P.ExternalAddress
+ if lAddr == "" {
+ lAddr = n.config.P2P.ListenAddress
+ }
+
+ addr, err := p2pTypes.NewNetAddressFromString(p2pTypes.NetAddressString(n.nodeKey.ID(), lAddr))
if err != nil {
- return err
+ return fmt.Errorf("unable to parse network address, %w", err)
}
+
if err := n.transport.Listen(*addr); err != nil {
return err
}
@@ -639,11 +638,14 @@ func (n *Node) OnStart() error {
}
// Always connect to persistent peers
- err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
- if err != nil {
- return errors.Wrap(err, "could not dial peers from persistent_peers field")
+ peerAddrs, errs := p2pTypes.NewNetAddressFromStrings(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
+ for _, err := range errs {
+ n.Logger.Error("invalid persistent peer address", "err", err)
}
+ // Dial the persistent peers
+ n.sw.DialPeers(peerAddrs...)
+
return nil
}
@@ -657,8 +659,15 @@ func (n *Node) OnStop() {
n.evsw.Stop()
n.eventStoreService.Stop()
+ // Stop the node p2p transport
+ if err := n.transport.Close(); err != nil {
+ n.Logger.Error("unable to gracefully close transport", "err", err)
+ }
+
// now stop the reactors
- n.sw.Stop()
+ if err := n.sw.Stop(); err != nil {
+ n.Logger.Error("unable to gracefully close switch", "err", err)
+ }
// stop mempool WAL
if n.config.Mempool.WalEnabled() {
@@ -791,7 +800,7 @@ func joinListenerAddresses(ll []net.Listener) string {
}
// Switch returns the Node's Switch.
-func (n *Node) Switch() *p2p.Switch {
+func (n *Node) Switch() *p2p.MultiplexSwitch {
return n.sw
}
@@ -859,17 +868,17 @@ func (n *Node) IsListening() bool {
}
// NodeInfo returns the Node's Info from the Switch.
-func (n *Node) NodeInfo() p2p.NodeInfo {
+func (n *Node) NodeInfo() p2pTypes.NodeInfo {
return n.nodeInfo
}
func makeNodeInfo(
config *cfg.Config,
- nodeKey *p2p.NodeKey,
+ nodeKey *p2pTypes.NodeKey,
txEventStore eventstore.TxEventStore,
genDoc *types.GenesisDoc,
state sm.State,
-) (p2p.NodeInfo, error) {
+) (p2pTypes.NodeInfo, error) {
txIndexerStatus := eventstore.StatusOff
if txEventStore.GetType() != null.EventStoreType {
txIndexerStatus = eventstore.StatusOn
@@ -882,8 +891,9 @@ func makeNodeInfo(
Version: state.AppVersion,
})
- nodeInfo := p2p.NodeInfo{
+ nodeInfo := p2pTypes.NodeInfo{
VersionSet: vset,
+ PeerID: nodeKey.ID(),
Network: genDoc.ChainID,
Version: version.Version,
Channels: []byte{
@@ -892,24 +902,23 @@ func makeNodeInfo(
mempl.MempoolChannel,
},
Moniker: config.Moniker,
- Other: p2p.NodeInfoOther{
+ Other: p2pTypes.NodeInfoOther{
TxIndex: txIndexerStatus,
RPCAddress: config.RPC.ListenAddress,
},
}
- lAddr := config.P2P.ExternalAddress
- if lAddr == "" {
- lAddr = config.P2P.ListenAddress
+ if config.P2P.PeerExchange {
+ nodeInfo.Channels = append(nodeInfo.Channels, discovery.Channel)
}
- addr, err := p2p.NewNetAddressFromString(p2p.NetAddressString(nodeKey.ID(), lAddr))
- if err != nil {
- return nodeInfo, errors.Wrap(err, "invalid (local) node net address")
+
+ // Validate the node info
+ err := nodeInfo.Validate()
+ if err != nil && !goErrors.Is(err, p2pTypes.ErrUnspecifiedIP) {
+ return p2pTypes.NodeInfo{}, fmt.Errorf("unable to validate node info, %w", err)
}
- nodeInfo.NetAddress = addr
- err = nodeInfo.Validate()
- return nodeInfo, err
+ return nodeInfo, nil
}
// ------------------------------------------------------------------------------
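
A condensed sketch of the new switch construction path, using only the constructors and options that appear above; `buildSwitchSketch` and its parameter list are illustrative.

```go
// buildSwitchSketch (illustrative name) shows the MultiplexSwitch wiring in
// one place: transport first, then functional options, then an explicit
// DialPeers call for the persistent peers.
func buildSwitchSketch(
	config *cfg.Config,
	nodeInfo p2pTypes.NodeInfo,
	nodeKey *p2pTypes.NodeKey,
	mempoolReactor *mempl.Reactor,
	logger *slog.Logger,
) *p2p.MultiplexSwitch {
	transport := p2p.NewMultiplexTransport(
		nodeInfo,
		*nodeKey,
		conn.MConfigFromP2P(config.P2P),
		logger.With("transport", "multiplex"),
	)

	// Persistent peers are parsed up front; bad entries are logged and skipped.
	peerAddrs, errs := p2pTypes.NewNetAddressFromStrings(
		splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "),
	)
	for _, err := range errs {
		logger.Error("invalid persistent peer address", "err", err)
	}

	sw := p2p.NewMultiplexSwitch(
		transport,
		p2p.WithReactor(mempoolReactorName, mempoolReactor),
		p2p.WithPersistentPeers(peerAddrs),
		p2p.WithMaxInboundPeers(config.P2P.MaxNumInboundPeers),
		p2p.WithMaxOutboundPeers(config.P2P.MaxNumOutboundPeers),
	)
	sw.SetLogger(logger)

	// Dialing persistent peers is now an explicit call (OnStart does this
	// after the transport starts listening).
	sw.DialPeers(peerAddrs...)

	return sw
}
```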
diff --git a/tm2/pkg/bft/node/node_test.go b/tm2/pkg/bft/node/node_test.go
index 6e86a0bcc6f..1ea789d31c2 100644
--- a/tm2/pkg/bft/node/node_test.go
+++ b/tm2/pkg/bft/node/node_test.go
@@ -25,8 +25,6 @@ import (
"github.com/gnolang/gno/tm2/pkg/db/memdb"
"github.com/gnolang/gno/tm2/pkg/events"
"github.com/gnolang/gno/tm2/pkg/log"
- "github.com/gnolang/gno/tm2/pkg/p2p"
- p2pmock "github.com/gnolang/gno/tm2/pkg/p2p/mock"
"github.com/gnolang/gno/tm2/pkg/random"
)
@@ -40,8 +38,6 @@ func TestNodeStartStop(t *testing.T) {
err = n.Start()
require.NoError(t, err)
- t.Logf("Started node %v", n.sw.NodeInfo())
-
// wait for the node to produce a block
blocksSub := events.SubscribeToEvent(n.EventSwitch(), "node_test", types.EventNewBlock{})
require.NoError(t, err)
@@ -308,39 +304,6 @@ func TestCreateProposalBlock(t *testing.T) {
assert.NoError(t, err)
}
-func TestNodeNewNodeCustomReactors(t *testing.T) {
- config, genesisFile := cfg.ResetTestRoot("node_new_node_custom_reactors_test")
- defer os.RemoveAll(config.RootDir)
-
- cr := p2pmock.NewReactor()
- customBlockchainReactor := p2pmock.NewReactor()
-
- nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
- require.NoError(t, err)
-
- n, err := NewNode(config,
- privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
- nodeKey,
- proxy.DefaultClientCreator(nil, config.ProxyApp, config.ABCI, config.DBDir()),
- DefaultGenesisDocProviderFunc(genesisFile),
- DefaultDBProvider,
- events.NewEventSwitch(),
- log.NewTestingLogger(t),
- CustomReactors(map[string]p2p.Reactor{"FOO": cr, "BLOCKCHAIN": customBlockchainReactor}),
- )
- require.NoError(t, err)
-
- err = n.Start()
- require.NoError(t, err)
- defer n.Stop()
-
- assert.True(t, cr.IsRunning())
- assert.Equal(t, cr, n.Switch().Reactor("FOO"))
-
- assert.True(t, customBlockchainReactor.IsRunning())
- assert.Equal(t, customBlockchainReactor, n.Switch().Reactor("BLOCKCHAIN"))
-}
-
func state(nVals int, height int64) (sm.State, dbm.DB) {
vals := make([]types.GenesisValidator, nVals)
for i := 0; i < nVals; i++ {
diff --git a/tm2/pkg/bft/rpc/client/batch_test.go b/tm2/pkg/bft/rpc/client/batch_test.go
index 52930e5c372..fcd0f3f834d 100644
--- a/tm2/pkg/bft/rpc/client/batch_test.go
+++ b/tm2/pkg/bft/rpc/client/batch_test.go
@@ -10,7 +10,7 @@ import (
ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types"
bfttypes "github.com/gnolang/gno/tm2/pkg/bft/types"
- "github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -116,7 +116,7 @@ func TestRPCBatch_Send(t *testing.T) {
var (
numRequests = 10
expectedStatus = &ctypes.ResultStatus{
- NodeInfo: p2p.NodeInfo{
+ NodeInfo: p2pTypes.NodeInfo{
Moniker: "dummy",
},
}
@@ -160,7 +160,7 @@ func TestRPCBatch_Endpoints(t *testing.T) {
{
statusMethod,
&ctypes.ResultStatus{
- NodeInfo: p2p.NodeInfo{
+ NodeInfo: p2pTypes.NodeInfo{
Moniker: "dummy",
},
},
diff --git a/tm2/pkg/bft/rpc/client/client_test.go b/tm2/pkg/bft/rpc/client/client_test.go
index cb88c91fc5f..31889f59883 100644
--- a/tm2/pkg/bft/rpc/client/client_test.go
+++ b/tm2/pkg/bft/rpc/client/client_test.go
@@ -14,7 +14,7 @@ import (
ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types"
bfttypes "github.com/gnolang/gno/tm2/pkg/bft/types"
- "github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -114,7 +114,7 @@ func TestRPCClient_Status(t *testing.T) {
var (
expectedStatus = &ctypes.ResultStatus{
- NodeInfo: p2p.NodeInfo{
+ NodeInfo: p2pTypes.NodeInfo{
Moniker: "dummy",
},
}
@@ -811,17 +811,17 @@ func TestRPCClient_Batch(t *testing.T) {
var (
expectedStatuses = []*ctypes.ResultStatus{
{
- NodeInfo: p2p.NodeInfo{
+ NodeInfo: p2pTypes.NodeInfo{
Moniker: "dummy",
},
},
{
- NodeInfo: p2p.NodeInfo{
+ NodeInfo: p2pTypes.NodeInfo{
Moniker: "dummy",
},
},
{
- NodeInfo: p2p.NodeInfo{
+ NodeInfo: p2pTypes.NodeInfo{
Moniker: "dummy",
},
},
diff --git a/tm2/pkg/bft/rpc/client/e2e_test.go b/tm2/pkg/bft/rpc/client/e2e_test.go
index 08d4b9b735d..358c66b0b26 100644
--- a/tm2/pkg/bft/rpc/client/e2e_test.go
+++ b/tm2/pkg/bft/rpc/client/e2e_test.go
@@ -13,7 +13,7 @@ import (
ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types"
bfttypes "github.com/gnolang/gno/tm2/pkg/bft/types"
- "github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -170,7 +170,7 @@ func TestRPCClient_E2E_Endpoints(t *testing.T) {
{
statusMethod,
&ctypes.ResultStatus{
- NodeInfo: p2p.NodeInfo{
+ NodeInfo: p2pTypes.NodeInfo{
Moniker: "dummy",
},
},
diff --git a/tm2/pkg/bft/rpc/client/local.go b/tm2/pkg/bft/rpc/client/local.go
index 59c4216a468..4bc724e7d70 100644
--- a/tm2/pkg/bft/rpc/client/local.go
+++ b/tm2/pkg/bft/rpc/client/local.go
@@ -106,14 +106,6 @@ func (c *Local) Health() (*ctypes.ResultHealth, error) {
return core.Health(c.ctx)
}
-func (c *Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
- return core.UnsafeDialSeeds(c.ctx, seeds)
-}
-
-func (c *Local) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
- return core.UnsafeDialPeers(c.ctx, peers, persistent)
-}
-
func (c *Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
return core.BlockchainInfo(c.ctx, minHeight, maxHeight)
}
diff --git a/tm2/pkg/bft/rpc/core/net.go b/tm2/pkg/bft/rpc/core/net.go
index 975d5ed822f..f8839b7d91f 100644
--- a/tm2/pkg/bft/rpc/core/net.go
+++ b/tm2/pkg/bft/rpc/core/net.go
@@ -3,7 +3,6 @@ package core
import (
ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types"
- "github.com/gnolang/gno/tm2/pkg/errors"
)
// Get network info.
@@ -154,10 +153,14 @@ import (
// }
//
// ```
-func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
- out, in, _ := p2pPeers.NumPeers()
+func NetInfo(_ *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
+ var (
+ set = p2pPeers.Peers()
+ out, in = set.NumOutbound(), set.NumInbound()
+ )
+
peers := make([]ctypes.Peer, 0, out+in)
- for _, peer := range p2pPeers.Peers().List() {
+ for _, peer := range set.List() {
nodeInfo := peer.NodeInfo()
peers = append(peers, ctypes.Peer{
NodeInfo: nodeInfo,
@@ -166,9 +169,7 @@ func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
RemoteIP: peer.RemoteIP().String(),
})
}
- // TODO: Should we include PersistentPeers and Seeds in here?
- // PRO: useful info
- // CON: privacy
+
return &ctypes.ResultNetInfo{
Listening: p2pTransport.IsListening(),
Listeners: p2pTransport.Listeners(),
@@ -177,33 +178,6 @@ func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
}, nil
}
-func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
- if len(seeds) == 0 {
- return &ctypes.ResultDialSeeds{}, errors.New("No seeds provided")
- }
- logger.Info("DialSeeds", "seeds", seeds)
- if err := p2pPeers.DialPeersAsync(seeds); err != nil {
- return &ctypes.ResultDialSeeds{}, err
- }
- return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil
-}
-
-func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
- if len(peers) == 0 {
- return &ctypes.ResultDialPeers{}, errors.New("No peers provided")
- }
- logger.Info("DialPeers", "peers", peers, "persistent", persistent)
- if persistent {
- if err := p2pPeers.AddPersistentPeers(peers); err != nil {
- return &ctypes.ResultDialPeers{}, err
- }
- }
- if err := p2pPeers.DialPeersAsync(peers); err != nil {
- return &ctypes.ResultDialPeers{}, err
- }
- return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil
-}
-
// Get genesis file.
//
// ```shell
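
A one-function sketch of the new counting path: outbound and inbound totals now come from the `p2p.PeerSet` itself rather than a `NumPeers` method on the switch; `countPeers` is illustrative.

```go
// countPeers (illustrative name) derives the peer totals the same way the
// updated NetInfo handler does.
func countPeers(set p2p.PeerSet) (outbound, inbound int) {
	return set.NumOutbound(), set.NumInbound()
}
```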
diff --git a/tm2/pkg/bft/rpc/core/net_test.go b/tm2/pkg/bft/rpc/core/net_test.go
deleted file mode 100644
index 3273837b6ce..00000000000
--- a/tm2/pkg/bft/rpc/core/net_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package core
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types"
- "github.com/gnolang/gno/tm2/pkg/log"
- "github.com/gnolang/gno/tm2/pkg/p2p"
- p2pcfg "github.com/gnolang/gno/tm2/pkg/p2p/config"
-)
-
-func TestUnsafeDialSeeds(t *testing.T) {
- t.Parallel()
-
- sw := p2p.MakeSwitch(p2pcfg.DefaultP2PConfig(), 1, "testing", "123.123.123",
- func(n int, sw *p2p.Switch) *p2p.Switch { return sw })
- err := sw.Start()
- require.NoError(t, err)
- defer sw.Stop()
-
- logger = log.NewNoopLogger()
- p2pPeers = sw
-
- testCases := []struct {
- seeds []string
- isErr bool
- }{
- {[]string{}, true},
- {[]string{"g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:41198"}, false},
- {[]string{"127.0.0.1:41198"}, true},
- }
-
- for _, tc := range testCases {
- res, err := UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds)
- if tc.isErr {
- assert.Error(t, err)
- } else {
- assert.NoError(t, err)
- assert.NotNil(t, res)
- }
- }
-}
-
-func TestUnsafeDialPeers(t *testing.T) {
- t.Parallel()
-
- sw := p2p.MakeSwitch(p2pcfg.DefaultP2PConfig(), 1, "testing", "123.123.123",
- func(n int, sw *p2p.Switch) *p2p.Switch { return sw })
- err := sw.Start()
- require.NoError(t, err)
- defer sw.Stop()
-
- logger = log.NewNoopLogger()
- p2pPeers = sw
-
- testCases := []struct {
- peers []string
- isErr bool
- }{
- {[]string{}, true},
- {[]string{"g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:41198"}, false},
- {[]string{"127.0.0.1:41198"}, true},
- }
-
- for _, tc := range testCases {
- res, err := UnsafeDialPeers(&rpctypes.Context{}, tc.peers, false)
- if tc.isErr {
- assert.Error(t, err)
- } else {
- assert.NoError(t, err)
- assert.NotNil(t, res)
- }
- }
-}
diff --git a/tm2/pkg/bft/rpc/core/pipe.go b/tm2/pkg/bft/rpc/core/pipe.go
index 9493e7c5873..085fc35da55 100644
--- a/tm2/pkg/bft/rpc/core/pipe.go
+++ b/tm2/pkg/bft/rpc/core/pipe.go
@@ -15,6 +15,7 @@ import (
dbm "github.com/gnolang/gno/tm2/pkg/db"
"github.com/gnolang/gno/tm2/pkg/events"
"github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
)
const (
@@ -38,14 +39,11 @@ type Consensus interface {
type transport interface {
Listeners() []string
IsListening() bool
- NodeInfo() p2p.NodeInfo
+ NodeInfo() p2pTypes.NodeInfo
}
type peers interface {
- AddPersistentPeers([]string) error
- DialPeersAsync([]string) error
- NumPeers() (outbound, inbound, dialig int)
- Peers() p2p.IPeerSet
+ Peers() p2p.PeerSet
}
// ----------------------------------------------
diff --git a/tm2/pkg/bft/rpc/core/routes.go b/tm2/pkg/bft/rpc/core/routes.go
index 8d210f67985..76217a7cbd9 100644
--- a/tm2/pkg/bft/rpc/core/routes.go
+++ b/tm2/pkg/bft/rpc/core/routes.go
@@ -36,8 +36,6 @@ var Routes = map[string]*rpc.RPCFunc{
func AddUnsafeRoutes() {
// control API
- Routes["dial_seeds"] = rpc.NewRPCFunc(UnsafeDialSeeds, "seeds")
- Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent")
Routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(UnsafeFlushMempool, "")
// profiler API
diff --git a/tm2/pkg/bft/rpc/core/types/responses.go b/tm2/pkg/bft/rpc/core/types/responses.go
index 2874517147d..76474867b27 100644
--- a/tm2/pkg/bft/rpc/core/types/responses.go
+++ b/tm2/pkg/bft/rpc/core/types/responses.go
@@ -11,6 +11,7 @@ import (
"github.com/gnolang/gno/tm2/pkg/bft/types"
"github.com/gnolang/gno/tm2/pkg/crypto"
"github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
)
// List of blocks
@@ -74,9 +75,9 @@ type ValidatorInfo struct {
// Node Status
type ResultStatus struct {
- NodeInfo p2p.NodeInfo `json:"node_info"`
- SyncInfo SyncInfo `json:"sync_info"`
- ValidatorInfo ValidatorInfo `json:"validator_info"`
+ NodeInfo p2pTypes.NodeInfo `json:"node_info"`
+ SyncInfo SyncInfo `json:"sync_info"`
+ ValidatorInfo ValidatorInfo `json:"validator_info"`
}
// Is TxIndexing enabled
@@ -107,7 +108,7 @@ type ResultDialPeers struct {
// A peer
type Peer struct {
- NodeInfo p2p.NodeInfo `json:"node_info"`
+ NodeInfo p2pTypes.NodeInfo `json:"node_info"`
IsOutbound bool `json:"is_outbound"`
ConnectionStatus p2p.ConnectionStatus `json:"connection_status"`
RemoteIP string `json:"remote_ip"`
diff --git a/tm2/pkg/bft/rpc/core/types/responses_test.go b/tm2/pkg/bft/rpc/core/types/responses_test.go
index 268a8d25c34..7d03addc546 100644
--- a/tm2/pkg/bft/rpc/core/types/responses_test.go
+++ b/tm2/pkg/bft/rpc/core/types/responses_test.go
@@ -3,9 +3,8 @@ package core_types
import (
"testing"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/stretchr/testify/assert"
-
- "github.com/gnolang/gno/tm2/pkg/p2p"
)
func TestStatusIndexer(t *testing.T) {
@@ -17,17 +16,17 @@ func TestStatusIndexer(t *testing.T) {
status = &ResultStatus{}
assert.False(t, status.TxIndexEnabled())
- status.NodeInfo = p2p.NodeInfo{}
+ status.NodeInfo = types.NodeInfo{}
assert.False(t, status.TxIndexEnabled())
cases := []struct {
expected bool
- other p2p.NodeInfoOther
+ other types.NodeInfoOther
}{
- {false, p2p.NodeInfoOther{}},
- {false, p2p.NodeInfoOther{TxIndex: "aa"}},
- {false, p2p.NodeInfoOther{TxIndex: "off"}},
- {true, p2p.NodeInfoOther{TxIndex: "on"}},
+ {false, types.NodeInfoOther{}},
+ {false, types.NodeInfoOther{TxIndex: "aa"}},
+ {false, types.NodeInfoOther{TxIndex: "off"}},
+ {true, types.NodeInfoOther{TxIndex: "on"}},
}
for _, tc := range cases {
diff --git a/tm2/pkg/bft/rpc/lib/client/http/client.go b/tm2/pkg/bft/rpc/lib/client/http/client.go
index aa4fc5c5392..288eca57300 100644
--- a/tm2/pkg/bft/rpc/lib/client/http/client.go
+++ b/tm2/pkg/bft/rpc/lib/client/http/client.go
@@ -166,14 +166,14 @@ func defaultHTTPClient(remoteAddr string) *http.Client {
Transport: &http.Transport{
// Set to true to prevent GZIP-bomb DoS attacks
DisableCompression: true,
- DialContext: func(_ context.Context, network, addr string) (net.Conn, error) {
- return makeHTTPDialer(remoteAddr)(network, addr)
+ DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
+ return makeHTTPDialer(remoteAddr)
},
},
}
}
-func makeHTTPDialer(remoteAddr string) func(string, string) (net.Conn, error) {
+func makeHTTPDialer(remoteAddr string) (net.Conn, error) {
protocol, address := parseRemoteAddr(remoteAddr)
// net.Dial doesn't understand http/https, so change it to TCP
@@ -182,9 +182,7 @@ func makeHTTPDialer(remoteAddr string) func(string, string) (net.Conn, error) {
protocol = protoTCP
}
- return func(proto, addr string) (net.Conn, error) {
- return net.Dial(protocol, address)
- }
+ return net.Dial(protocol, address)
}
// protocol - client's protocol (for example, "http", "https", "wss", "ws", "tcp")
diff --git a/tm2/pkg/bft/rpc/lib/client/http/client_test.go b/tm2/pkg/bft/rpc/lib/client/http/client_test.go
index 4ccbfdc2d1e..0d88ee32650 100644
--- a/tm2/pkg/bft/rpc/lib/client/http/client_test.go
+++ b/tm2/pkg/bft/rpc/lib/client/http/client_test.go
@@ -76,7 +76,7 @@ func TestClient_makeHTTPDialer(t *testing.T) {
t.Run("http", func(t *testing.T) {
t.Parallel()
- _, err := makeHTTPDialer("https://.")("hello", "world")
+ _, err := makeHTTPDialer("https://.")
require.Error(t, err)
assert.Contains(t, err.Error(), "dial tcp:", "should convert https to tcp")
@@ -85,7 +85,7 @@ func TestClient_makeHTTPDialer(t *testing.T) {
t.Run("udp", func(t *testing.T) {
t.Parallel()
- _, err := makeHTTPDialer("udp://.")("hello", "world")
+ _, err := makeHTTPDialer("udp://.")
require.Error(t, err)
assert.Contains(t, err.Error(), "dial udp:", "udp protocol should remain the same")
diff --git a/tm2/pkg/crypto/crypto.go b/tm2/pkg/crypto/crypto.go
index 7757b75354e..7908a082d3b 100644
--- a/tm2/pkg/crypto/crypto.go
+++ b/tm2/pkg/crypto/crypto.go
@@ -3,6 +3,7 @@ package crypto
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"github.com/gnolang/gno/tm2/pkg/bech32"
@@ -128,6 +129,8 @@ func (addr *Address) DecodeString(str string) error {
// ----------------------------------------
// ID
+var ErrZeroID = errors.New("address ID is zero")
+
// The bech32 representation w/ bech32 prefix.
type ID string
@@ -141,16 +144,12 @@ func (id ID) String() string {
func (id ID) Validate() error {
if id.IsZero() {
- return fmt.Errorf("zero ID is invalid")
+ return ErrZeroID
}
+
var addr Address
- err := addr.DecodeID(id)
- return err
-}
-func AddressFromID(id ID) (addr Address, err error) {
- err = addr.DecodeString(string(id))
- return
+ return addr.DecodeID(id)
}
func (addr Address) ID() ID {
diff --git a/tm2/pkg/crypto/ed25519/ed25519.go b/tm2/pkg/crypto/ed25519/ed25519.go
index 8976994986c..f8b9529b788 100644
--- a/tm2/pkg/crypto/ed25519/ed25519.go
+++ b/tm2/pkg/crypto/ed25519/ed25519.go
@@ -68,11 +68,9 @@ func (privKey PrivKeyEd25519) PubKey() crypto.PubKey {
// Equals - you probably don't need to use this.
// Runs in constant time based on length of the keys.
func (privKey PrivKeyEd25519) Equals(other crypto.PrivKey) bool {
- if otherEd, ok := other.(PrivKeyEd25519); ok {
- return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
- } else {
- return false
- }
+ otherEd, ok := other.(PrivKeyEd25519)
+
+ return ok && subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
}
// GenPrivKey generates a new ed25519 private key.
diff --git a/tm2/pkg/internal/p2p/p2p.go b/tm2/pkg/internal/p2p/p2p.go
new file mode 100644
index 00000000000..1e650e0cd25
--- /dev/null
+++ b/tm2/pkg/internal/p2p/p2p.go
@@ -0,0 +1,255 @@
+// Package p2p contains testing code that is moved over, and adapted from p2p/test_utils.go.
+// This isn't a good way to simulate the networking layer in TM2 modules.
+// It actually isn't a good way to simulate the networking layer, in anything.
+//
+// Code is carried over to keep the testing code of p2p-dependent modules happy
+// and "working". We should delete this entire package the second TM2 module unit tests don't
+// need to rely on a live p2p cluster to pass.
+package p2p
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/gnolang/gno/tm2/pkg/log"
+ "github.com/gnolang/gno/tm2/pkg/p2p"
+ p2pcfg "github.com/gnolang/gno/tm2/pkg/p2p/config"
+ "github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/events"
+ p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ "github.com/gnolang/gno/tm2/pkg/service"
+ "github.com/gnolang/gno/tm2/pkg/versionset"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+// TestingConfig is the P2P cluster testing config
+type TestingConfig struct {
+ P2PCfg *p2pcfg.P2PConfig // the common p2p configuration
+ Count int // the size of the cluster
+ SwitchOptions map[int][]p2p.SwitchOption // multiplex switch options
+ Channels []byte // the common p2p peer multiplex channels
+}
+
+// MakeConnectedPeers creates a cluster of peers, with the given options.
+// Used to simulate the networking layer for a TM2 module
+func MakeConnectedPeers(
+ t *testing.T,
+ ctx context.Context,
+ cfg TestingConfig,
+) ([]*p2p.MultiplexSwitch, []*p2p.MultiplexTransport) {
+ t.Helper()
+
+ // Initialize collections for switches, transports, and addresses.
+ var (
+ sws = make([]*p2p.MultiplexSwitch, 0, cfg.Count)
+ ts = make([]*p2p.MultiplexTransport, 0, cfg.Count)
+ addrs = make([]*p2pTypes.NetAddress, 0, cfg.Count)
+ )
+
+ createTransport := func(index int) *p2p.MultiplexTransport {
+ // Generate a fresh key
+ key := p2pTypes.GenerateNodeKey()
+
+ addr, err := p2pTypes.NewNetAddress(
+ key.ID(),
+ &net.TCPAddr{
+ IP: net.ParseIP("127.0.0.1"),
+ Port: 0, // random free port
+ },
+ )
+ require.NoError(t, err)
+
+ info := p2pTypes.NodeInfo{
+ VersionSet: versionset.VersionSet{
+ versionset.VersionInfo{Name: "p2p", Version: "v0.0.0"},
+ },
+ PeerID: key.ID(),
+ Network: "testing",
+ Software: "p2ptest",
+ Version: "v1.2.3-rc.0-deadbeef",
+ Channels: cfg.Channels,
+ Moniker: fmt.Sprintf("node-%d", index),
+ Other: p2pTypes.NodeInfoOther{
+ TxIndex: "off",
+ RPCAddress: fmt.Sprintf("127.0.0.1:%d", 0),
+ },
+ }
+
+ transport := p2p.NewMultiplexTransport(
+ info,
+ *key,
+ conn.MConfigFromP2P(cfg.P2PCfg),
+ log.NewNoopLogger(),
+ )
+
+ require.NoError(t, transport.Listen(*addr))
+ t.Cleanup(func() { assert.NoError(t, transport.Close()) })
+
+ return transport
+ }
+
+ // Create transports and gather addresses
+ for i := 0; i < cfg.Count; i++ {
+ transport := createTransport(i)
+ addr := transport.NetAddress()
+
+ addrs = append(addrs, &addr)
+ ts = append(ts, transport)
+ }
+
+ // Connect switches and ensure all peers are connected
+ connectPeers := func(switchIndex int) error {
+ multiplexSwitch := p2p.NewMultiplexSwitch(
+ ts[switchIndex],
+ cfg.SwitchOptions[switchIndex]...,
+ )
+
+ ch, unsubFn := multiplexSwitch.Subscribe(func(event events.Event) bool {
+ return event.Type() == events.PeerConnected
+ })
+ defer unsubFn()
+
+ // Start the switch
+ require.NoError(t, multiplexSwitch.Start())
+
+ // Save it
+ sws = append(sws, multiplexSwitch)
+
+ if cfg.Count == 1 {
+ // No peers to dial, switch is alone
+ return nil
+ }
+
+ // Async dial the other peers
+ multiplexSwitch.DialPeers(addrs...)
+
+ // Set up an exit timer
+ timer := time.NewTimer(1 * time.Minute)
+ defer timer.Stop()
+
+ var (
+ connectedPeers = make(map[p2pTypes.ID]struct{})
+ targetPeers = cfg.Count - 1
+ )
+
+ for {
+ select {
+ case evRaw := <-ch:
+ ev := evRaw.(events.PeerConnectedEvent)
+
+ connectedPeers[ev.PeerID] = struct{}{}
+
+ if len(connectedPeers) == targetPeers {
+ return nil
+ }
+ case <-timer.C:
+ return errors.New("timed out waiting for peer switches to connect")
+ }
+ }
+ }
+
+ g, _ := errgroup.WithContext(ctx)
+ for i := 0; i < cfg.Count; i++ {
+ g.Go(func() error { return connectPeers(i) })
+ }
+
+ require.NoError(t, g.Wait())
+
+ return sws, ts
+}
+
+// createRoutableAddr generates a valid, routable NetAddress for the given node ID using a secure random IP
+func createRoutableAddr(t *testing.T, id p2pTypes.ID) *p2pTypes.NetAddress {
+ t.Helper()
+
+ generateIP := func() string {
+ ip := make([]byte, 4)
+
+ _, err := rand.Read(ip)
+ require.NoError(t, err)
+
+ return fmt.Sprintf("%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3])
+ }
+
+ for {
+ addrStr := fmt.Sprintf("%s@%s:26656", id, generateIP())
+
+ netAddr, err := p2pTypes.NewNetAddressFromString(addrStr)
+ require.NoError(t, err)
+
+ if netAddr.Routable() {
+ return netAddr
+ }
+ }
+}
+
+// Peer is a live peer, utilized for testing purposes.
+// This Peer implementation is NOT thread safe
+type Peer struct {
+ *service.BaseService
+ ip net.IP
+ id p2pTypes.ID
+ addr *p2pTypes.NetAddress
+ kv map[string]any
+
+ Outbound, Persistent, Private bool
+}
+
+// NewPeer creates and starts a new mock peer.
+// It generates a new routable address for the peer
+func NewPeer(t *testing.T) *Peer {
+ t.Helper()
+
+ var (
+ nodeKey = p2pTypes.GenerateNodeKey()
+ netAddr = createRoutableAddr(t, nodeKey.ID())
+ )
+
+ mp := &Peer{
+ ip: netAddr.IP,
+ id: nodeKey.ID(),
+ addr: netAddr,
+ kv: make(map[string]any),
+ }
+
+ mp.BaseService = service.NewBaseService(nil, "MockPeer", mp)
+
+ require.NoError(t, mp.Start())
+
+ return mp
+}
+
+func (mp *Peer) FlushStop() { mp.Stop() }
+func (mp *Peer) TrySend(_ byte, _ []byte) bool { return true }
+func (mp *Peer) Send(_ byte, _ []byte) bool { return true }
+func (mp *Peer) NodeInfo() p2pTypes.NodeInfo {
+ return p2pTypes.NodeInfo{
+ PeerID: mp.id,
+ }
+}
+func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
+func (mp *Peer) ID() p2pTypes.ID { return mp.id }
+func (mp *Peer) IsOutbound() bool { return mp.Outbound }
+func (mp *Peer) IsPersistent() bool { return mp.Persistent }
+func (mp *Peer) IsPrivate() bool { return mp.Private }
+func (mp *Peer) Get(key string) interface{} {
+ if value, ok := mp.kv[key]; ok {
+ return value
+ }
+ return nil
+}
+
+func (mp *Peer) Set(key string, value interface{}) {
+ mp.kv[key] = value
+}
+func (mp *Peer) RemoteIP() net.IP { return mp.ip }
+func (mp *Peer) SocketAddr() *p2pTypes.NetAddress { return mp.addr }
+func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
+func (mp *Peer) CloseConn() error { return nil }
diff --git a/tm2/pkg/overflow/README.md b/tm2/pkg/overflow/README.md
index 55a9ba4c327..26ba7dc9985 100644
--- a/tm2/pkg/overflow/README.md
+++ b/tm2/pkg/overflow/README.md
@@ -2,26 +2,25 @@
Check for int/int8/int16/int64/int32 integer overflow in Golang arithmetic.
-Forked from https://github.com/JohnCGriffin/overflow
+Originally forked from https://github.com/JohnCGriffin/overflow.
### Install
-```
-go get github.com/johncgriffin/overflow
-```
-Note that because Go has no template types, the majority of repetitive code is
-generated by overflow_template.sh. If you have to change an
-algorithm, change it there and regenerate the Go code via:
-```
+
+The majority of repetitive code is generated by overflow_template.sh. If you
+have to change an algorithm, change it there and regenerate the Go code via:
+
+```sh
go generate
```
+
### Synopsis
-```
+```go
package main
import "fmt"
import "math"
-import "github.com/JohnCGriffin/overflow"
+import "github.com/gnolang/gno/tm2/pkg/overflow"
func main() {
@@ -29,38 +28,33 @@ func main() {
for i := 0; i < 10; i++ {
sum, ok := overflow.Add(addend, i)
- fmt.Printf("%v+%v -> (%v,%v)\n",
+ fmt.Printf("%v+%v -> (%v, %v)\n",
addend, i, sum, ok)
}
}
```
+
yields the output
-```
-9223372036854775802+0 -> (9223372036854775802,true)
-9223372036854775802+1 -> (9223372036854775803,true)
-9223372036854775802+2 -> (9223372036854775804,true)
-9223372036854775802+3 -> (9223372036854775805,true)
-9223372036854775802+4 -> (9223372036854775806,true)
-9223372036854775802+5 -> (9223372036854775807,true)
-9223372036854775802+6 -> (0,false)
-9223372036854775802+7 -> (0,false)
-9223372036854775802+8 -> (0,false)
-9223372036854775802+9 -> (0,false)
+
+```console
+9223372036854775802+0 -> (9223372036854775802, true)
+9223372036854775802+1 -> (9223372036854775803, true)
+9223372036854775802+2 -> (9223372036854775804, true)
+9223372036854775802+3 -> (9223372036854775805, true)
+9223372036854775802+4 -> (9223372036854775806, true)
+9223372036854775802+5 -> (9223372036854775807, true)
+9223372036854775802+6 -> (0, false)
+9223372036854775802+7 -> (0, false)
+9223372036854775802+8 -> (0, false)
+9223372036854775802+9 -> (0, false)
```
For int, int64, and int32 types, provide Add, Add32, Add64, Sub, Sub32, Sub64, etc.
-Unsigned types not covered at the moment, but such additions are welcome.
### Stay calm and panic
-There's a good case to be made that a panic is an unidiomatic but proper response. Iff you
-believe that there's no valid way to continue your program after math goes wayward, you can
-use the easier Addp, Mulp, Subp, and Divp versions which return the normal result or panic.
-
-
-
-
-
-
-
+There's a good case to be made that a panic is an unidiomatic but proper
+response. If you believe that there's no valid way to continue your program
+after math goes wayward, you can use the easier Addp, Mulp, Subp, and Divp
+versions which return the normal result or panic.
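+
+A minimal sketch of the panicking variants described above (illustrative only; it assumes the plain `int`
+helpers advertised in this README):
+
+```go
+package main
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/gnolang/gno/tm2/pkg/overflow"
+)
+
+func main() {
+	// In range: behaves like a normal addition.
+	fmt.Println(overflow.Addp(math.MaxInt64-1, 1)) // 9223372036854775807
+
+	// Out of range: panics with "addition overflow" instead of
+	// returning an (invalid result, false) pair.
+	fmt.Println(overflow.Addp(math.MaxInt64, 1))
+}
+```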
diff --git a/tm2/pkg/overflow/overflow_impl.go b/tm2/pkg/overflow/overflow_impl.go
index a9a90c43835..0f057f65387 100644
--- a/tm2/pkg/overflow/overflow_impl.go
+++ b/tm2/pkg/overflow/overflow_impl.go
@@ -1,10 +1,8 @@
package overflow
-// This is generated code, created by overflow_template.sh executed
-// by "go generate"
+// Code generated by overflow_template.sh from 'go generate'. DO NOT EDIT.
-// Add8 performs + operation on two int8 operands
-// returning a result and status
+// Add8 performs + operation on two int8 operands, returning a result and status.
func Add8(a, b int8) (int8, bool) {
c := a + b
if (c > a) == (b > 0) {
@@ -13,7 +11,7 @@ func Add8(a, b int8) (int8, bool) {
return c, false
}
-// Add8p is the unchecked panicing version of Add8
+// Add8p is the unchecked panicing version of Add8.
func Add8p(a, b int8) int8 {
r, ok := Add8(a, b)
if !ok {
@@ -22,8 +20,7 @@ func Add8p(a, b int8) int8 {
return r
}
-// Sub8 performs - operation on two int8 operands
-// returning a result and status
+// Sub8 performs - operation on two int8 operands, returning a result and status.
func Sub8(a, b int8) (int8, bool) {
c := a - b
if (c < a) == (b > 0) {
@@ -32,7 +29,7 @@ func Sub8(a, b int8) (int8, bool) {
return c, false
}
-// Sub8p is the unchecked panicing version of Sub8
+// Sub8p is the unchecked panicing version of Sub8.
func Sub8p(a, b int8) int8 {
r, ok := Sub8(a, b)
if !ok {
@@ -41,8 +38,7 @@ func Sub8p(a, b int8) int8 {
return r
}
-// Mul8 performs * operation on two int8 operands
-// returning a result and status
+// Mul8 performs * operation on two int8 operands, returning a result and status.
func Mul8(a, b int8) (int8, bool) {
if a == 0 || b == 0 {
return 0, true
@@ -56,7 +52,7 @@ func Mul8(a, b int8) (int8, bool) {
return c, false
}
-// Mul8p is the unchecked panicing version of Mul8
+// Mul8p is the unchecked panicing version of Mul8.
func Mul8p(a, b int8) int8 {
r, ok := Mul8(a, b)
if !ok {
@@ -65,14 +61,13 @@ func Mul8p(a, b int8) int8 {
return r
}
-// Div8 performs / operation on two int8 operands
-// returning a result and status
+// Div8 performs / operation on two int8 operands, returning a result and status.
func Div8(a, b int8) (int8, bool) {
q, _, ok := Quotient8(a, b)
return q, ok
}
-// Div8p is the unchecked panicing version of Div8
+// Div8p is the unchecked panicing version of Div8.
func Div8p(a, b int8) int8 {
r, ok := Div8(a, b)
if !ok {
@@ -81,19 +76,19 @@ func Div8p(a, b int8) int8 {
return r
}
-// Quotient8 performs + operation on two int8 operands
-// returning a quotient, a remainder and status
+// Quotient8 performs / operation on two int8 operands, returning a quotient,
+// a remainder and status.
func Quotient8(a, b int8) (int8, int8, bool) {
if b == 0 {
return 0, 0, false
}
c := a / b
- status := (c < 0) == ((a < 0) != (b < 0))
- return c, a % b, status
+ status := (c < 0) == ((a < 0) != (b < 0)) || (c == 0) // no sign check for 0 quotient
+ return c, a%b, status
}
-// Add16 performs + operation on two int16 operands
-// returning a result and status
+
+// Add16 performs + operation on two int16 operands, returning a result and status.
func Add16(a, b int16) (int16, bool) {
c := a + b
if (c > a) == (b > 0) {
@@ -102,7 +97,7 @@ func Add16(a, b int16) (int16, bool) {
return c, false
}
-// Add16p is the unchecked panicing version of Add16
+// Add16p is the unchecked panicing version of Add16.
func Add16p(a, b int16) int16 {
r, ok := Add16(a, b)
if !ok {
@@ -111,8 +106,7 @@ func Add16p(a, b int16) int16 {
return r
}
-// Sub16 performs - operation on two int16 operands
-// returning a result and status
+// Sub16 performs - operation on two int16 operands, returning a result and status.
func Sub16(a, b int16) (int16, bool) {
c := a - b
if (c < a) == (b > 0) {
@@ -121,7 +115,7 @@ func Sub16(a, b int16) (int16, bool) {
return c, false
}
-// Sub16p is the unchecked panicing version of Sub16
+// Sub16p is the unchecked panicing version of Sub16.
func Sub16p(a, b int16) int16 {
r, ok := Sub16(a, b)
if !ok {
@@ -130,8 +124,7 @@ func Sub16p(a, b int16) int16 {
return r
}
-// Mul16 performs * operation on two int16 operands
-// returning a result and status
+// Mul16 performs * operation on two int16 operands, returning a result and status.
func Mul16(a, b int16) (int16, bool) {
if a == 0 || b == 0 {
return 0, true
@@ -145,7 +138,7 @@ func Mul16(a, b int16) (int16, bool) {
return c, false
}
-// Mul16p is the unchecked panicing version of Mul16
+// Mul16p is the unchecked panicing version of Mul16.
func Mul16p(a, b int16) int16 {
r, ok := Mul16(a, b)
if !ok {
@@ -154,14 +147,13 @@ func Mul16p(a, b int16) int16 {
return r
}
-// Div16 performs / operation on two int16 operands
-// returning a result and status
+// Div16 performs / operation on two int16 operands, returning a result and status.
func Div16(a, b int16) (int16, bool) {
q, _, ok := Quotient16(a, b)
return q, ok
}
-// Div16p is the unchecked panicing version of Div16
+// Div16p is the unchecked panicing version of Div16.
func Div16p(a, b int16) int16 {
r, ok := Div16(a, b)
if !ok {
@@ -170,19 +162,19 @@ func Div16p(a, b int16) int16 {
return r
}
-// Quotient16 performs + operation on two int16 operands
-// returning a quotient, a remainder and status
+// Quotient16 performs / operation on two int16 operands, returning a quotient,
+// a remainder and status.
func Quotient16(a, b int16) (int16, int16, bool) {
if b == 0 {
return 0, 0, false
}
c := a / b
- status := (c < 0) == ((a < 0) != (b < 0))
- return c, a % b, status
+ status := (c < 0) == ((a < 0) != (b < 0)) || (c == 0) // no sign check for 0 quotient
+ return c, a%b, status
}
-// Add32 performs + operation on two int32 operands
-// returning a result and status
+
+// Add32 performs + operation on two int32 operands, returning a result and status.
func Add32(a, b int32) (int32, bool) {
c := a + b
if (c > a) == (b > 0) {
@@ -191,7 +183,7 @@ func Add32(a, b int32) (int32, bool) {
return c, false
}
-// Add32p is the unchecked panicing version of Add32
+// Add32p is the unchecked panicing version of Add32.
func Add32p(a, b int32) int32 {
r, ok := Add32(a, b)
if !ok {
@@ -200,8 +192,7 @@ func Add32p(a, b int32) int32 {
return r
}
-// Sub32 performs - operation on two int32 operands
-// returning a result and status
+// Sub32 performs - operation on two int32 operands, returning a result and status.
func Sub32(a, b int32) (int32, bool) {
c := a - b
if (c < a) == (b > 0) {
@@ -210,7 +201,7 @@ func Sub32(a, b int32) (int32, bool) {
return c, false
}
-// Sub32p is the unchecked panicing version of Sub32
+// Sub32p is the unchecked panicing version of Sub32.
func Sub32p(a, b int32) int32 {
r, ok := Sub32(a, b)
if !ok {
@@ -219,8 +210,7 @@ func Sub32p(a, b int32) int32 {
return r
}
-// Mul32 performs * operation on two int32 operands
-// returning a result and status
+// Mul32 performs * operation on two int32 operands, returning a result and status.
func Mul32(a, b int32) (int32, bool) {
if a == 0 || b == 0 {
return 0, true
@@ -234,7 +224,7 @@ func Mul32(a, b int32) (int32, bool) {
return c, false
}
-// Mul32p is the unchecked panicing version of Mul32
+// Mul32p is the unchecked panicing version of Mul32.
func Mul32p(a, b int32) int32 {
r, ok := Mul32(a, b)
if !ok {
@@ -243,14 +233,13 @@ func Mul32p(a, b int32) int32 {
return r
}
-// Div32 performs / operation on two int32 operands
-// returning a result and status
+// Div32 performs / operation on two int32 operands, returning a result and status.
func Div32(a, b int32) (int32, bool) {
q, _, ok := Quotient32(a, b)
return q, ok
}
-// Div32p is the unchecked panicing version of Div32
+// Div32p is the unchecked panicing version of Div32.
func Div32p(a, b int32) int32 {
r, ok := Div32(a, b)
if !ok {
@@ -259,19 +248,19 @@ func Div32p(a, b int32) int32 {
return r
}
-// Quotient32 performs + operation on two int32 operands
-// returning a quotient, a remainder and status
+// Quotient32 performs / operation on two int32 operands, returning a quotient,
+// a remainder and status.
func Quotient32(a, b int32) (int32, int32, bool) {
if b == 0 {
return 0, 0, false
}
c := a / b
- status := (c < 0) == ((a < 0) != (b < 0))
- return c, a % b, status
+ status := (c < 0) == ((a < 0) != (b < 0)) || (c == 0) // no sign check for 0 quotient
+ return c, a%b, status
}
-// Add64 performs + operation on two int64 operands
-// returning a result and status
+
+// Add64 performs + operation on two int64 operands, returning a result and status.
func Add64(a, b int64) (int64, bool) {
c := a + b
if (c > a) == (b > 0) {
@@ -280,7 +269,7 @@ func Add64(a, b int64) (int64, bool) {
return c, false
}
-// Add64p is the unchecked panicing version of Add64
+// Add64p is the unchecked panicing version of Add64.
func Add64p(a, b int64) int64 {
r, ok := Add64(a, b)
if !ok {
@@ -289,8 +278,7 @@ func Add64p(a, b int64) int64 {
return r
}
-// Sub64 performs - operation on two int64 operands
-// returning a result and status
+// Sub64 performs - operation on two int64 operands, returning a result and status.
func Sub64(a, b int64) (int64, bool) {
c := a - b
if (c < a) == (b > 0) {
@@ -299,7 +287,7 @@ func Sub64(a, b int64) (int64, bool) {
return c, false
}
-// Sub64p is the unchecked panicing version of Sub64
+// Sub64p is the unchecked panicing version of Sub64.
func Sub64p(a, b int64) int64 {
r, ok := Sub64(a, b)
if !ok {
@@ -308,8 +296,7 @@ func Sub64p(a, b int64) int64 {
return r
}
-// Mul64 performs * operation on two int64 operands
-// returning a result and status
+// Mul64 performs * operation on two int64 operands, returning a result and status.
func Mul64(a, b int64) (int64, bool) {
if a == 0 || b == 0 {
return 0, true
@@ -323,7 +310,7 @@ func Mul64(a, b int64) (int64, bool) {
return c, false
}
-// Mul64p is the unchecked panicing version of Mul64
+// Mul64p is the unchecked panicing version of Mul64.
func Mul64p(a, b int64) int64 {
r, ok := Mul64(a, b)
if !ok {
@@ -332,14 +319,13 @@ func Mul64p(a, b int64) int64 {
return r
}
-// Div64 performs / operation on two int64 operands
-// returning a result and status
+// Div64 performs / operation on two int64 operands, returning a result and status.
func Div64(a, b int64) (int64, bool) {
q, _, ok := Quotient64(a, b)
return q, ok
}
-// Div64p is the unchecked panicing version of Div64
+// Div64p is the unchecked panicing version of Div64.
func Div64p(a, b int64) int64 {
r, ok := Div64(a, b)
if !ok {
@@ -348,13 +334,14 @@ func Div64p(a, b int64) int64 {
return r
}
-// Quotient64 performs + operation on two int64 operands
-// returning a quotient, a remainder and status
+// Quotient64 performs / operation on two int64 operands, returning a quotient,
+// a remainder and status.
func Quotient64(a, b int64) (int64, int64, bool) {
if b == 0 {
return 0, 0, false
}
c := a / b
- status := (c < 0) == ((a < 0) != (b < 0))
- return c, a % b, status
+ status := (c < 0) == ((a < 0) != (b < 0)) || (c == 0) // no sign check for 0 quotient
+ return c, a%b, status
}
+
diff --git a/tm2/pkg/overflow/overflow_template.sh b/tm2/pkg/overflow/overflow_template.sh
index a2a85f2c581..0cc3c9595bf 100755
--- a/tm2/pkg/overflow/overflow_template.sh
+++ b/tm2/pkg/overflow/overflow_template.sh
@@ -4,109 +4,94 @@ exec > overflow_impl.go
echo "package overflow
-// This is generated code, created by overflow_template.sh executed
-// by \"go generate\"
-
-"
-
+// Code generated by overflow_template.sh from 'go generate'. DO NOT EDIT."
for SIZE in 8 16 32 64
do
-echo "
-
-// Add${SIZE} performs + operation on two int${SIZE} operands
-// returning a result and status
+ echo "
+// Add${SIZE} performs + operation on two int${SIZE} operands, returning a result and status.
func Add${SIZE}(a, b int${SIZE}) (int${SIZE}, bool) {
- c := a + b
- if (c > a) == (b > 0) {
- return c, true
- }
- return c, false
+ c := a + b
+ if (c > a) == (b > 0) {
+ return c, true
+ }
+ return c, false
}
-// Add${SIZE}p is the unchecked panicing version of Add${SIZE}
+// Add${SIZE}p is the unchecked panicing version of Add${SIZE}.
func Add${SIZE}p(a, b int${SIZE}) int${SIZE} {
- r, ok := Add${SIZE}(a, b)
- if !ok {
- panic(\"addition overflow\")
- }
- return r
+ r, ok := Add${SIZE}(a, b)
+ if !ok {
+ panic(\"addition overflow\")
+ }
+ return r
}
-
-// Sub${SIZE} performs - operation on two int${SIZE} operands
-// returning a result and status
+// Sub${SIZE} performs - operation on two int${SIZE} operands, returning a result and status.
func Sub${SIZE}(a, b int${SIZE}) (int${SIZE}, bool) {
- c := a - b
- if (c < a) == (b > 0) {
- return c, true
- }
- return c, false
+ c := a - b
+ if (c < a) == (b > 0) {
+ return c, true
+ }
+ return c, false
}
-// Sub${SIZE}p is the unchecked panicing version of Sub${SIZE}
+// Sub${SIZE}p is the unchecked panicing version of Sub${SIZE}.
func Sub${SIZE}p(a, b int${SIZE}) int${SIZE} {
- r, ok := Sub${SIZE}(a, b)
- if !ok {
- panic(\"subtraction overflow\")
- }
- return r
+ r, ok := Sub${SIZE}(a, b)
+ if !ok {
+ panic(\"subtraction overflow\")
+ }
+ return r
}
-
-// Mul${SIZE} performs * operation on two int${SIZE} operands
-// returning a result and status
+// Mul${SIZE} performs * operation on two int${SIZE} operands, returning a result and status.
func Mul${SIZE}(a, b int${SIZE}) (int${SIZE}, bool) {
- if a == 0 || b == 0 {
- return 0, true
- }
- c := a * b
- if (c < 0) == ((a < 0) != (b < 0)) {
- if c/b == a {
- return c, true
- }
- }
- return c, false
+ if a == 0 || b == 0 {
+ return 0, true
+ }
+ c := a * b
+ if (c < 0) == ((a < 0) != (b < 0)) {
+ if c/b == a {
+ return c, true
+ }
+ }
+ return c, false
}
-// Mul${SIZE}p is the unchecked panicing version of Mul${SIZE}
+// Mul${SIZE}p is the unchecked panicing version of Mul${SIZE}.
func Mul${SIZE}p(a, b int${SIZE}) int${SIZE} {
- r, ok := Mul${SIZE}(a, b)
- if !ok {
- panic(\"multiplication overflow\")
- }
- return r
+ r, ok := Mul${SIZE}(a, b)
+ if !ok {
+ panic(\"multiplication overflow\")
+ }
+ return r
}
-
-
-// Div${SIZE} performs / operation on two int${SIZE} operands
-// returning a result and status
+// Div${SIZE} performs / operation on two int${SIZE} operands, returning a result and status.
func Div${SIZE}(a, b int${SIZE}) (int${SIZE}, bool) {
- q, _, ok := Quotient${SIZE}(a, b)
- return q, ok
+ q, _, ok := Quotient${SIZE}(a, b)
+ return q, ok
}
-// Div${SIZE}p is the unchecked panicing version of Div${SIZE}
+// Div${SIZE}p is the unchecked panicing version of Div${SIZE}.
func Div${SIZE}p(a, b int${SIZE}) int${SIZE} {
- r, ok := Div${SIZE}(a, b)
- if !ok {
- panic(\"division failure\")
- }
- return r
+ r, ok := Div${SIZE}(a, b)
+ if !ok {
+ panic(\"division failure\")
+ }
+ return r
}
-// Quotient${SIZE} performs + operation on two int${SIZE} operands
-// returning a quotient, a remainder and status
+// Quotient${SIZE} performs / operation on two int${SIZE} operands, returning a quotient,
+// a remainder and status.
func Quotient${SIZE}(a, b int${SIZE}) (int${SIZE}, int${SIZE}, bool) {
- if b == 0 {
- return 0, 0, false
- }
- c := a / b
- status := (c < 0) == ((a < 0) != (b < 0))
- return c, a % b, status
+ if b == 0 {
+ return 0, 0, false
+ }
+ c := a / b
+ status := (c < 0) == ((a < 0) != (b < 0)) || (c == 0) // no sign check for 0 quotient
+ return c, a%b, status
}
"
done
-
-go run -modfile ../../../misc/devdeps/go.mod mvdan.cc/gofumpt -w overflow_impl.go
diff --git a/tm2/pkg/overflow/overflow_test.go b/tm2/pkg/overflow/overflow_test.go
index 2b2d345b55d..e6327c9e862 100644
--- a/tm2/pkg/overflow/overflow_test.go
+++ b/tm2/pkg/overflow/overflow_test.go
@@ -28,8 +28,7 @@ func TestAlgorithms(t *testing.T) {
// now the verification
result, ok := Add8(a8, b8)
if ok && int64(result) != r64 {
- t.Errorf("failed to fail on %v + %v = %v instead of %v\n",
- a8, b8, result, r64)
+ t.Errorf("failed to fail on %v + %v = %v instead of %v\n", a8, b8, result, r64)
errors++
}
if !ok && int64(result) == r64 {
@@ -45,8 +44,7 @@ func TestAlgorithms(t *testing.T) {
// now the verification
result, ok := Sub8(a8, b8)
if ok && int64(result) != r64 {
- t.Errorf("failed to fail on %v - %v = %v instead of %v\n",
- a8, b8, result, r64)
+ t.Errorf("failed to fail on %v - %v = %v instead of %v\n", a8, b8, result, r64)
}
if !ok && int64(result) == r64 {
t.Fail()
@@ -61,8 +59,7 @@ func TestAlgorithms(t *testing.T) {
// now the verification
result, ok := Mul8(a8, b8)
if ok && int64(result) != r64 {
- t.Errorf("failed to fail on %v * %v = %v instead of %v\n",
- a8, b8, result, r64)
+ t.Errorf("failed to fail on %v * %v = %v instead of %v\n", a8, b8, result, r64)
errors++
}
if !ok && int64(result) == r64 {
@@ -78,11 +75,10 @@ func TestAlgorithms(t *testing.T) {
// now the verification
result, _, ok := Quotient8(a8, b8)
if ok && int64(result) != r64 {
- t.Errorf("failed to fail on %v / %v = %v instead of %v\n",
- a8, b8, result, r64)
+ t.Errorf("failed to fail on %v / %v = %v instead of %v\n", a8, b8, result, r64)
errors++
}
- if !ok && result != 0 && int64(result) == r64 {
+ if !ok && int64(result) == r64 {
t.Fail()
errors++
}
diff --git a/tm2/pkg/p2p/README.md b/tm2/pkg/p2p/README.md
index 81888403e1c..f64fafb53e0 100644
--- a/tm2/pkg/p2p/README.md
+++ b/tm2/pkg/p2p/README.md
@@ -1,4 +1,604 @@
-# p2p
+## Overview
-The p2p package provides an abstraction around peer-to-peer communication.
+The `p2p` package and its “sub-packages” contain the required building blocks for Tendermint2’s networking layer.
+This document aims to explain the `p2p` terminology, and better document the way the `p2p` module works within the TM2
+ecosystem, especially in relation to other modules like `consensus`, `blockchain` and `mempool`.
+
+## Common Types
+
+To fully understand the `Concepts` section of the `p2p` documentation, there must be at least a basic understanding of
+the terminology of the `p2p` module, because there are types that keep popping up constantly, and it’s worth
+understanding what they’re about.
+
+### `NetAddress`
+
+```go
+package types
+
+// NetAddress defines information about a peer on the network
+// including its ID, IP address, and port
+type NetAddress struct {
+ ID ID `json:"id"` // unique peer identifier (public key address)
+ IP net.IP `json:"ip"` // the IP part of the dial address
+ Port uint16 `json:"port"` // the port part of the dial address
+}
+```
+
+A `NetAddress` is simply a wrapper for a unique peer in the network.
+
+This address consists of several parts:
+
+- the peer’s ID, derived from the peer’s public key (it’s the address).
+- the peer’s dial address, used for executing TCP dials.
+
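+For illustration, a `NetAddress` can be built from its textual `<ID>@<host>:<port>` form using the helpers in the
+`types` package (the host and port below are placeholders):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/gnolang/gno/tm2/pkg/p2p/types"
+)
+
+func main() {
+	// Generate a fresh node key, purely to obtain a valid peer ID
+	key := types.GenerateNodeKey()
+
+	// "<ID>@<host>:<port>"; the dial address part is a placeholder
+	addr, err := types.NewNetAddressFromString(
+		fmt.Sprintf("%s@%s", key.ID(), "10.0.0.1:26656"),
+	)
+	if err != nil {
+		panic(err) // malformed address, or invalid ID
+	}
+
+	fmt.Println(addr.ID, addr.IP, addr.Port)
+}
+```
+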
+### `ID`
+
+```go
+// ID represents the cryptographically unique Peer ID
+type ID = crypto.ID
+```
+
+The peer ID is the unique peer identifier. It is used for unambiguously resolving who a peer is, during communication.
+
+The peer ID is used because it is derived from the peer’s public key (the same key used to encrypt communication),
+and it must match the public key used in p2p communication. It can, and should, be considered unique.
+
+### `Reactor`
+
+Without going too much into detail in the terminology section, as a much more detailed explanation is discussed below:
+
+A `Reactor` is an abstraction of a Tendermint2 module, that needs to utilize the `p2p` layer.
+
+Currently active reactors in TM2, that utilize the p2p layer:
+
+- the consensus reactor, that handles consensus message passing
+- the blockchain reactor, that handles block syncing
+- the mempool reactor, that handles transaction gossiping
+
+All of these functionalities require a live p2p network to work, and `Reactor`s are how these modules become aware
+of things happening in the network (like new peers joining, for example).
+
+## Concepts
+
+### Peer
+
+`Peer` is an abstraction over a p2p connection that is:
+
+- **verified**, meaning it went through the handshaking process and the information the other peer shared checked out
+  (this process is discussed in detail later).
+- **multiplexed over TCP** (the only kind of p2p connections TM2 supports).
+
+```go
+package p2p
+
+// Peer is a wrapper for a connected peer
+type Peer interface {
+ service.Service
+
+ FlushStop()
+
+ ID() types.ID // peer's cryptographic ID
+ RemoteIP() net.IP // remote IP of the connection
+ RemoteAddr() net.Addr // remote address of the connection
+
+ IsOutbound() bool // did we dial the peer
+ IsPersistent() bool // do we redial this peer when we disconnect
+ IsPrivate() bool // do we share the peer
+
+ CloseConn() error // close original connection
+
+ NodeInfo() types.NodeInfo // peer's info
+ Status() ConnectionStatus
+ SocketAddr() *types.NetAddress // actual address of the socket
+
+ Send(byte, []byte) bool
+ TrySend(byte, []byte) bool
+
+ Set(string, any)
+ Get(string) any
+}
+```
+
+There are more than a few things to break down here, so let’s tackle them individually.
+
+The `Peer` abstraction holds callbacks relating to information about the actual live peer connection, such as what kind
+of direction it is, what is the connection status, and others.
+
+```go
+package p2p
+
+type Peer interface {
+ // ...
+
+ ID() types.ID // peer's cryptographic ID
+ RemoteIP() net.IP // remote IP of the connection
+ RemoteAddr() net.Addr // remote address of the connection
+
+ NodeInfo() types.NodeInfo // peer's info
+ Status() ConnectionStatus
+ SocketAddr() *types.NetAddress // actual address of the socket
+
+ IsOutbound() bool // did we dial the peer
+ IsPersistent() bool // do we redial this peer when we disconnect
+ IsPrivate() bool // do we share the peer
+
+ // ...
+}
+
+```
+
+However, there is part of the `Peer` abstraction that outlines the flipped design of the entire `p2p` module, and a
+severe limitation of this implementation.
+
+```go
+package p2p
+
+type Peer interface {
+ // ...
+
+ Send(byte, []byte) bool
+ TrySend(byte, []byte) bool
+
+ // ...
+}
+```
+
+The `Peer` abstraction is used internally in `p2p`, but also by other modules that need to interact with the networking
+layer — this is in itself the biggest crux of the current `p2p` implementation: modules *need to understand* how to use
+and communicate with peers, regardless of the protocol logic. Networking is not an abstraction for the modules, but a
+spec requirement. What this essentially means is that implementation details leak heavily into parts of the TM2 codebase that
+shouldn’t need to know how to handle individual peer broadcasts, or how to trigger custom protocol communication (like
+syncing for example).
+If `module A` wants to broadcast something to the peer network of the node, it needs to do something like this:
+
+```go
+package main
+
+func main() {
+ // ...
+
+ peers := sw.Peers().List() // fetch the peer list
+
+ for _, p := range peers {
+ p.Send(...) // directly message the peer (imitate broadcast)
+ }
+
+ // ...
+}
+```
+
+An additional odd choice in the `Peer` API is the ability to use the peer as a KV store:
+
+```go
+package p2p
+
+type Peer interface {
+ // ...
+
+ Set(string, any)
+ Get(string) any
+
+ // ...
+}
+```
+
+For example, these methods are used within the `consensus` and `mempool` modules to keep track of active peer states
+(like current HRS data, or current peer mempool metadata). Instead of the module handling individual peer state, this
+responsibility is shifted to the peer implementation, causing an odd code dependency situation.
+
+The root of this “flipped” design (modules needing to understand how to interact with peers) stems from the fact that
+peers are instantiated with a multiplex TCP connection under the hood, and basically just wrap that connection. The
+`Peer` API is, in effect, an abstraction over that multiplexed TCP connection.
+
+Changing this dependency stew would require a much larger rewrite of not just the `p2p` module, but other modules
+(`consensus`, `blockchain`, `mempool`) as well, and is as such left as-is.
+
+### Switch
+
+In short, a `Switch` is just the middleware layer that handles module <> `Transport` requests, and manages peers on a
+high application level (that the `Transport` doesn’t concern itself with).
+
+The `Switch` is the entity that manages active peer connections.
+
+```go
+package p2p
+
+// Switch is the abstraction in the p2p module that handles
+// and manages peer connections thorough a Transport
+type Switch interface {
+ // Broadcast publishes data on the given channel, to all peers
+ Broadcast(chID byte, data []byte)
+
+ // Peers returns the latest peer set
+ Peers() PeerSet
+
+ // Subscribe subscribes to active switch events
+ Subscribe(filterFn events.EventFilter) (<-chan events.Event, func())
+
+ // StopPeerForError stops the peer with the given reason
+ StopPeerForError(peer Peer, err error)
+
+ // DialPeers marks the given peers as ready for async dialing
+ DialPeers(peerAddrs ...*types.NetAddress)
+}
+
+```
+
+The API of the `Switch` is relatively straightforward. Users of the `Switch` instantiate it with a `Transport`, and
+utilize it as-is.
+
+The API of the `Switch` is geared towards asynchronicity, and as such users of the `Switch` need to adapt to some
+limitations, such as not having synchronous dials, or synchronous broadcasts.
+
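+For example, a module holding a `Switch` reference queues a broadcast like this (sketch only; `sw` is an
+already-started switch, and the channel ID and payload are illustrative):
+
+```go
+package main
+
+func main() {
+	// ...
+
+	// Queue an async broadcast of raw data to all connected peers,
+	// on an illustrative module channel ID (0x30)
+	sw.Broadcast(0x30, data)
+
+	// The call returns immediately; delivery to individual peers
+	// happens asynchronously in the background
+
+	// ...
+}
+```
+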
+#### Services
+
+There are 3 services that run on top of the `MultiplexSwitch`, upon startup:
+
+- **the accept service**
+- **the dial service**
+- **the redial service**
+
+```go
+package p2p
+
+// OnStart implements BaseService. It starts all the reactors and peers.
+func (sw *MultiplexSwitch) OnStart() error {
+ // Start reactors
+ for _, reactor := range sw.reactors {
+ if err := reactor.Start(); err != nil {
+ return fmt.Errorf("unable to start reactor %w", err)
+ }
+ }
+
+ // Run the peer accept routine.
+ // The accept routine asynchronously accepts
+ // and processes incoming peer connections
+ go sw.runAcceptLoop(sw.ctx)
+
+ // Run the dial routine.
+ // The dial routine parses items in the dial queue
+ // and initiates outbound peer connections
+ go sw.runDialLoop(sw.ctx)
+
+ // Run the redial routine.
+ // The redial routine monitors for important
+ // peer disconnects, and attempts to reconnect
+ // to them
+ go sw.runRedialLoop(sw.ctx)
+
+ return nil
+}
+```
+
+##### Accept Service
+
+The `MultiplexSwitch` needs to actively listen for incoming connections, and handle them accordingly. These situations
+occur when a peer *Dials* (more on this later) another peer, and wants to establish a connection. This connection is
+outbound for one peer, and inbound for the other.
+
+Depending on what kind of security policies or configuration the peer has in place, the connection can be accepted, or
+rejected for a number of reasons:
+
+- the maximum number of inbound peers is reached
+- the multiplex connection fails upon startup (rare)
+
+The `Switch` relies on the `Transport` to return a **verified and valid** peer connection. After the `Transport`
+delivers one, the `Switch` makes sure that keeping the peer makes sense, given the p2p configuration of the node.
+
+```go
+package p2p
+
+func (sw *MultiplexSwitch) runAcceptLoop(ctx context.Context) {
+ // ...
+
+ p, err := sw.transport.Accept(ctx, sw.peerBehavior)
+ if err != nil {
+ sw.Logger.Error(
+ "error encountered during peer connection accept",
+ "err", err,
+ )
+
+ continue
+ }
+
+ // Ignore connection if we already have enough peers.
+ if in := sw.Peers().NumInbound(); in >= sw.maxInboundPeers {
+ sw.Logger.Info(
+ "Ignoring inbound connection: already have enough inbound peers",
+ "address", p.SocketAddr(),
+ "have", in,
+ "max", sw.maxInboundPeers,
+ )
+
+ sw.transport.Remove(p)
+
+ continue
+ }
+
+ // ...
+}
+
+```
+
+In fact, this is the central point in the relationship between the `Switch` and `Transport`.
+The `Transport` is responsible for establishing the connection, and the `Switch` is responsible for handling it after
+it’s been established.
+
+When TM2 modules communicate with the `p2p` module, they communicate *with the `Switch`, not the `Transport`* to execute
+peer-related actions.
+
+##### Dial Service
+
+Peers are dialed asynchronously in the `Switch`, as is suggested by the `Switch` API:
+
+```go
+DialPeers(peerAddrs ...*types.NetAddress)
+```
+
+The `MultiplexSwitch` implementation utilizes a concept called a *dial queue*.
+
+A dial queue is a priority-based queue (sorted by dial time, ascending) from which dial requests are taken and
+executed in the form of peer dialing (through the `Transport`, of course).
+
+The queue needs to be sorted by the dial time, since there are asynchronous dial requests that need to be executed as
+soon as possible, while others can wait to be executed up until a certain point in time.
+
+```go
+package p2p
+
+func (sw *MultiplexSwitch) runDialLoop(ctx context.Context) {
+ // ...
+
+ // Grab a dial item
+ item := sw.dialQueue.Peek()
+ if item == nil {
+ // Nothing to dial
+ continue
+ }
+
+ // Check if the dial time is right
+ // for the item
+ if time.Now().Before(item.Time) {
+ // Nothing to dial
+ continue
+ }
+
+ // Pop the item from the dial queue
+ item = sw.dialQueue.Pop()
+
+ // Dial the peer
+ sw.Logger.Info(
+ "dialing peer",
+ "address", item.Address.String(),
+ )
+
+ // ...
+}
+```
+
+To follow the outcomes of dial requests, users of the `Switch` can subscribe to peer events (more on this later).
+
+##### Redial Service
+
+The TM2 `p2p` module has a concept of something called *persistent peers*.
+
+Persistent peers are specific peers whose connections must be preserved, at all costs. They are specified in the
+top-level node P2P configuration, under `p2p.persistent_peers`.
+
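+Entries use the `<ID>@<host>:<port>` format, comma-separated. A minimal sketch of setting them on the `P2PConfig`
+(in practice the value comes from the node’s loaded configuration; the addresses below are placeholders):
+
+```go
+package main
+
+import "github.com/gnolang/gno/tm2/pkg/p2p/config"
+
+func main() {
+	cfg := config.P2PConfig{}
+
+	// Placeholder entries: each one is the peer's cryptographic ID,
+	// followed by its publicly reachable dial address
+	cfg.PersistentPeers = "<node-id>@10.0.0.1:26656,<node-id>@10.0.0.2:26656"
+
+	// ...
+}
+```
+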
+These peer connections are special, as they don’t adhere to high-level configuration limits like the maximum peer cap;
+instead, they are monitored and handled actively.
+
+A good candidate for a persistent peer is a bootnode that bootstraps and facilitates peer discovery for the network.
+
+If a persistent peer connection is lost for whatever reason (for ex, the peer disconnects), the redial service of the
+`MultiplexSwitch` will create a dial request for the dial service, and attempt to re-establish the lost connection.
+
+```go
+package p2p
+
+func (sw *MultiplexSwitch) runRedialLoop(ctx context.Context) {
+ // ...
+
+ var (
+ peers = sw.Peers()
+ peersToDial = make([]*types.NetAddress, 0)
+ )
+
+ sw.persistentPeers.Range(func(key, value any) bool {
+ var (
+ id = key.(types.ID)
+ addr = value.(*types.NetAddress)
+ )
+
+ // Check if the peer is part of the peer set
+ // or is scheduled for dialing
+ if peers.Has(id) || sw.dialQueue.Has(addr) {
+ return true
+ }
+
+ peersToDial = append(peersToDial, addr)
+
+ return true
+ })
+
+ if len(peersToDial) == 0 {
+ // No persistent peers are missing
+ return
+ }
+
+ // Add the peers to the dial queue
+ sw.DialPeers(peersToDial...)
+
+ // ...
+}
+```
+
+#### Events
+
+The `Switch` is meant to be asynchronous.
+
+This means that processes like dialing peers, removing peers and doing broadcasts are not synchronous, blocking
+operations for the `Switch` user.
+
+To be able to tap into the outcome of these asynchronous events, the `Switch` utilizes a simple event system, based on
+event filters.
+
+```go
+package main
+
+func main() {
+ // ...
+
+ // Subscribe to live switch events
+ ch, unsubFn := multiplexSwitch.Subscribe(func(event events.Event) bool {
+ // This subscription will only return "PeerConnected" events
+ return event.Type() == events.PeerConnected
+ })
+
+ defer unsubFn() // removes the subscription
+
+ select {
+ // Events are sent to the channel as soon as
+ // they appear and pass the subscription filter
+ case ev := <-ch:
+ e := ev.(events.PeerConnectedEvent)
+ // use event data...
+ case <-ctx.Done():
+ // ...
+ }
+
+ // ...
+}
+```
+
+An event setup like this is useful for example when the user of the `Switch` wants to capture successful peer dial
+events, in real time.
+
+#### What is “peer behavior”?
+
+```go
+package p2p
+
+// PeerBehavior wraps the Reactor and MultiplexSwitch information a Transport would need when
+// dialing or accepting new Peer connections.
+// It is worth noting that the only reason why this information is required in the first place,
+// is because Peers expose an API through which different TM modules can interact with them.
+// In the future™, modules should not directly "Send" anything to Peers, but instead communicate through
+// other mediums, such as the P2P module
+type PeerBehavior interface {
+ // ReactorChDescriptors returns the Reactor channel descriptors
+ ReactorChDescriptors() []*conn.ChannelDescriptor
+
+ // Reactors returns the node's active p2p Reactors (modules)
+ Reactors() map[byte]Reactor
+
+ // HandlePeerError propagates a peer connection error for further processing
+ HandlePeerError(Peer, error)
+
+ // IsPersistentPeer returns a flag indicating if the given peer is persistent
+ IsPersistentPeer(types.ID) bool
+
+ // IsPrivatePeer returns a flag indicating if the given peer is private
+ IsPrivatePeer(types.ID) bool
+}
+
+```
+
+In short, the previously-mentioned crux of the `p2p` implementation (having `Peer`s be directly managed by different TM2
+modules) requires information on how to behave when interacting with other peers.
+
+TM2 modules on `peer A` communicate with the same modules on `peer B` through something called *channels*. For example, if
+the `mempool` module on `peer A` wants to share a transaction with the mempool module on `peer B`, it will utilize a
+dedicated (and unique!) channel for it (ex. `0x30`). This is a protocol that lives on top of the already-established
+multiplexed connection, and metadata relating to it is passed down through *peer behavior*.
+
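+As a rough sketch of what this looks like from a module’s perspective (based on the `Reactor` interface in
+`base_reactor.go`; the channel ID, priority and the `ChannelDescriptor` field names used here are illustrative
+assumptions), a reactor embeds `BaseReactor` and registers its own unique channel:
+
+```go
+package main
+
+import (
+	"github.com/gnolang/gno/tm2/pkg/p2p"
+	"github.com/gnolang/gno/tm2/pkg/p2p/conn"
+)
+
+// exampleReactor handles messages for a single, module-specific channel
+type exampleReactor struct {
+	p2p.BaseReactor
+}
+
+func newExampleReactor() *exampleReactor {
+	r := &exampleReactor{}
+	r.BaseReactor = *p2p.NewBaseReactor("ExampleReactor", r)
+
+	return r
+}
+
+// GetChannels registers the reactor's unique channel (ID and priority are illustrative)
+func (r *exampleReactor) GetChannels() []*conn.ChannelDescriptor {
+	return []*conn.ChannelDescriptor{
+		{ID: 0x42, Priority: 1},
+	}
+}
+
+// Receive is invoked by the switch for every message arriving on the channel
+func (r *exampleReactor) Receive(chID byte, peer p2p.PeerConn, msgBytes []byte) {
+	// decode msgBytes, and act on it; peer identifies the sender
+}
+```
+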
+### Transport
+
+As previously mentioned, the `Transport` is the infrastructure layer of the `p2p` module.
+
+In contrast to the `Switch`, which is concerned with higher-level application logic (like the number of peers, peer
+limits, etc), the `Transport` is concerned with actually establishing and maintaining peer connections on a much lower
+level.
+
+```go
+package p2p
+
+// Transport handles peer dialing and connection acceptance. Additionally,
+// it is also responsible for any custom connection mechanisms (like handshaking).
+// Peers returned by the transport are considered to be verified and sound
+type Transport interface {
+ // NetAddress returns the Transport's dial address
+ NetAddress() types.NetAddress
+
+ // Accept returns a newly connected inbound peer
+ Accept(context.Context, PeerBehavior) (Peer, error)
+
+ // Dial dials a peer, and returns it
+ Dial(context.Context, types.NetAddress, PeerBehavior) (Peer, error)
+
+ // Remove drops any resources associated
+ // with the Peer in the transport
+ Remove(Peer)
+}
+```
+
+When peers dial other peers in TM2, they are in fact dialing their `Transport`s, and the connection is being handled
+here.
+
+- `Accept` waits for an **incoming** connection, parses it and returns it.
+- `Dial` attempts to establish an **outgoing** connection, parses it and returns it.
+
+There are a few important steps that happen when establishing a p2p connection in TM2, between 2 different peers:
+
+1. The peers go through a handshaking process, and establish something called a *secret connection*. The handshaking
+ process is based on the [STS protocol](https://github.com/tendermint/tendermint/blob/0.1/docs/sts-final.pdf), and
+ after it is completed successfully, all communication between the 2 peers is **encrypted**.
+2. After establishing a secret connection, the peers exchange their respective node information. The purpose of this
+ step is to verify that the peers are indeed compatible with each other, and should be establishing a connection in
+ the first place (same network, common protocols, etc.).
+3. Once the secret connection is established, and the node information is exchanged, the connection to the peer is
+ considered valid and verified — it can now be used by the `Switch` (accepted, or rejected, based on `Switch`
+ high-level constraints). Note the distinction here that the `Transport` establishes and maintains the connection, but
+ it can ultimately be scrapped by the `Switch` at any point in time.
+
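+As a rough sketch of the dialing side of this flow (illustrative only; `transport`, `ctx`, `peerAddr` and `behavior`
+are assumed to already exist, with `behavior` carrying the reactor/peer policy information described in the
+*peer behavior* section above):
+
+```go
+package main
+
+func main() {
+	// ...
+
+	// Dial the remote peer through the Transport; the secret-connection
+	// handshake and node-info exchange happen inside this call
+	p, err := transport.Dial(ctx, peerAddr, behavior)
+	if err != nil {
+		// the handshake failed, or the peers are incompatible
+		return
+	}
+
+	// The returned peer is verified; whether it is kept is up to the Switch
+	fmt.Println("dialed peer:", p.ID())
+
+	// ...
+}
+```
+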
+### Peer Discovery
+
+There is a final service that runs alongside the previously-mentioned `Switch` services — peer discovery.
+
+Every blockchain node needs an adequate number of peers to communicate with, in order to ensure smooth functioning.
+Validator nodes need to be *loosely connected* to at least 2/3+ of the validator set in order to participate and
+not cause block misses or mis-votes (loosely connected means that there always exists a path between different peers in
+the network topology that allows them to reach each other).
+
+The peer discovery service ensures that the given node is always learning more about the overall network topology, and
+filling out any empty connection slots (outbound peers).
+
+This background service works in the following (albeit primitive) way:
+
+1. At specific intervals, `node A` checks its peer table, and picks a random peer `P`, from the active peer list.
+2. When `P` is picked, `node A` initiates a discovery protocol process, in which:
+ - `node A` sends a request to peer `P` for its peer list (max 30 peers)
+ - peer `P` responds to the request
+
+3. Once `node A` has the peer list from `P`, it adds the entire peer list into the dial queue, to establish outbound
+ peer connections.
+
+This process repeats at specific intervals. It is worth noting that if the limit of outbound peers is reached, the peer
+dials have no effect.
+
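+A hypothetical sketch of a single discovery round (the helper names here are placeholders, not actual APIs, and `sw`
+is an active switch; the point is that discovered addresses are simply handed to the switch for async dialing):
+
+```go
+package main
+
+func main() {
+	// ...
+
+	// Placeholder helpers: pick a random connected peer, and ask it
+	// for its peer list (at most 30 entries)
+	peer := pickRandomPeer(sw.Peers().List())
+	discovered := requestPeerList(peer)
+
+	// Queue the discovered addresses for async dialing; if the outbound
+	// peer limit is already reached, these dials have no effect
+	sw.DialPeers(discovered...)
+
+	// ...
+}
+```
+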
+#### Bootnodes (Seeds)
+
+Bootnodes are specialized network nodes that play a critical role in the initial peer discovery process for new nodes
+joining the network.
+
+When a blockchain client starts, it needs to find and connect to other nodes to synchronize data and participate in the
+network. Bootnodes provide a predefined list of accessible and reliable entry points that act as a gateway for
+discovering other active nodes (through peer discovery).
+
+These nodes are provided as part of the node’s p2p configuration. Once connected to a bootnode, the client uses peer
+discovery to find and connect to additional peers, enabling full participation and unlocking other client
+protocols (consensus, mempool…).
+
+Bootnodes usually do not store the full blockchain or participate in consensus; their primary role is to facilitate
+connectivity in the network (act as a peer relay).
\ No newline at end of file
diff --git a/tm2/pkg/p2p/base_reactor.go b/tm2/pkg/p2p/base_reactor.go
index 91b3981d109..35b03f73be4 100644
--- a/tm2/pkg/p2p/base_reactor.go
+++ b/tm2/pkg/p2p/base_reactor.go
@@ -6,17 +6,17 @@ import (
)
// Reactor is responsible for handling incoming messages on one or more
-// Channel. Switch calls GetChannels when reactor is added to it. When a new
+// Channel. MultiplexSwitch calls GetChannels when reactor is added to it. When a new
// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called
// when the peer is stopped. Receive is called when a message is received on a
// channel associated with this reactor.
//
-// Peer#Send or Peer#TrySend should be used to send the message to a peer.
+// PeerConn#Send or PeerConn#TrySend should be used to send the message to a peer.
type Reactor interface {
service.Service // Start, Stop
// SetSwitch allows setting a switch.
- SetSwitch(*Switch)
+ SetSwitch(Switch)
// GetChannels returns the list of MConnection.ChannelDescriptor. Make sure
// that each ID is unique across all the reactors added to the switch.
@@ -28,15 +28,15 @@ type Reactor interface {
// NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start
// the peer. Do not store any data associated with the peer in the reactor
// itself unless you don't want to have a state, which is never cleaned up.
- InitPeer(peer Peer) Peer
+ InitPeer(peer PeerConn) PeerConn
// AddPeer is called by the switch after the peer is added and successfully
// started. Use it to start goroutines communicating with the peer.
- AddPeer(peer Peer)
+ AddPeer(peer PeerConn)
// RemovePeer is called by the switch when the peer is stopped (due to error
// or other reason).
- RemovePeer(peer Peer, reason interface{})
+ RemovePeer(peer PeerConn, reason interface{})
// Receive is called by the switch when msgBytes is received from the peer.
//
@@ -44,14 +44,14 @@ type Reactor interface {
// copying.
//
// CONTRACT: msgBytes are not nil.
- Receive(chID byte, peer Peer, msgBytes []byte)
+ Receive(chID byte, peer PeerConn, msgBytes []byte)
}
-//--------------------------------------
+// --------------------------------------
type BaseReactor struct {
- service.BaseService // Provides Start, Stop, .Quit
- Switch *Switch
+ service.BaseService // Provides Start, Stop, Quit
+ Switch Switch
}
func NewBaseReactor(name string, impl Reactor) *BaseReactor {
@@ -61,11 +61,11 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor {
}
}
-func (br *BaseReactor) SetSwitch(sw *Switch) {
+func (br *BaseReactor) SetSwitch(sw Switch) {
br.Switch = sw
}
-func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
-func (*BaseReactor) AddPeer(peer Peer) {}
-func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
-func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
-func (*BaseReactor) InitPeer(peer Peer) Peer { return peer }
+func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
+func (*BaseReactor) AddPeer(_ PeerConn) {}
+func (*BaseReactor) RemovePeer(_ PeerConn, _ any) {}
+func (*BaseReactor) Receive(_ byte, _ PeerConn, _ []byte) {}
+func (*BaseReactor) InitPeer(peer PeerConn) PeerConn { return peer }
diff --git a/tm2/pkg/p2p/cmd/stest/main.go b/tm2/pkg/p2p/cmd/stest/main.go
deleted file mode 100644
index 2835e0cc1f0..00000000000
--- a/tm2/pkg/p2p/cmd/stest/main.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "net"
- "os"
-
- "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
- p2pconn "github.com/gnolang/gno/tm2/pkg/p2p/conn"
-)
-
-var (
- remote string
- listen string
-)
-
-func init() {
- flag.StringVar(&listen, "listen", "", "set to :port if server, eg :8080")
- flag.StringVar(&remote, "remote", "", "remote ip:port")
- flag.Parse()
-}
-
-func main() {
- if listen != "" {
- fmt.Println("listening at", listen)
- ln, err := net.Listen("tcp", listen)
- if err != nil {
- // handle error
- }
- conn, err := ln.Accept()
- if err != nil {
- panic(err)
- }
- handleConnection(conn)
- } else {
- // connect to remote.
- if remote == "" {
- panic("must specify remote ip:port unless server")
- }
- fmt.Println("connecting to", remote)
- conn, err := net.Dial("tcp", remote)
- if err != nil {
- panic(err)
- }
- handleConnection(conn)
- }
-}
-
-func handleConnection(conn net.Conn) {
- priv := ed25519.GenPrivKey()
- pub := priv.PubKey()
- fmt.Println("local pubkey:", pub)
- fmt.Println("local pubkey addr:", pub.Address())
-
- sconn, err := p2pconn.MakeSecretConnection(conn, priv)
- if err != nil {
- panic(err)
- }
- // Read line from sconn and print.
- go func() {
- sc := bufio.NewScanner(sconn)
- for sc.Scan() {
- line := sc.Text() // GET the line string
- fmt.Println(">>", line)
- }
- if err := sc.Err(); err != nil {
- panic(err)
- }
- }()
- // Read line from stdin and write.
- for {
- sc := bufio.NewScanner(os.Stdin)
- for sc.Scan() {
- line := sc.Text() + "\n"
- _, err := sconn.Write([]byte(line))
- if err != nil {
- panic(err)
- }
- }
- if err := sc.Err(); err != nil {
- panic(err)
- }
- }
-}
diff --git a/tm2/pkg/p2p/config/config.go b/tm2/pkg/p2p/config/config.go
index 07692145fee..380dadc4f6f 100644
--- a/tm2/pkg/p2p/config/config.go
+++ b/tm2/pkg/p2p/config/config.go
@@ -1,19 +1,15 @@
package config
import (
+ "errors"
"time"
-
- "github.com/gnolang/gno/tm2/pkg/errors"
)
-// -----------------------------------------------------------------------------
-// P2PConfig
-
-const (
- // FuzzModeDrop is a mode in which we randomly drop reads/writes, connections or sleep
- FuzzModeDrop = iota
- // FuzzModeDelay is a mode in which we randomly sleep
- FuzzModeDelay
+var (
+ ErrInvalidFlushThrottleTimeout = errors.New("invalid flush throttle timeout")
+ ErrInvalidMaxPayloadSize = errors.New("invalid message payload size")
+ ErrInvalidSendRate = errors.New("invalid packet send rate")
+ ErrInvalidReceiveRate = errors.New("invalid packet receive rate")
)
// P2PConfig defines the configuration options for the Tendermint peer-to-peer networking layer
@@ -32,14 +28,11 @@ type P2PConfig struct {
// Comma separated list of nodes to keep persistent connections to
PersistentPeers string `json:"persistent_peers" toml:"persistent_peers" comment:"Comma separated list of nodes to keep persistent connections to"`
- // UPNP port forwarding
- UPNP bool `json:"upnp" toml:"upnp" comment:"UPNP port forwarding"`
-
// Maximum number of inbound peers
- MaxNumInboundPeers int `json:"max_num_inbound_peers" toml:"max_num_inbound_peers" comment:"Maximum number of inbound peers"`
+ MaxNumInboundPeers uint64 `json:"max_num_inbound_peers" toml:"max_num_inbound_peers" comment:"Maximum number of inbound peers"`
// Maximum number of outbound peers to connect to, excluding persistent peers
- MaxNumOutboundPeers int `json:"max_num_outbound_peers" toml:"max_num_outbound_peers" comment:"Maximum number of outbound peers to connect to, excluding persistent peers"`
+ MaxNumOutboundPeers uint64 `json:"max_num_outbound_peers" toml:"max_num_outbound_peers" comment:"Maximum number of outbound peers to connect to, excluding persistent peers"`
// Time to wait before flushing messages out on the connection
FlushThrottleTimeout time.Duration `json:"flush_throttle_timeout" toml:"flush_throttle_timeout" comment:"Time to wait before flushing messages out on the connection"`
@@ -54,105 +47,45 @@ type P2PConfig struct {
RecvRate int64 `json:"recv_rate" toml:"recv_rate" comment:"Rate at which packets can be received, in bytes/second"`
// Set true to enable the peer-exchange reactor
- PexReactor bool `json:"pex" toml:"pex" comment:"Set true to enable the peer-exchange reactor"`
+ PeerExchange bool `json:"pex" toml:"pex" comment:"Set true to enable the peer-exchange reactor"`
- // Seed mode, in which node constantly crawls the network and looks for
- // peers. If another node asks it for addresses, it responds and disconnects.
- //
- // Does not work if the peer-exchange reactor is disabled.
- SeedMode bool `json:"seed_mode" toml:"seed_mode" comment:"Seed mode, in which node constantly crawls the network and looks for\n peers. If another node asks it for addresses, it responds and disconnects.\n\n Does not work if the peer-exchange reactor is disabled."`
-
- // Comma separated list of peer IDs to keep private (will not be gossiped to
- // other peers)
+ // Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
PrivatePeerIDs string `json:"private_peer_ids" toml:"private_peer_ids" comment:"Comma separated list of peer IDs to keep private (will not be gossiped to other peers)"`
-
- // Toggle to disable guard against peers connecting from the same ip.
- AllowDuplicateIP bool `json:"allow_duplicate_ip" toml:"allow_duplicate_ip" comment:"Toggle to disable guard against peers connecting from the same ip."`
-
- // Peer connection configuration.
- HandshakeTimeout time.Duration `json:"handshake_timeout" toml:"handshake_timeout" comment:"Peer connection configuration."`
- DialTimeout time.Duration `json:"dial_timeout" toml:"dial_timeout"`
-
- // Testing params.
- // Force dial to fail
- TestDialFail bool `json:"test_dial_fail" toml:"test_dial_fail"`
- // FUzz connection
- TestFuzz bool `json:"test_fuzz" toml:"test_fuzz"`
- TestFuzzConfig *FuzzConnConfig `json:"test_fuzz_config" toml:"test_fuzz_config"`
}
// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
func DefaultP2PConfig() *P2PConfig {
return &P2PConfig{
ListenAddress: "tcp://0.0.0.0:26656",
- ExternalAddress: "",
- UPNP: false,
+ ExternalAddress: "", // no external address is advertised by default
MaxNumInboundPeers: 40,
MaxNumOutboundPeers: 10,
FlushThrottleTimeout: 100 * time.Millisecond,
MaxPacketMsgPayloadSize: 1024, // 1 kB
SendRate: 5120000, // 5 MB/s
RecvRate: 5120000, // 5 MB/s
- PexReactor: true,
- SeedMode: false,
- AllowDuplicateIP: false,
- HandshakeTimeout: 20 * time.Second,
- DialTimeout: 3 * time.Second,
- TestDialFail: false,
- TestFuzz: false,
- TestFuzzConfig: DefaultFuzzConnConfig(),
+ PeerExchange: true,
}
}
-// TestP2PConfig returns a configuration for testing the peer-to-peer layer
-func TestP2PConfig() *P2PConfig {
- cfg := DefaultP2PConfig()
- cfg.ListenAddress = "tcp://0.0.0.0:26656"
- cfg.FlushThrottleTimeout = 10 * time.Millisecond
- cfg.AllowDuplicateIP = true
- return cfg
-}
-
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *P2PConfig) ValidateBasic() error {
- if cfg.MaxNumInboundPeers < 0 {
- return errors.New("max_num_inbound_peers can't be negative")
- }
- if cfg.MaxNumOutboundPeers < 0 {
- return errors.New("max_num_outbound_peers can't be negative")
- }
if cfg.FlushThrottleTimeout < 0 {
- return errors.New("flush_throttle_timeout can't be negative")
+ return ErrInvalidFlushThrottleTimeout
}
+
if cfg.MaxPacketMsgPayloadSize < 0 {
- return errors.New("max_packet_msg_payload_size can't be negative")
+ return ErrInvalidMaxPayloadSize
}
+
if cfg.SendRate < 0 {
- return errors.New("send_rate can't be negative")
+ return ErrInvalidSendRate
}
+
if cfg.RecvRate < 0 {
- return errors.New("recv_rate can't be negative")
+ return ErrInvalidReceiveRate
}
- return nil
-}
-
-// FuzzConnConfig is a FuzzedConnection configuration.
-type FuzzConnConfig struct {
- Mode int
- MaxDelay time.Duration
- ProbDropRW float64
- ProbDropConn float64
- ProbSleep float64
-}
-// DefaultFuzzConnConfig returns the default config.
-func DefaultFuzzConnConfig() *FuzzConnConfig {
- return &FuzzConnConfig{
- Mode: FuzzModeDrop,
- MaxDelay: 3 * time.Second,
- ProbDropRW: 0.2,
- ProbDropConn: 0.00,
- ProbSleep: 0.00,
- }
+ return nil
}
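Since validation now returns exported sentinels instead of ad-hoc error strings, callers can branch on the failure reason with errors.Is; a minimal sketch (the helper name and logging are assumptions, not part of this change):

import (
	"errors"
	"log"

	"github.com/gnolang/gno/tm2/pkg/p2p/config"
)

// validateP2P is a hypothetical helper that reports why a config was rejected
func validateP2P(cfg *config.P2PConfig) error {
	err := cfg.ValidateBasic()
	if err == nil {
		return nil
	}

	if errors.Is(err, config.ErrInvalidSendRate) {
		log.Printf("send_rate must be non-negative, got %d", cfg.SendRate)
	}

	return err
}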
diff --git a/tm2/pkg/p2p/config/config_test.go b/tm2/pkg/p2p/config/config_test.go
new file mode 100644
index 00000000000..f624563d984
--- /dev/null
+++ b/tm2/pkg/p2p/config/config_test.go
@@ -0,0 +1,59 @@
+package config
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestP2PConfig_ValidateBasic(t *testing.T) {
+ t.Parallel()
+
+ t.Run("invalid flush throttle timeout", func(t *testing.T) {
+ t.Parallel()
+
+ cfg := DefaultP2PConfig()
+
+ cfg.FlushThrottleTimeout = -1
+
+ assert.ErrorIs(t, cfg.ValidateBasic(), ErrInvalidFlushThrottleTimeout)
+ })
+
+ t.Run("invalid max packet payload size", func(t *testing.T) {
+ t.Parallel()
+
+ cfg := DefaultP2PConfig()
+
+ cfg.MaxPacketMsgPayloadSize = -1
+
+ assert.ErrorIs(t, cfg.ValidateBasic(), ErrInvalidMaxPayloadSize)
+ })
+
+ t.Run("invalid send rate", func(t *testing.T) {
+ t.Parallel()
+
+ cfg := DefaultP2PConfig()
+
+ cfg.SendRate = -1
+
+ assert.ErrorIs(t, cfg.ValidateBasic(), ErrInvalidSendRate)
+ })
+
+ t.Run("invalid receive rate", func(t *testing.T) {
+ t.Parallel()
+
+ cfg := DefaultP2PConfig()
+
+ cfg.RecvRate = -1
+
+ assert.ErrorIs(t, cfg.ValidateBasic(), ErrInvalidReceiveRate)
+ })
+
+ t.Run("valid configuration", func(t *testing.T) {
+ t.Parallel()
+
+ cfg := DefaultP2PConfig()
+
+ assert.NoError(t, cfg.ValidateBasic())
+ })
+}
diff --git a/tm2/pkg/p2p/conn/conn.go b/tm2/pkg/p2p/conn/conn.go
new file mode 100644
index 00000000000..3215adc38ca
--- /dev/null
+++ b/tm2/pkg/p2p/conn/conn.go
@@ -0,0 +1,22 @@
+package conn
+
+import (
+ "net"
+ "time"
+)
+
+// pipe wraps the networking conn interface
+type pipe struct {
+ net.Conn
+}
+
+func (p *pipe) SetDeadline(_ time.Time) error {
+ return nil
+}
+
+func NetPipe() (net.Conn, net.Conn) {
+ p1, p2 := net.Pipe()
+ return &pipe{p1}, &pipe{p2}
+}
+
+var _ net.Conn = (*pipe)(nil)
diff --git a/tm2/pkg/p2p/conn/conn_go110.go b/tm2/pkg/p2p/conn/conn_go110.go
deleted file mode 100644
index 37796ac791d..00000000000
--- a/tm2/pkg/p2p/conn/conn_go110.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build go1.10
-
-package conn
-
-// Go1.10 has a proper net.Conn implementation that
-// has the SetDeadline method implemented as per
-// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
-// lest we run into problems like
-// https://github.com/tendermint/classic/issues/851
-
-import "net"
-
-func NetPipe() (net.Conn, net.Conn) {
- return net.Pipe()
-}
diff --git a/tm2/pkg/p2p/conn/conn_notgo110.go b/tm2/pkg/p2p/conn/conn_notgo110.go
deleted file mode 100644
index f91b0c7ea63..00000000000
--- a/tm2/pkg/p2p/conn/conn_notgo110.go
+++ /dev/null
@@ -1,36 +0,0 @@
-//go:build !go1.10
-
-package conn
-
-import (
- "net"
- "time"
-)
-
-// Only Go1.10 has a proper net.Conn implementation that
-// has the SetDeadline method implemented as per
-//
-// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
-//
-// lest we run into problems like
-//
-// https://github.com/tendermint/classic/issues/851
-//
-// so for go versions < Go1.10 use our custom net.Conn creator
-// that doesn't return an `Unimplemented error` for net.Conn.
-// Before https://github.com/tendermint/classic/commit/49faa79bdce5663894b3febbf4955fb1d172df04
-// we hadn't cared about errors from SetDeadline so swallow them up anyways.
-type pipe struct {
- net.Conn
-}
-
-func (p *pipe) SetDeadline(t time.Time) error {
- return nil
-}
-
-func NetPipe() (net.Conn, net.Conn) {
- p1, p2 := net.Pipe()
- return &pipe{p1}, &pipe{p2}
-}
-
-var _ net.Conn = (*pipe)(nil)
diff --git a/tm2/pkg/p2p/conn/connection.go b/tm2/pkg/p2p/conn/connection.go
index 6b7400600d3..acbffdb3cd7 100644
--- a/tm2/pkg/p2p/conn/connection.go
+++ b/tm2/pkg/p2p/conn/connection.go
@@ -17,6 +17,7 @@ import (
"github.com/gnolang/gno/tm2/pkg/amino"
"github.com/gnolang/gno/tm2/pkg/errors"
"github.com/gnolang/gno/tm2/pkg/flow"
+ "github.com/gnolang/gno/tm2/pkg/p2p/config"
"github.com/gnolang/gno/tm2/pkg/service"
"github.com/gnolang/gno/tm2/pkg/timer"
)
@@ -31,7 +32,6 @@ const (
// some of these defaults are written in the user config
// flushThrottle, sendRate, recvRate
- // TODO: remove values present in config
defaultFlushThrottle = 100 * time.Millisecond
defaultSendQueueCapacity = 1
@@ -46,7 +46,7 @@ const (
type (
receiveCbFunc func(chID byte, msgBytes []byte)
- errorCbFunc func(interface{})
+ errorCbFunc func(error)
)
/*
@@ -147,6 +147,18 @@ func DefaultMConnConfig() MConnConfig {
}
}
+// MConfigFromP2P returns a multiplex connection configuration
+// with fields updated from the P2PConfig
+func MConfigFromP2P(cfg *config.P2PConfig) MConnConfig {
+ mConfig := DefaultMConnConfig()
+ mConfig.FlushThrottle = cfg.FlushThrottleTimeout
+ mConfig.SendRate = cfg.SendRate
+ mConfig.RecvRate = cfg.RecvRate
+ mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize
+
+ return mConfig
+}
+
// NewMConnection wraps net.Conn and creates multiplex connection
func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc) *MConnection {
return NewMConnectionWithConfig(
@@ -323,7 +335,7 @@ func (c *MConnection) _recover() {
}
}
-func (c *MConnection) stopForError(r interface{}) {
+func (c *MConnection) stopForError(r error) {
c.Stop()
if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
if c.onError != nil {
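A short usage sketch for the MConfigFromP2P helper added above; only the config derivation is shown, and the surrounding node wiring is assumed:

p2pCfg := config.DefaultP2PConfig()

// Derive per-connection settings from the node-level P2P config
mCfg := conn.MConfigFromP2P(p2pCfg)

// mCfg.FlushThrottle, mCfg.SendRate, mCfg.RecvRate and
// mCfg.MaxPacketMsgPayloadSize now mirror the P2P config, and mCfg can be
// handed to the config-aware MConnection constructor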
diff --git a/tm2/pkg/p2p/conn/connection_test.go b/tm2/pkg/p2p/conn/connection_test.go
index 7bbe88ded22..58b363b7b78 100644
--- a/tm2/pkg/p2p/conn/connection_test.go
+++ b/tm2/pkg/p2p/conn/connection_test.go
@@ -21,7 +21,7 @@ func createTestMConnection(t *testing.T, conn net.Conn) *MConnection {
onReceive := func(chID byte, msgBytes []byte) {
}
- onError := func(r interface{}) {
+ onError := func(r error) {
}
c := createMConnectionWithCallbacks(t, conn, onReceive, onError)
c.SetLogger(log.NewTestingLogger(t))
@@ -32,7 +32,7 @@ func createMConnectionWithCallbacks(
t *testing.T,
conn net.Conn,
onReceive func(chID byte, msgBytes []byte),
- onError func(r interface{}),
+ onError func(r error),
) *MConnection {
t.Helper()
@@ -137,7 +137,7 @@ func TestMConnectionReceive(t *testing.T) {
onReceive := func(chID byte, msgBytes []byte) {
receivedCh <- msgBytes
}
- onError := func(r interface{}) {
+ onError := func(r error) {
errorsCh <- r
}
mconn1 := createMConnectionWithCallbacks(t, client, onReceive, onError)
@@ -192,7 +192,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
onReceive := func(chID byte, msgBytes []byte) {
receivedCh <- msgBytes
}
- onError := func(r interface{}) {
+ onError := func(r error) {
errorsCh <- r
}
mconn := createMConnectionWithCallbacks(t, client, onReceive, onError)
@@ -233,7 +233,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) {
onReceive := func(chID byte, msgBytes []byte) {
receivedCh <- msgBytes
}
- onError := func(r interface{}) {
+ onError := func(r error) {
errorsCh <- r
}
mconn := createMConnectionWithCallbacks(t, client, onReceive, onError)
@@ -288,7 +288,7 @@ func TestMConnectionMultiplePings(t *testing.T) {
onReceive := func(chID byte, msgBytes []byte) {
receivedCh <- msgBytes
}
- onError := func(r interface{}) {
+ onError := func(r error) {
errorsCh <- r
}
mconn := createMConnectionWithCallbacks(t, client, onReceive, onError)
@@ -331,7 +331,7 @@ func TestMConnectionPingPongs(t *testing.T) {
onReceive := func(chID byte, msgBytes []byte) {
receivedCh <- msgBytes
}
- onError := func(r interface{}) {
+ onError := func(r error) {
errorsCh <- r
}
mconn := createMConnectionWithCallbacks(t, client, onReceive, onError)
@@ -384,7 +384,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
onReceive := func(chID byte, msgBytes []byte) {
receivedCh <- msgBytes
}
- onError := func(r interface{}) {
+ onError := func(r error) {
errorsCh <- r
}
mconn := createMConnectionWithCallbacks(t, client, onReceive, onError)
@@ -413,7 +413,7 @@ func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (
server, client := NetPipe()
onReceive := func(chID byte, msgBytes []byte) {}
- onError := func(r interface{}) {}
+ onError := func(r error) {}
// create client conn with two channels
chDescs := []*ChannelDescriptor{
@@ -428,7 +428,7 @@ func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (
// create server conn with 1 channel
// it fires on chOnErr when there's an error
serverLogger := log.NewNoopLogger().With("module", "server")
- onError = func(r interface{}) {
+ onError = func(_ error) {
chOnErr <- struct{}{}
}
mconnServer := createMConnectionWithCallbacks(t, server, onReceive, onError)
diff --git a/tm2/pkg/p2p/conn/secret_connection.go b/tm2/pkg/p2p/conn/secret_connection.go
index a37788b947d..d45b5b3846a 100644
--- a/tm2/pkg/p2p/conn/secret_connection.go
+++ b/tm2/pkg/p2p/conn/secret_connection.go
@@ -7,6 +7,7 @@ import (
"crypto/sha256"
"crypto/subtle"
"encoding/binary"
+ "fmt"
"io"
"math"
"net"
@@ -128,7 +129,10 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*
}
// Sign the challenge bytes for authentication.
- locSignature := signChallenge(challenge, locPrivKey)
+ locSignature, err := locPrivKey.Sign(challenge[:])
+ if err != nil {
+ return nil, fmt.Errorf("unable to sign challenge, %w", err)
+ }
// Share (in secret) each other's pubkey & challenge signature
authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature)
@@ -424,15 +428,6 @@ func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) {
return
}
-func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature []byte) {
- signature, err := locPrivKey.Sign(challenge[:])
- // TODO(ismail): let signChallenge return an error instead
- if err != nil {
- panic(err)
- }
- return
-}
-
type authSigMessage struct {
Key crypto.PubKey
Sig []byte
diff --git a/tm2/pkg/p2p/conn_set.go b/tm2/pkg/p2p/conn_set.go
deleted file mode 100644
index d646227831a..00000000000
--- a/tm2/pkg/p2p/conn_set.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package p2p
-
-import (
- "net"
- "sync"
-)
-
-// ConnSet is a lookup table for connections and all their ips.
-type ConnSet interface {
- Has(net.Conn) bool
- HasIP(net.IP) bool
- Set(net.Conn, []net.IP)
- Remove(net.Conn)
- RemoveAddr(net.Addr)
-}
-
-type connSetItem struct {
- conn net.Conn
- ips []net.IP
-}
-
-type connSet struct {
- sync.RWMutex
-
- conns map[string]connSetItem
-}
-
-// NewConnSet returns a ConnSet implementation.
-func NewConnSet() *connSet {
- return &connSet{
- conns: map[string]connSetItem{},
- }
-}
-
-func (cs *connSet) Has(c net.Conn) bool {
- cs.RLock()
- defer cs.RUnlock()
-
- _, ok := cs.conns[c.RemoteAddr().String()]
-
- return ok
-}
-
-func (cs *connSet) HasIP(ip net.IP) bool {
- cs.RLock()
- defer cs.RUnlock()
-
- for _, c := range cs.conns {
- for _, known := range c.ips {
- if known.Equal(ip) {
- return true
- }
- }
- }
-
- return false
-}
-
-func (cs *connSet) Remove(c net.Conn) {
- cs.Lock()
- defer cs.Unlock()
-
- delete(cs.conns, c.RemoteAddr().String())
-}
-
-func (cs *connSet) RemoveAddr(addr net.Addr) {
- cs.Lock()
- defer cs.Unlock()
-
- delete(cs.conns, addr.String())
-}
-
-func (cs *connSet) Set(c net.Conn, ips []net.IP) {
- cs.Lock()
- defer cs.Unlock()
-
- cs.conns[c.RemoteAddr().String()] = connSetItem{
- conn: c,
- ips: ips,
- }
-}
diff --git a/tm2/pkg/p2p/dial/dial.go b/tm2/pkg/p2p/dial/dial.go
new file mode 100644
index 00000000000..e4a7d6fd445
--- /dev/null
+++ b/tm2/pkg/p2p/dial/dial.go
@@ -0,0 +1,83 @@
+package dial
+
+import (
+ "sync"
+ "time"
+
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ queue "github.com/sig-0/insertion-queue"
+)
+
+// Item is a single dial queue item, wrapping
+// the (approximate) time at which the peer should be dialed,
+// and the peer's dial address
+type Item struct {
+ Time time.Time // appropriate dial time
+ Address *types.NetAddress // the dial address of the peer
+}
+
+// Less is the comparison method for the dial queue Item (time ascending)
+func (i Item) Less(item Item) bool {
+ return i.Time.Before(item.Time)
+}
+
+// Queue is a time-sorted (ascending) dial queue
+type Queue struct {
+ mux sync.RWMutex
+
+ items queue.Queue[Item] // sorted dial queue (by time, ascending)
+}
+
+// NewQueue creates a new dial queue
+func NewQueue() *Queue {
+ return &Queue{
+ items: queue.NewQueue[Item](),
+ }
+}
+
+// Peek returns the first item in the dial queue, if any
+func (q *Queue) Peek() *Item {
+ q.mux.RLock()
+ defer q.mux.RUnlock()
+
+ if q.items.Len() == 0 {
+ return nil
+ }
+
+ item := q.items.Index(0)
+
+ return &item
+}
+
+// Push adds new items to the dial queue
+func (q *Queue) Push(items ...Item) {
+ q.mux.Lock()
+ defer q.mux.Unlock()
+
+ for _, item := range items {
+ q.items.Push(item)
+ }
+}
+
+// Pop removes an item from the dial queue, if any
+func (q *Queue) Pop() *Item {
+ q.mux.Lock()
+ defer q.mux.Unlock()
+
+ return q.items.PopFront()
+}
+
+// Has returns a flag indicating if the given
+// address is in the dial queue
+func (q *Queue) Has(addr *types.NetAddress) bool {
+ q.mux.RLock()
+ defer q.mux.RUnlock()
+
+ for _, i := range q.items {
+ if addr.Equals(*i.Address) {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/tm2/pkg/p2p/dial/dial_test.go b/tm2/pkg/p2p/dial/dial_test.go
new file mode 100644
index 00000000000..5e85ec1f95e
--- /dev/null
+++ b/tm2/pkg/p2p/dial/dial_test.go
@@ -0,0 +1,147 @@
+package dial
+
+import (
+ "crypto/rand"
+ "math/big"
+ "slices"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// generateRandomTimes generates random time intervals
+func generateRandomTimes(t *testing.T, count int) []time.Time {
+ t.Helper()
+
+ const timeRange = 94608000 // 3 years
+
+ var (
+ maxRange = big.NewInt(time.Now().Unix() - timeRange)
+ times = make([]time.Time, 0, count)
+ )
+
+ for range count {
+ n, err := rand.Int(rand.Reader, maxRange)
+ require.NoError(t, err)
+
+ randTime := time.Unix(n.Int64()+timeRange, 0)
+
+ times = append(times, randTime)
+ }
+
+ return times
+}
+
+func TestQueue_Push(t *testing.T) {
+ t.Parallel()
+
+ var (
+ timestamps = generateRandomTimes(t, 10)
+ q = NewQueue()
+ )
+
+ // Add the dial items
+ for _, timestamp := range timestamps {
+ q.Push(Item{
+ Time: timestamp,
+ })
+ }
+
+ assert.Len(t, q.items, len(timestamps))
+}
+
+func TestQueue_Peek(t *testing.T) {
+ t.Parallel()
+
+ t.Run("empty queue", func(t *testing.T) {
+ t.Parallel()
+
+ q := NewQueue()
+
+ assert.Nil(t, q.Peek())
+ })
+
+ t.Run("existing item", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ timestamps = generateRandomTimes(t, 100)
+ q = NewQueue()
+ )
+
+ // Add the dial items
+ for _, timestamp := range timestamps {
+ q.Push(Item{
+ Time: timestamp,
+ })
+ }
+
+ // Sort the initial list to find the best timestamp
+ slices.SortFunc(timestamps, func(a, b time.Time) int {
+ if a.Before(b) {
+ return -1
+ }
+
+ if a.After(b) {
+ return 1
+ }
+
+ return 0
+ })
+
+ assert.Equal(t, q.Peek().Time.Unix(), timestamps[0].Unix())
+ })
+}
+
+func TestQueue_Pop(t *testing.T) {
+ t.Parallel()
+
+ t.Run("empty queue", func(t *testing.T) {
+ t.Parallel()
+
+ q := NewQueue()
+
+ assert.Nil(t, q.Pop())
+ })
+
+ t.Run("existing item", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ timestamps = generateRandomTimes(t, 100)
+ q = NewQueue()
+ )
+
+ // Add the dial items
+ for _, timestamp := range timestamps {
+ q.Push(Item{
+ Time: timestamp,
+ })
+ }
+
+ assert.Len(t, q.items, len(timestamps))
+
+ // Sort the initial list to find the best timestamp
+ slices.SortFunc(timestamps, func(a, b time.Time) int {
+ if a.Before(b) {
+ return -1
+ }
+
+ if a.After(b) {
+ return 1
+ }
+
+ return 0
+ })
+
+ for index, timestamp := range timestamps {
+ item := q.Pop()
+
+ require.Len(t, q.items, len(timestamps)-1-index)
+
+ assert.Equal(t, item.Time.Unix(), timestamp.Unix())
+ }
+ })
+}
diff --git a/tm2/pkg/p2p/dial/doc.go b/tm2/pkg/p2p/dial/doc.go
new file mode 100644
index 00000000000..069160e73e6
--- /dev/null
+++ b/tm2/pkg/p2p/dial/doc.go
@@ -0,0 +1,10 @@
+// Package dial contains an implementation of a thread-safe priority dial queue. The queue is sorted
+// by dial item time, ascending.
+// The behavior of the dial queue is the following:
+//
+// - Peeking the dial queue will return the most urgent dial item, or nil if the queue is empty.
+//
+// - Popping the dial queue will return the most urgent dial item or nil if the queue is empty. Popping removes the dial item.
+//
+// - Push adds a new item to the dial queue, and the queue inserts it at its time-ordered position.
+package dial
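A short usage sketch of the queue described above; peerAddr stands for a *types.NetAddress obtained elsewhere (for example from discovery):

q := dial.NewQueue()

// Schedule one immediate dial and one delayed dial
q.Push(
	dial.Item{Time: time.Now(), Address: peerAddr},
	dial.Item{Time: time.Now().Add(30 * time.Second), Address: peerAddr},
)

// Peek does not remove the item; only dial once its time has come
if item := q.Peek(); item != nil && !item.Time.After(time.Now()) {
	item = q.Pop() // removes the most urgent item
	// ... dial item.Address
}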
diff --git a/tm2/pkg/p2p/discovery/discovery.go b/tm2/pkg/p2p/discovery/discovery.go
new file mode 100644
index 00000000000..d884b118c75
--- /dev/null
+++ b/tm2/pkg/p2p/discovery/discovery.go
@@ -0,0 +1,242 @@
+package discovery
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/gnolang/gno/tm2/pkg/amino"
+ "github.com/gnolang/gno/tm2/pkg/p2p"
+ "github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ "golang.org/x/exp/slices"
+)
+
+const (
+ // Channel is the unique channel for the peer discovery protocol
+ Channel = byte(0x50)
+
+ // discoveryInterval is the peer discovery interval, for random peers
+ discoveryInterval = time.Second * 3
+
+ // maxPeersShared is the maximum number of peers shared in the discovery request
+ maxPeersShared = 30
+)
+
+// descriptor is the constant peer discovery protocol descriptor
+var descriptor = &conn.ChannelDescriptor{
+ ID: Channel,
+ Priority: 1, // peer discovery is high priority
+ SendQueueCapacity: 20, // more than enough active conns
+ RecvMessageCapacity: 5242880, // 5MB
+}
+
+// Reactor wraps the logic for the peer exchange protocol
+type Reactor struct {
+ // This embed and the usage of "services"
+ // like the peer discovery reactor highlight the
+ // flipped design of the p2p package:
+ // the peer exchange service is instantiated _outside_
+ // the p2p module, and peers communicate with each
+ // other through Reactor channels, which are also
+ // set up outside the p2p module
+ p2p.BaseReactor
+
+ ctx context.Context
+ cancelFn context.CancelFunc
+
+ discoveryInterval time.Duration
+}
+
+// NewReactor creates a new peer discovery reactor
+func NewReactor(opts ...Option) *Reactor {
+ ctx, cancelFn := context.WithCancel(context.Background())
+
+ r := &Reactor{
+ ctx: ctx,
+ cancelFn: cancelFn,
+ discoveryInterval: discoveryInterval,
+ }
+
+ r.BaseReactor = *p2p.NewBaseReactor("Reactor", r)
+
+ // Apply the options
+ for _, opt := range opts {
+ opt(r)
+ }
+
+ return r
+}
+
+// OnStart runs the peer discovery protocol
+func (r *Reactor) OnStart() error {
+ go func() {
+ ticker := time.NewTicker(r.discoveryInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-r.ctx.Done():
+ r.Logger.Debug("discovery service stopped")
+
+ return
+ case <-ticker.C:
+ // Run the discovery protocol //
+
+ // Grab a random peer, and engage
+ // them for peer discovery
+ peers := r.Switch.Peers().List()
+
+ if len(peers) == 0 {
+ // No discovery to run
+ continue
+ }
+
+ // Generate a random peer index
+ randomPeer, _ := rand.Int(
+ rand.Reader,
+ big.NewInt(int64(len(peers))),
+ )
+
+ // Request peers, async
+ go r.requestPeers(peers[randomPeer.Int64()])
+ }
+ }
+ }()
+
+ return nil
+}
+
+// OnStop stops the peer discovery protocol
+func (r *Reactor) OnStop() {
+ r.cancelFn()
+}
+
+// requestPeers requests the peer set from the given peer
+func (r *Reactor) requestPeers(peer p2p.PeerConn) {
+ // Initiate peer discovery
+ r.Logger.Debug("running peer discovery", "peer", peer.ID())
+
+ // Prepare the request
+ // (empty, as it's a notification)
+ req := &Request{}
+
+ reqBytes, err := amino.MarshalAny(req)
+ if err != nil {
+ r.Logger.Error("unable to marshal discovery request", "err", err)
+
+ return
+ }
+
+ // Send the request
+ if !peer.Send(Channel, reqBytes) {
+ r.Logger.Warn("unable to send discovery request", "peer", peer.ID())
+ }
+}
+
+// GetChannels returns the channels associated with peer discovery
+func (r *Reactor) GetChannels() []*conn.ChannelDescriptor {
+ return []*conn.ChannelDescriptor{descriptor}
+}
+
+// Receive handles incoming messages for the peer discovery reactor
+func (r *Reactor) Receive(chID byte, peer p2p.PeerConn, msgBytes []byte) {
+ r.Logger.Debug(
+ "received message",
+ "peerID", peer.ID(),
+ "chID", chID,
+ )
+
+ // Unmarshal the message
+ var msg Message
+
+ if err := amino.UnmarshalAny(msgBytes, &msg); err != nil {
+ r.Logger.Error("unable to unmarshal discovery message", "err", err)
+
+ return
+ }
+
+ // Validate the message
+ if err := msg.ValidateBasic(); err != nil {
+ r.Logger.Error("unable to validate discovery message", "err", err)
+
+ return
+ }
+
+ switch msg := msg.(type) {
+ case *Request:
+ if err := r.handleDiscoveryRequest(peer); err != nil {
+ r.Logger.Error("unable to handle discovery request", "err", err)
+ }
+ case *Response:
+ // Make the peers available for dialing on the switch
+ r.Switch.DialPeers(msg.Peers...)
+ default:
+ r.Logger.Warn("invalid message received", "msg", msgBytes)
+ }
+}
+
+// handleDiscoveryRequest prepares a peer list that can be shared
+// with the peer requesting discovery
+func (r *Reactor) handleDiscoveryRequest(peer p2p.PeerConn) error {
+ var (
+ localPeers = r.Switch.Peers().List()
+ peers = make([]*types.NetAddress, 0, len(localPeers))
+ )
+
+ // Exclude the private peers from being shared
+ localPeers = slices.DeleteFunc(localPeers, func(p p2p.PeerConn) bool {
+ return p.IsPrivate()
+ })
+
+ // Check if there is anything to share,
+ // to avoid useless traffic
+ if len(localPeers) == 0 {
+ r.Logger.Warn("no peers to share in discovery request")
+
+ return nil
+ }
+
+ // Shuffle and limit the peers shared
+ shufflePeers(localPeers)
+
+ if len(localPeers) > maxPeersShared {
+ localPeers = localPeers[:maxPeersShared]
+ }
+
+ for _, p := range localPeers {
+ peers = append(peers, p.SocketAddr())
+ }
+
+ // Create the response, and marshal
+ // it to Amino binary
+ resp := &Response{
+ Peers: peers,
+ }
+
+ preparedResp, err := amino.MarshalAny(resp)
+ if err != nil {
+ return fmt.Errorf("unable to marshal discovery response, %w", err)
+ }
+
+ // Send the response to the peer
+ if !peer.Send(Channel, preparedResp) {
+ return fmt.Errorf("unable to send discovery response to peer %s", peer.ID())
+ }
+
+ return nil
+}
+
+// shufflePeers shuffles the peer list in-place
+func shufflePeers(peers []p2p.PeerConn) {
+ for i := len(peers) - 1; i > 0; i-- {
+ jBig, _ := rand.Int(rand.Reader, big.NewInt(int64(i+1)))
+
+ j := int(jBig.Int64())
+
+ // Swap elements
+ peers[i], peers[j] = peers[j], peers[i]
+ }
+}
diff --git a/tm2/pkg/p2p/discovery/discovery_test.go b/tm2/pkg/p2p/discovery/discovery_test.go
new file mode 100644
index 00000000000..17404e6039a
--- /dev/null
+++ b/tm2/pkg/p2p/discovery/discovery_test.go
@@ -0,0 +1,453 @@
+package discovery
+
+import (
+ "slices"
+ "testing"
+ "time"
+
+ "github.com/gnolang/gno/tm2/pkg/amino"
+ "github.com/gnolang/gno/tm2/pkg/p2p"
+ "github.com/gnolang/gno/tm2/pkg/p2p/mock"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestReactor_DiscoveryRequest(t *testing.T) {
+ t.Parallel()
+
+ var (
+ notifCh = make(chan struct{}, 1)
+
+ capturedSend []byte
+
+ mockPeer = &mock.Peer{
+ SendFn: func(chID byte, data []byte) bool {
+ require.Equal(t, Channel, chID)
+
+ capturedSend = data
+
+ notifCh <- struct{}{}
+
+ return true
+ },
+ }
+
+ ps = &mockPeerSet{
+ listFn: func() []p2p.PeerConn {
+ return []p2p.PeerConn{mockPeer}
+ },
+ }
+
+ mockSwitch = &mockSwitch{
+ peersFn: func() p2p.PeerSet {
+ return ps
+ },
+ }
+ )
+
+ r := NewReactor(
+ WithDiscoveryInterval(10 * time.Millisecond),
+ )
+
+ // Set the mock switch
+ r.SetSwitch(mockSwitch)
+
+ // Start the discovery service
+ require.NoError(t, r.Start())
+ t.Cleanup(func() {
+ require.NoError(t, r.Stop())
+ })
+
+ select {
+ case <-notifCh:
+ case <-time.After(5 * time.Second):
+ }
+
+ // Make sure the adequate message was captured
+ require.NotNil(t, capturedSend)
+
+ // Parse the message
+ var msg Message
+
+ require.NoError(t, amino.Unmarshal(capturedSend, &msg))
+
+ // Make sure the base message is valid
+ require.NoError(t, msg.ValidateBasic())
+
+ _, ok := msg.(*Request)
+
+ require.True(t, ok)
+}
+
+func TestReactor_DiscoveryResponse(t *testing.T) {
+ t.Parallel()
+
+ t.Run("discovery request received", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ peers = mock.GeneratePeers(t, 50)
+ notifCh = make(chan struct{}, 1)
+
+ capturedSend []byte
+
+ mockPeer = &mock.Peer{
+ SendFn: func(chID byte, data []byte) bool {
+ require.Equal(t, Channel, chID)
+
+ capturedSend = data
+
+ notifCh <- struct{}{}
+
+ return true
+ },
+ }
+
+ ps = &mockPeerSet{
+ listFn: func() []p2p.PeerConn {
+ listed := make([]p2p.PeerConn, 0, len(peers))
+
+ for _, peer := range peers {
+ listed = append(listed, peer)
+ }
+
+ return listed
+ },
+ numInboundFn: func() uint64 {
+ return uint64(len(peers))
+ },
+ }
+
+ mockSwitch = &mockSwitch{
+ peersFn: func() p2p.PeerSet {
+ return ps
+ },
+ }
+ )
+
+ r := NewReactor(
+ WithDiscoveryInterval(10 * time.Millisecond),
+ )
+
+ // Set the mock switch
+ r.SetSwitch(mockSwitch)
+
+ // Prepare the message
+ req := &Request{}
+
+ preparedReq, err := amino.MarshalAny(req)
+ require.NoError(t, err)
+
+ // Receive the message
+ r.Receive(Channel, mockPeer, preparedReq)
+
+ select {
+ case <-notifCh:
+ case <-time.After(5 * time.Second):
+ }
+
+ // Make sure the adequate message was captured
+ require.NotNil(t, capturedSend)
+
+ // Parse the message
+ var msg Message
+
+ require.NoError(t, amino.Unmarshal(capturedSend, &msg))
+
+ // Make sure the base message is valid
+ require.NoError(t, msg.ValidateBasic())
+
+ resp, ok := msg.(*Response)
+ require.True(t, ok)
+
+ // Make sure the peers are valid
+ require.Len(t, resp.Peers, maxPeersShared)
+
+ assert.True(t, slices.ContainsFunc(resp.Peers, func(addr *types.NetAddress) bool {
+ for _, localP := range peers {
+ if localP.SocketAddr().Equals(*addr) {
+ return true
+ }
+ }
+
+ return false
+ }))
+ })
+
+ t.Run("empty peers on discover", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ capturedSend []byte
+
+ mockPeer = &mock.Peer{
+ SendFn: func(chID byte, data []byte) bool {
+ require.Equal(t, Channel, chID)
+
+ capturedSend = data
+
+ return true
+ },
+ }
+
+ ps = &mockPeerSet{
+ listFn: func() []p2p.PeerConn {
+ return make([]p2p.PeerConn, 0)
+ },
+ }
+
+ mockSwitch = &mockSwitch{
+ peersFn: func() p2p.PeerSet {
+ return ps
+ },
+ }
+ )
+
+ r := NewReactor(
+ WithDiscoveryInterval(10 * time.Millisecond),
+ )
+
+ // Set the mock switch
+ r.SetSwitch(mockSwitch)
+
+ // Prepare the message
+ req := &Request{}
+
+ preparedReq, err := amino.MarshalAny(req)
+ require.NoError(t, err)
+
+ // Receive the message
+ r.Receive(Channel, mockPeer, preparedReq)
+
+ // Make sure no message was captured
+ assert.Nil(t, capturedSend)
+ })
+
+ t.Run("private peers not shared", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ publicPeers = 1
+ privatePeers = 50
+
+ peers = mock.GeneratePeers(t, publicPeers+privatePeers)
+ notifCh = make(chan struct{}, 1)
+
+ capturedSend []byte
+
+ mockPeer = &mock.Peer{
+ SendFn: func(chID byte, data []byte) bool {
+ require.Equal(t, Channel, chID)
+
+ capturedSend = data
+
+ notifCh <- struct{}{}
+
+ return true
+ },
+ }
+
+ ps = &mockPeerSet{
+ listFn: func() []p2p.PeerConn {
+ listed := make([]p2p.PeerConn, 0, len(peers))
+
+ for _, peer := range peers {
+ listed = append(listed, peer)
+ }
+
+ return listed
+ },
+ numInboundFn: func() uint64 {
+ return uint64(len(peers))
+ },
+ }
+
+ mockSwitch = &mockSwitch{
+ peersFn: func() p2p.PeerSet {
+ return ps
+ },
+ }
+ )
+
+ // Mark all except the last X peers as private
+ for _, peer := range peers[:privatePeers] {
+ peer.IsPrivateFn = func() bool {
+ return true
+ }
+ }
+
+ r := NewReactor(
+ WithDiscoveryInterval(10 * time.Millisecond),
+ )
+
+ // Set the mock switch
+ r.SetSwitch(mockSwitch)
+
+ // Prepare the message
+ req := &Request{}
+
+ preparedReq, err := amino.MarshalAny(req)
+ require.NoError(t, err)
+
+ // Receive the message
+ r.Receive(Channel, mockPeer, preparedReq)
+
+ select {
+ case <-notifCh:
+ case <-time.After(5 * time.Second):
+ }
+
+ // Make sure the adequate message was captured
+ require.NotNil(t, capturedSend)
+
+ // Parse the message
+ var msg Message
+
+ require.NoError(t, amino.Unmarshal(capturedSend, &msg))
+
+ // Make sure the base message is valid
+ require.NoError(t, msg.ValidateBasic())
+
+ resp, ok := msg.(*Response)
+ require.True(t, ok)
+
+ // Make sure the peers are valid
+ require.Len(t, resp.Peers, publicPeers)
+
+ assert.True(t, slices.ContainsFunc(resp.Peers, func(addr *types.NetAddress) bool {
+ for _, localP := range peers {
+ if localP.SocketAddr().Equals(*addr) {
+ return true
+ }
+ }
+
+ return false
+ }))
+ })
+
+ t.Run("peer response received", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ peers = mock.GeneratePeers(t, 50)
+ notifCh = make(chan struct{}, 1)
+
+ capturedDials []*types.NetAddress
+
+ ps = &mockPeerSet{
+ listFn: func() []p2p.PeerConn {
+ listed := make([]p2p.PeerConn, 0, len(peers))
+
+ for _, peer := range peers {
+ listed = append(listed, peer)
+ }
+
+ return listed
+ },
+ numInboundFn: func() uint64 {
+ return uint64(len(peers))
+ },
+ }
+
+ mockSwitch = &mockSwitch{
+ peersFn: func() p2p.PeerSet {
+ return ps
+ },
+ dialPeersFn: func(addresses ...*types.NetAddress) {
+ capturedDials = append(capturedDials, addresses...)
+
+ notifCh <- struct{}{}
+ },
+ }
+ )
+
+ r := NewReactor(
+ WithDiscoveryInterval(10 * time.Millisecond),
+ )
+
+ // Set the mock switch
+ r.SetSwitch(mockSwitch)
+
+ // Prepare the addresses
+ peerAddrs := make([]*types.NetAddress, 0, len(peers))
+
+ for _, p := range peers {
+ peerAddrs = append(peerAddrs, p.SocketAddr())
+ }
+
+ // Prepare the message
+ req := &Response{
+ Peers: peerAddrs,
+ }
+
+ preparedReq, err := amino.MarshalAny(req)
+ require.NoError(t, err)
+
+ // Receive the message
+ r.Receive(Channel, &mock.Peer{}, preparedReq)
+
+ select {
+ case <-notifCh:
+ case <-time.After(5 * time.Second):
+ }
+
+ // Make sure the correct peers were dialed
+ assert.Equal(t, capturedDials, peerAddrs)
+ })
+
+ t.Run("invalid peer response received", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ peers = mock.GeneratePeers(t, 50)
+
+ capturedDials []*types.NetAddress
+
+ ps = &mockPeerSet{
+ listFn: func() []p2p.PeerConn {
+ listed := make([]p2p.PeerConn, 0, len(peers))
+
+ for _, peer := range peers {
+ listed = append(listed, peer)
+ }
+
+ return listed
+ },
+ numInboundFn: func() uint64 {
+ return uint64(len(peers))
+ },
+ }
+
+ mockSwitch = &mockSwitch{
+ peersFn: func() p2p.PeerSet {
+ return ps
+ },
+ dialPeersFn: func(addresses ...*types.NetAddress) {
+ capturedDials = append(capturedDials, addresses...)
+ },
+ }
+ )
+
+ r := NewReactor(
+ WithDiscoveryInterval(10 * time.Millisecond),
+ )
+
+ // Set the mock switch
+ r.SetSwitch(mockSwitch)
+
+ // Prepare the message
+ req := &Response{
+ Peers: make([]*types.NetAddress, 0), // empty
+ }
+
+ preparedReq, err := amino.MarshalAny(req)
+ require.NoError(t, err)
+
+ // Receive the message
+ r.Receive(Channel, &mock.Peer{}, preparedReq)
+
+ // Make sure no peers were dialed
+ assert.Empty(t, capturedDials)
+ })
+}
diff --git a/tm2/pkg/p2p/discovery/doc.go b/tm2/pkg/p2p/discovery/doc.go
new file mode 100644
index 00000000000..5426bb41277
--- /dev/null
+++ b/tm2/pkg/p2p/discovery/doc.go
@@ -0,0 +1,9 @@
+// Package discovery contains the p2p peer discovery service (Reactor).
+// The purpose of the peer discovery service is to gather peer lists from known peers,
+// and attempt to fill out open peer connection slots in order to build out a fuller mesh.
+//
+// The implementation of the peer discovery protocol is relatively simple.
+// In essence, it asks a random peer, at a fixed interval (3s), for a list of its known peers (max 30).
+// After receiving the list, and verifying it, the node attempts to establish outbound connections to the
+// given peers.
+package discovery
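A hedged wiring sketch for the service; sw stands for any value satisfying the Switch interface the reactor is attached to, and the interval override is illustrative:

r := discovery.NewReactor(
	discovery.WithDiscoveryInterval(30 * time.Second), // override the 3s default
)

// Attach the switch the reactor queries for peers and dial requests
r.SetSwitch(sw)

// Start / Stop come from the embedded service.BaseService
if err := r.Start(); err != nil {
	panic(err)
}
defer r.Stop()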
diff --git a/tm2/pkg/p2p/discovery/mock_test.go b/tm2/pkg/p2p/discovery/mock_test.go
new file mode 100644
index 00000000000..cd543428f86
--- /dev/null
+++ b/tm2/pkg/p2p/discovery/mock_test.go
@@ -0,0 +1,135 @@
+package discovery
+
+import (
+ "net"
+
+ "github.com/gnolang/gno/tm2/pkg/p2p"
+ "github.com/gnolang/gno/tm2/pkg/p2p/events"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+)
+
+type (
+ broadcastDelegate func(byte, []byte)
+ peersDelegate func() p2p.PeerSet
+ stopPeerForErrorDelegate func(p2p.PeerConn, error)
+ dialPeersDelegate func(...*types.NetAddress)
+ subscribeDelegate func(events.EventFilter) (<-chan events.Event, func())
+)
+
+type mockSwitch struct {
+ broadcastFn broadcastDelegate
+ peersFn peersDelegate
+ stopPeerForErrorFn stopPeerForErrorDelegate
+ dialPeersFn dialPeersDelegate
+ subscribeFn subscribeDelegate
+}
+
+func (m *mockSwitch) Broadcast(chID byte, data []byte) {
+ if m.broadcastFn != nil {
+ m.broadcastFn(chID, data)
+ }
+}
+
+func (m *mockSwitch) Peers() p2p.PeerSet {
+ if m.peersFn != nil {
+ return m.peersFn()
+ }
+
+ return nil
+}
+
+func (m *mockSwitch) StopPeerForError(peer p2p.PeerConn, err error) {
+ if m.stopPeerForErrorFn != nil {
+ m.stopPeerForErrorFn(peer, err)
+ }
+}
+
+func (m *mockSwitch) DialPeers(peerAddrs ...*types.NetAddress) {
+ if m.dialPeersFn != nil {
+ m.dialPeersFn(peerAddrs...)
+ }
+}
+
+func (m *mockSwitch) Subscribe(filter events.EventFilter) (<-chan events.Event, func()) {
+ if m.subscribeFn != nil {
+ return m.subscribeFn(filter)
+ }
+
+ return nil, func() {}
+}
+
+type (
+ addDelegate func(p2p.PeerConn)
+ removeDelegate func(types.ID) bool
+ hasDelegate func(types.ID) bool
+ hasIPDelegate func(net.IP) bool
+ getPeerDelegate func(types.ID) p2p.PeerConn
+ listDelegate func() []p2p.PeerConn
+ numInboundDelegate func() uint64
+ numOutboundDelegate func() uint64
+)
+
+type mockPeerSet struct {
+ addFn addDelegate
+ removeFn removeDelegate
+ hasFn hasDelegate
+ hasIPFn hasIPDelegate
+ getFn getPeerDelegate
+ listFn listDelegate
+ numInboundFn numInboundDelegate
+ numOutboundFn numOutboundDelegate
+}
+
+func (m *mockPeerSet) Add(peer p2p.PeerConn) {
+ if m.addFn != nil {
+ m.addFn(peer)
+ }
+}
+
+func (m *mockPeerSet) Remove(key types.ID) bool {
+ if m.removeFn != nil {
+ return m.removeFn(key)
+ }
+
+ return false
+}
+
+func (m *mockPeerSet) Has(key types.ID) bool {
+ if m.hasFn != nil {
+ return m.hasFn(key)
+ }
+
+ return false
+}
+
+func (m *mockPeerSet) Get(key types.ID) p2p.PeerConn {
+ if m.getFn != nil {
+ return m.getFn(key)
+ }
+
+ return nil
+}
+
+func (m *mockPeerSet) List() []p2p.PeerConn {
+ if m.listFn != nil {
+ return m.listFn()
+ }
+
+ return nil
+}
+
+func (m *mockPeerSet) NumInbound() uint64 {
+ if m.numInboundFn != nil {
+ return m.numInboundFn()
+ }
+
+ return 0
+}
+
+func (m *mockPeerSet) NumOutbound() uint64 {
+ if m.numOutboundFn != nil {
+ return m.numOutboundFn()
+ }
+
+ return 0
+}
diff --git a/tm2/pkg/p2p/discovery/option.go b/tm2/pkg/p2p/discovery/option.go
new file mode 100644
index 00000000000..dc0fb95b109
--- /dev/null
+++ b/tm2/pkg/p2p/discovery/option.go
@@ -0,0 +1,12 @@
+package discovery
+
+import "time"
+
+type Option func(*Reactor)
+
+// WithDiscoveryInterval sets the discovery crawl interval
+func WithDiscoveryInterval(interval time.Duration) Option {
+ return func(r *Reactor) {
+ r.discoveryInterval = interval
+ }
+}
diff --git a/tm2/pkg/p2p/discovery/package.go b/tm2/pkg/p2p/discovery/package.go
new file mode 100644
index 00000000000..a3865fdf5d2
--- /dev/null
+++ b/tm2/pkg/p2p/discovery/package.go
@@ -0,0 +1,16 @@
+package discovery
+
+import (
+ "github.com/gnolang/gno/tm2/pkg/amino"
+)
+
+var Package = amino.RegisterPackage(amino.NewPackage(
+ "github.com/gnolang/gno/tm2/pkg/p2p/discovery",
+ "p2p",
+ amino.GetCallersDirname(),
+).
+ WithTypes(
+ &Request{},
+ &Response{},
+ ),
+)
diff --git a/tm2/pkg/p2p/discovery/types.go b/tm2/pkg/p2p/discovery/types.go
new file mode 100644
index 00000000000..87ea936ebb5
--- /dev/null
+++ b/tm2/pkg/p2p/discovery/types.go
@@ -0,0 +1,44 @@
+package discovery
+
+import (
+ "github.com/gnolang/gno/tm2/pkg/errors"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+)
+
+var errNoPeers = errors.New("no peers received")
+
+// Message is the wrapper for the discovery message
+type Message interface {
+ ValidateBasic() error
+}
+
+// Request is the peer discovery request.
+// It is empty by design, since it's used as
+// a notification type
+type Request struct{}
+
+func (r *Request) ValidateBasic() error {
+ return nil
+}
+
+// Response is the peer discovery response
+type Response struct {
+ Peers []*types.NetAddress // the peer set returned by the peer
+}
+
+func (r *Response) ValidateBasic() error {
+ // Make sure at least some peers were received
+ if len(r.Peers) == 0 {
+ return errNoPeers
+ }
+
+ // Make sure the returned peer dial
+ // addresses are valid
+ for _, peer := range r.Peers {
+ if err := peer.Validate(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/tm2/pkg/p2p/discovery/types_test.go b/tm2/pkg/p2p/discovery/types_test.go
new file mode 100644
index 00000000000..0ac2c16f4e5
--- /dev/null
+++ b/tm2/pkg/p2p/discovery/types_test.go
@@ -0,0 +1,80 @@
+package discovery
+
+import (
+ "net"
+ "testing"
+
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// generateNetAddrs generates random net addresses
+func generateNetAddrs(t *testing.T, count int) []*types.NetAddress {
+ t.Helper()
+
+ addrs := make([]*types.NetAddress, count)
+
+ for i := 0; i < count; i++ {
+ var (
+ key = types.GenerateNodeKey()
+ address = "127.0.0.1:8080"
+ )
+
+ tcpAddr, err := net.ResolveTCPAddr("tcp", address)
+ require.NoError(t, err)
+
+ addr, err := types.NewNetAddress(key.ID(), tcpAddr)
+ require.NoError(t, err)
+
+ addrs[i] = addr
+ }
+
+ return addrs
+}
+
+func TestRequest_ValidateBasic(t *testing.T) {
+ t.Parallel()
+
+ r := &Request{}
+
+ assert.NoError(t, r.ValidateBasic())
+}
+
+func TestResponse_ValidateBasic(t *testing.T) {
+ t.Parallel()
+
+ t.Run("empty peer set", func(t *testing.T) {
+ t.Parallel()
+
+ r := &Response{
+ Peers: make([]*types.NetAddress, 0),
+ }
+
+ assert.ErrorIs(t, r.ValidateBasic(), errNoPeers)
+ })
+
+ t.Run("invalid peer dial address", func(t *testing.T) {
+ t.Parallel()
+
+ r := &Response{
+ Peers: []*types.NetAddress{
+ {
+ ID: "", // invalid ID
+ },
+ },
+ }
+
+ assert.Error(t, r.ValidateBasic())
+ })
+
+ t.Run("valid peer set", func(t *testing.T) {
+ t.Parallel()
+
+ r := &Response{
+ Peers: generateNetAddrs(t, 10),
+ }
+
+ assert.NoError(t, r.ValidateBasic())
+ })
+}
diff --git a/tm2/pkg/p2p/errors.go b/tm2/pkg/p2p/errors.go
deleted file mode 100644
index d4ad58e8ab5..00000000000
--- a/tm2/pkg/p2p/errors.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package p2p
-
-import (
- "fmt"
- "net"
-)
-
-// FilterTimeoutError indicates that a filter operation timed out.
-type FilterTimeoutError struct{}
-
-func (e FilterTimeoutError) Error() string {
- return "filter timed out"
-}
-
-// RejectedError indicates that a Peer was rejected carrying additional
-// information as to the reason.
-type RejectedError struct {
- addr NetAddress
- conn net.Conn
- err error
- id ID
- isAuthFailure bool
- isDuplicate bool
- isFiltered bool
- isIncompatible bool
- isNodeInfoInvalid bool
- isSelf bool
-}
-
-// Addr returns the NetAddress for the rejected Peer.
-func (e RejectedError) Addr() NetAddress {
- return e.addr
-}
-
-func (e RejectedError) Error() string {
- if e.isAuthFailure {
- return fmt.Sprintf("auth failure: %s", e.err)
- }
-
- if e.isDuplicate {
- if e.conn != nil {
- return fmt.Sprintf(
- "duplicate CONN<%s>",
- e.conn.RemoteAddr().String(),
- )
- }
- if !e.id.IsZero() {
- return fmt.Sprintf("duplicate ID<%v>", e.id)
- }
- }
-
- if e.isFiltered {
- if e.conn != nil {
- return fmt.Sprintf(
- "filtered CONN<%s>: %s",
- e.conn.RemoteAddr().String(),
- e.err,
- )
- }
-
- if !e.id.IsZero() {
- return fmt.Sprintf("filtered ID<%v>: %s", e.id, e.err)
- }
- }
-
- if e.isIncompatible {
- return fmt.Sprintf("incompatible: %s", e.err)
- }
-
- if e.isNodeInfoInvalid {
- return fmt.Sprintf("invalid NodeInfo: %s", e.err)
- }
-
- if e.isSelf {
- return fmt.Sprintf("self ID<%v>", e.id)
- }
-
- return fmt.Sprintf("%s", e.err)
-}
-
-// IsAuthFailure when Peer authentication was unsuccessful.
-func (e RejectedError) IsAuthFailure() bool { return e.isAuthFailure }
-
-// IsDuplicate when Peer ID or IP are present already.
-func (e RejectedError) IsDuplicate() bool { return e.isDuplicate }
-
-// IsFiltered when Peer ID or IP was filtered.
-func (e RejectedError) IsFiltered() bool { return e.isFiltered }
-
-// IsIncompatible when Peer NodeInfo is not compatible with our own.
-func (e RejectedError) IsIncompatible() bool { return e.isIncompatible }
-
-// IsNodeInfoInvalid when the sent NodeInfo is not valid.
-func (e RejectedError) IsNodeInfoInvalid() bool { return e.isNodeInfoInvalid }
-
-// IsSelf when Peer is our own node.
-func (e RejectedError) IsSelf() bool { return e.isSelf }
-
-// SwitchDuplicatePeerIDError to be raised when a peer is connecting with a known
-// ID.
-type SwitchDuplicatePeerIDError struct {
- ID ID
-}
-
-func (e SwitchDuplicatePeerIDError) Error() string {
- return fmt.Sprintf("duplicate peer ID %v", e.ID)
-}
-
-// SwitchDuplicatePeerIPError to be raised when a peer is connecting with a known
-// IP.
-type SwitchDuplicatePeerIPError struct {
- IP net.IP
-}
-
-func (e SwitchDuplicatePeerIPError) Error() string {
- return fmt.Sprintf("duplicate peer IP %v", e.IP.String())
-}
-
-// SwitchConnectToSelfError to be raised when trying to connect to itself.
-type SwitchConnectToSelfError struct {
- Addr *NetAddress
-}
-
-func (e SwitchConnectToSelfError) Error() string {
- return fmt.Sprintf("connect to self: %v", e.Addr)
-}
-
-type SwitchAuthenticationFailureError struct {
- Dialed *NetAddress
- Got ID
-}
-
-func (e SwitchAuthenticationFailureError) Error() string {
- return fmt.Sprintf(
- "failed to authenticate peer. Dialed %v, but got peer with ID %s",
- e.Dialed,
- e.Got,
- )
-}
-
-// TransportClosedError is raised when the Transport has been closed.
-type TransportClosedError struct{}
-
-func (e TransportClosedError) Error() string {
- return "transport has been closed"
-}
-
-// -------------------------------------------------------------------
-
-type NetAddressNoIDError struct {
- Addr string
-}
-
-func (e NetAddressNoIDError) Error() string {
- return fmt.Sprintf("address (%s) does not contain ID", e.Addr)
-}
-
-type NetAddressInvalidError struct {
- Addr string
- Err error
-}
-
-func (e NetAddressInvalidError) Error() string {
- return fmt.Sprintf("invalid address (%s): %v", e.Addr, e.Err)
-}
-
-type NetAddressLookupError struct {
- Addr string
- Err error
-}
-
-func (e NetAddressLookupError) Error() string {
- return fmt.Sprintf("error looking up host (%s): %v", e.Addr, e.Err)
-}
-
-// CurrentlyDialingOrExistingAddressError indicates that we're currently
-// dialing this address or it belongs to an existing peer.
-type CurrentlyDialingOrExistingAddressError struct {
- Addr string
-}
-
-func (e CurrentlyDialingOrExistingAddressError) Error() string {
- return fmt.Sprintf("connection with %s has been established or dialed", e.Addr)
-}
diff --git a/tm2/pkg/p2p/events/doc.go b/tm2/pkg/p2p/events/doc.go
new file mode 100644
index 00000000000..a624102379e
--- /dev/null
+++ b/tm2/pkg/p2p/events/doc.go
@@ -0,0 +1,3 @@
+// Package events contains a simple p2p event system implementation that simplifies asynchronous event flows in the
+// p2p module. The event subscriptions allow for event filtering, which eases the load on the event notification flow.
+package events
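A minimal sketch of the subscribe / notify flow this package provides; the event values and the printing are illustrative:

es := events.New()

// Only deliver disconnect events to this subscriber
ch, unsubscribe := es.Subscribe(func(e events.Event) bool {
	return e.Type() == events.PeerDisconnected
})
defer unsubscribe()

go func() {
	for ev := range ch {
		disc := ev.(events.PeerDisconnectedEvent)
		fmt.Println("peer dropped:", disc.PeerID, disc.Reason)
	}
}()

// Notify never blocks on slow subscribers; full buffers drop the event
es.Notify(events.PeerDisconnectedEvent{PeerID: "node-1"})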
diff --git a/tm2/pkg/p2p/events/events.go b/tm2/pkg/p2p/events/events.go
new file mode 100644
index 00000000000..1eb4699fb45
--- /dev/null
+++ b/tm2/pkg/p2p/events/events.go
@@ -0,0 +1,112 @@
+package events
+
+import (
+ "sync"
+
+ "github.com/rs/xid"
+)
+
+// EventFilter is the filter function used to
+// filter incoming p2p events. A false flag will
+// consider the event as irrelevant
+type EventFilter func(Event) bool
+
+// Events is the p2p event switch
+type Events struct {
+ subs subscriptions
+ subscriptionsMux sync.RWMutex
+}
+
+// New creates a new event subscription manager
+func New() *Events {
+ return &Events{
+ subs: make(subscriptions),
+ }
+}
+
+// Subscribe registers a new filtered event listener
+func (es *Events) Subscribe(filterFn EventFilter) (<-chan Event, func()) {
+ es.subscriptionsMux.Lock()
+ defer es.subscriptionsMux.Unlock()
+
+ // Create a new subscription
+ id, ch := es.subs.add(filterFn)
+
+ // Create the unsubscribe callback
+ unsubscribeFn := func() {
+ es.subscriptionsMux.Lock()
+ defer es.subscriptionsMux.Unlock()
+
+ es.subs.remove(id)
+ }
+
+ return ch, unsubscribeFn
+}
+
+// Notify notifies all subscribers of an incoming event [BLOCKING]
+func (es *Events) Notify(event Event) {
+ es.subscriptionsMux.RLock()
+ defer es.subscriptionsMux.RUnlock()
+
+ es.subs.notify(event)
+}
+
+type (
+ // subscriptions holds the corresponding subscription information
+ subscriptions map[string]subscription // subscription ID -> subscription
+
+ // subscription wraps the subscription notification channel,
+ // and the event filter
+ subscription struct {
+ ch chan Event
+ filterFn EventFilter
+ }
+)
+
+// add adds a new subscription to the subscription map.
+// Returns the subscription ID, and update channel
+func (s *subscriptions) add(filterFn EventFilter) (string, chan Event) {
+ var (
+ id = xid.New().String()
+ // Since event notification is non-blocking,
+ // the event buffer should be sufficiently
+ // large for most use-cases. Subscribers under
+ // heavy event load should drain the channel
+ // promptly caller-side, since events are
+ // dropped once the buffer is full
+ ch = make(chan Event, 100)
+ )
+
+ (*s)[id] = subscription{
+ ch: ch,
+ filterFn: filterFn,
+ }
+
+ return id, ch
+}
+
+// remove removes the given subscription
+func (s *subscriptions) remove(id string) {
+ if sub, exists := (*s)[id]; exists {
+ // Close the notification channel
+ close(sub.ch)
+ }
+
+ // Delete the subscription
+ delete(*s, id)
+}
+
+// notify notifies all subscription listeners,
+// if their filters pass
+func (s *subscriptions) notify(event Event) {
+ // Notify the listeners
+ for _, sub := range *s {
+ if !sub.filterFn(event) {
+ continue
+ }
+
+ select {
+ case sub.ch <- event:
+ default: // non-blocking
+ }
+ }
+}
diff --git a/tm2/pkg/p2p/events/events_test.go b/tm2/pkg/p2p/events/events_test.go
new file mode 100644
index 00000000000..a0feafceddb
--- /dev/null
+++ b/tm2/pkg/p2p/events/events_test.go
@@ -0,0 +1,94 @@
+package events
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// generateEvents generates p2p events
+func generateEvents(count int) []Event {
+ events := make([]Event, 0, count)
+
+ for i := range count {
+ var event Event
+
+ if i%2 == 0 {
+ event = PeerConnectedEvent{
+ PeerID: types.ID(fmt.Sprintf("peer-%d", i)),
+ }
+ } else {
+ event = PeerDisconnectedEvent{
+ PeerID: types.ID(fmt.Sprintf("peer-%d", i)),
+ }
+ }
+
+ events = append(events, event)
+ }
+
+ return events
+}
+
+func TestEvents_Subscribe(t *testing.T) {
+ t.Parallel()
+
+ var (
+ capturedEvents []Event
+
+ events = generateEvents(10)
+ subFn = func(e Event) bool {
+ return e.Type() == PeerDisconnected
+ }
+ )
+
+ // Create the events manager
+ e := New()
+
+ // Subscribe to events
+ ch, unsubFn := e.Subscribe(subFn)
+ defer unsubFn()
+
+ // Listen for the events
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+
+ timeout := time.After(5 * time.Second)
+
+ for {
+ select {
+ case ev := <-ch:
+ capturedEvents = append(capturedEvents, ev)
+
+ if len(capturedEvents) == len(events)/2 {
+ return
+ }
+ case <-timeout:
+ return
+ }
+ }
+ }()
+
+ // Send out the events
+ for _, ev := range events {
+ e.Notify(ev)
+ }
+
+ wg.Wait()
+
+ // Make sure the events were captured
+ // and filtered properly
+ require.Len(t, capturedEvents, len(events)/2)
+
+ for _, ev := range capturedEvents {
+ assert.Equal(t, ev.Type(), PeerDisconnected)
+ }
+}
diff --git a/tm2/pkg/p2p/events/types.go b/tm2/pkg/p2p/events/types.go
new file mode 100644
index 00000000000..cbaac1816ff
--- /dev/null
+++ b/tm2/pkg/p2p/events/types.go
@@ -0,0 +1,39 @@
+package events
+
+import (
+ "net"
+
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+)
+
+type EventType string
+
+const (
+ PeerConnected EventType = "PeerConnected" // emitted when a fresh peer connects
+ PeerDisconnected EventType = "PeerDisconnected" // emitted when a peer disconnects
+)
+
+// Event is a generic p2p event
+type Event interface {
+ // Type returns the type information for the event
+ Type() EventType
+}
+
+type PeerConnectedEvent struct {
+ PeerID types.ID // the ID of the peer
+ Address net.Addr // the remote address of the peer
+}
+
+func (p PeerConnectedEvent) Type() EventType {
+ return PeerConnected
+}
+
+type PeerDisconnectedEvent struct {
+ PeerID types.ID // the ID of the peer
+ Address net.Addr // the remote address of the peer
+ Reason error // the disconnect reason, if any
+}
+
+func (p PeerDisconnectedEvent) Type() EventType {
+ return PeerDisconnected
+}
diff --git a/tm2/pkg/p2p/fuzz.go b/tm2/pkg/p2p/fuzz.go
deleted file mode 100644
index 03cf88cf750..00000000000
--- a/tm2/pkg/p2p/fuzz.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package p2p
-
-import (
- "net"
- "sync"
- "time"
-
- "github.com/gnolang/gno/tm2/pkg/p2p/config"
- "github.com/gnolang/gno/tm2/pkg/random"
-)
-
-// FuzzedConnection wraps any net.Conn and depending on the mode either delays
-// reads/writes or randomly drops reads/writes/connections.
-type FuzzedConnection struct {
- conn net.Conn
-
- mtx sync.Mutex
- start <-chan time.Time
- active bool
-
- config *config.FuzzConnConfig
-}
-
-// FuzzConnAfterFromConfig creates a new FuzzedConnection from a config.
-// Fuzzing starts when the duration elapses.
-func FuzzConnAfterFromConfig(
- conn net.Conn,
- d time.Duration,
- config *config.FuzzConnConfig,
-) net.Conn {
- return &FuzzedConnection{
- conn: conn,
- start: time.After(d),
- active: false,
- config: config,
- }
-}
-
-// Config returns the connection's config.
-func (fc *FuzzedConnection) Config() *config.FuzzConnConfig {
- return fc.config
-}
-
-// Read implements net.Conn.
-func (fc *FuzzedConnection) Read(data []byte) (n int, err error) {
- if fc.fuzz() {
- return 0, nil
- }
- return fc.conn.Read(data)
-}
-
-// Write implements net.Conn.
-func (fc *FuzzedConnection) Write(data []byte) (n int, err error) {
- if fc.fuzz() {
- return 0, nil
- }
- return fc.conn.Write(data)
-}
-
-// Close implements net.Conn.
-func (fc *FuzzedConnection) Close() error { return fc.conn.Close() }
-
-// LocalAddr implements net.Conn.
-func (fc *FuzzedConnection) LocalAddr() net.Addr { return fc.conn.LocalAddr() }
-
-// RemoteAddr implements net.Conn.
-func (fc *FuzzedConnection) RemoteAddr() net.Addr { return fc.conn.RemoteAddr() }
-
-// SetDeadline implements net.Conn.
-func (fc *FuzzedConnection) SetDeadline(t time.Time) error { return fc.conn.SetDeadline(t) }
-
-// SetReadDeadline implements net.Conn.
-func (fc *FuzzedConnection) SetReadDeadline(t time.Time) error {
- return fc.conn.SetReadDeadline(t)
-}
-
-// SetWriteDeadline implements net.Conn.
-func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
- return fc.conn.SetWriteDeadline(t)
-}
-
-func (fc *FuzzedConnection) randomDuration() time.Duration {
- maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000)
- return time.Millisecond * time.Duration(random.RandInt()%maxDelayMillis) //nolint: gas
-}
-
-// implements the fuzz (delay, kill conn)
-// and returns whether or not the read/write should be ignored
-func (fc *FuzzedConnection) fuzz() bool {
- if !fc.shouldFuzz() {
- return false
- }
-
- switch fc.config.Mode {
- case config.FuzzModeDrop:
- // randomly drop the r/w, drop the conn, or sleep
- r := random.RandFloat64()
- switch {
- case r <= fc.config.ProbDropRW:
- return true
- case r < fc.config.ProbDropRW+fc.config.ProbDropConn:
- // XXX: can't this fail because machine precision?
- // XXX: do we need an error?
- fc.Close() //nolint: errcheck, gas
- return true
- case r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep:
- time.Sleep(fc.randomDuration())
- }
- case config.FuzzModeDelay:
- // sleep a bit
- time.Sleep(fc.randomDuration())
- }
- return false
-}
-
-func (fc *FuzzedConnection) shouldFuzz() bool {
- if fc.active {
- return true
- }
-
- fc.mtx.Lock()
- defer fc.mtx.Unlock()
-
- select {
- case <-fc.start:
- fc.active = true
- return true
- default:
- return false
- }
-}
diff --git a/tm2/pkg/p2p/key.go b/tm2/pkg/p2p/key.go
deleted file mode 100644
index a41edeb07f8..00000000000
--- a/tm2/pkg/p2p/key.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package p2p
-
-import (
- "bytes"
- "fmt"
- "os"
-
- "github.com/gnolang/gno/tm2/pkg/amino"
- "github.com/gnolang/gno/tm2/pkg/crypto"
- "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
- osm "github.com/gnolang/gno/tm2/pkg/os"
-)
-
-// ------------------------------------------------------------------------------
-// Persistent peer ID
-// TODO: encrypt on disk
-
-// NodeKey is the persistent peer key.
-// It contains the nodes private key for authentication.
-// NOTE: keep in sync with gno.land/cmd/gnoland/secrets.go
-type NodeKey struct {
- crypto.PrivKey `json:"priv_key"` // our priv key
-}
-
-func (nk NodeKey) ID() ID {
- return nk.PubKey().Address().ID()
-}
-
-// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath.
-// If the file does not exist, it generates and saves a new NodeKey.
-func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
- if osm.FileExists(filePath) {
- nodeKey, err := LoadNodeKey(filePath)
- if err != nil {
- return nil, err
- }
- return nodeKey, nil
- }
- return genNodeKey(filePath)
-}
-
-func LoadNodeKey(filePath string) (*NodeKey, error) {
- jsonBytes, err := os.ReadFile(filePath)
- if err != nil {
- return nil, err
- }
- nodeKey := new(NodeKey)
- err = amino.UnmarshalJSON(jsonBytes, nodeKey)
- if err != nil {
- return nil, fmt.Errorf("Error reading NodeKey from %v: %w", filePath, err)
- }
- return nodeKey, nil
-}
-
-func genNodeKey(filePath string) (*NodeKey, error) {
- privKey := ed25519.GenPrivKey()
- nodeKey := &NodeKey{
- PrivKey: privKey,
- }
-
- jsonBytes, err := amino.MarshalJSON(nodeKey)
- if err != nil {
- return nil, err
- }
- err = os.WriteFile(filePath, jsonBytes, 0o600)
- if err != nil {
- return nil, err
- }
- return nodeKey, nil
-}
-
-// ------------------------------------------------------------------------------
-
-// MakePoWTarget returns the big-endian encoding of 2^(targetBits - difficulty) - 1.
-// It can be used as a Proof of Work target.
-// NOTE: targetBits must be a multiple of 8 and difficulty must be less than targetBits.
-func MakePoWTarget(difficulty, targetBits uint) []byte {
- if targetBits%8 != 0 {
- panic(fmt.Sprintf("targetBits (%d) not a multiple of 8", targetBits))
- }
- if difficulty >= targetBits {
- panic(fmt.Sprintf("difficulty (%d) >= targetBits (%d)", difficulty, targetBits))
- }
- targetBytes := targetBits / 8
- zeroPrefixLen := (int(difficulty) / 8)
- prefix := bytes.Repeat([]byte{0}, zeroPrefixLen)
- mod := (difficulty % 8)
- if mod > 0 {
- nonZeroPrefix := byte(1<<(8-mod) - 1)
- prefix = append(prefix, nonZeroPrefix)
- }
- tailLen := int(targetBytes) - len(prefix)
- return append(prefix, bytes.Repeat([]byte{0xFF}, tailLen)...)
-}
diff --git a/tm2/pkg/p2p/key_test.go b/tm2/pkg/p2p/key_test.go
deleted file mode 100644
index 4f67cc0a5da..00000000000
--- a/tm2/pkg/p2p/key_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package p2p
-
-import (
- "bytes"
- "os"
- "path/filepath"
- "testing"
-
- "github.com/gnolang/gno/tm2/pkg/random"
- "github.com/stretchr/testify/assert"
-)
-
-func TestLoadOrGenNodeKey(t *testing.T) {
- t.Parallel()
-
- filePath := filepath.Join(os.TempDir(), random.RandStr(12)+"_peer_id.json")
-
- nodeKey, err := LoadOrGenNodeKey(filePath)
- assert.Nil(t, err)
-
- nodeKey2, err := LoadOrGenNodeKey(filePath)
- assert.Nil(t, err)
-
- assert.Equal(t, nodeKey, nodeKey2)
-}
-
-// ----------------------------------------------------------
-
-func padBytes(bz []byte, targetBytes int) []byte {
- return append(bz, bytes.Repeat([]byte{0xFF}, targetBytes-len(bz))...)
-}
-
-func TestPoWTarget(t *testing.T) {
- t.Parallel()
-
- targetBytes := 20
- cases := []struct {
- difficulty uint
- target []byte
- }{
- {0, padBytes([]byte{}, targetBytes)},
- {1, padBytes([]byte{127}, targetBytes)},
- {8, padBytes([]byte{0}, targetBytes)},
- {9, padBytes([]byte{0, 127}, targetBytes)},
- {10, padBytes([]byte{0, 63}, targetBytes)},
- {16, padBytes([]byte{0, 0}, targetBytes)},
- {17, padBytes([]byte{0, 0, 127}, targetBytes)},
- }
-
- for _, c := range cases {
- assert.Equal(t, MakePoWTarget(c.difficulty, 20*8), c.target)
- }
-}
diff --git a/tm2/pkg/p2p/mock/peer.go b/tm2/pkg/p2p/mock/peer.go
index 906c168c3a8..e5a01952831 100644
--- a/tm2/pkg/p2p/mock/peer.go
+++ b/tm2/pkg/p2p/mock/peer.go
@@ -1,68 +1,377 @@
package mock
import (
+ "fmt"
+ "io"
+ "log/slog"
"net"
+ "testing"
+ "time"
- "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
- "github.com/gnolang/gno/tm2/pkg/p2p"
"github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/gnolang/gno/tm2/pkg/service"
+ "github.com/stretchr/testify/require"
)
+type (
+ flushStopDelegate func()
+ idDelegate func() types.ID
+ remoteIPDelegate func() net.IP
+ remoteAddrDelegate func() net.Addr
+ isOutboundDelegate func() bool
+ isPersistentDelegate func() bool
+ isPrivateDelegate func() bool
+ closeConnDelegate func() error
+ nodeInfoDelegate func() types.NodeInfo
+ statusDelegate func() conn.ConnectionStatus
+ socketAddrDelegate func() *types.NetAddress
+ sendDelegate func(byte, []byte) bool
+ trySendDelegate func(byte, []byte) bool
+ setDelegate func(string, any)
+ getDelegate func(string) any
+ stopDelegate func() error
+)
+
+// GeneratePeers generates random peers
+func GeneratePeers(t *testing.T, count int) []*Peer {
+ t.Helper()
+
+ peers := make([]*Peer, count)
+
+ for i := range count {
+ var (
+ key = types.GenerateNodeKey()
+ address = "127.0.0.1:8080"
+ )
+
+ tcpAddr, err := net.ResolveTCPAddr("tcp", address)
+ require.NoError(t, err)
+
+ addr, err := types.NewNetAddress(key.ID(), tcpAddr)
+ require.NoError(t, err)
+
+ p := &Peer{
+ IDFn: func() types.ID {
+ return key.ID()
+ },
+ NodeInfoFn: func() types.NodeInfo {
+ return types.NodeInfo{
+ PeerID: key.ID(),
+ }
+ },
+ SocketAddrFn: func() *types.NetAddress {
+ return addr
+ },
+ }
+
+ p.BaseService = *service.NewBaseService(
+ slog.New(slog.NewTextHandler(io.Discard, nil)),
+ fmt.Sprintf("peer-%d", i),
+ p,
+ )
+
+ peers[i] = p
+ }
+
+ return peers
+}
+
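+// Peer is a configurable mock peer; each method falls back to a no-op default when its delegate is unset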
type Peer struct {
- *service.BaseService
- ip net.IP
- id p2p.ID
- addr *p2p.NetAddress
- kv map[string]interface{}
- Outbound, Persistent bool
-}
-
-// NewPeer creates and starts a new mock peer. If the ip
-// is nil, random routable address is used.
-func NewPeer(ip net.IP) *Peer {
- var netAddr *p2p.NetAddress
- if ip == nil {
- _, netAddr = p2p.CreateRoutableAddr()
- } else {
- netAddr = p2p.NewNetAddressFromIPPort("", ip, 26656)
- }
- nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
- netAddr.ID = nodeKey.ID()
- mp := &Peer{
- ip: ip,
- id: nodeKey.ID(),
- addr: netAddr,
- kv: make(map[string]interface{}),
- }
- mp.BaseService = service.NewBaseService(nil, "MockPeer", mp)
- mp.Start()
- return mp
-}
-
-func (mp *Peer) FlushStop() { mp.Stop() }
-func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true }
-func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true }
-func (mp *Peer) NodeInfo() p2p.NodeInfo {
- return p2p.NodeInfo{
- NetAddress: mp.addr,
- }
-}
-func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
-func (mp *Peer) ID() p2p.ID { return mp.id }
-func (mp *Peer) IsOutbound() bool { return mp.Outbound }
-func (mp *Peer) IsPersistent() bool { return mp.Persistent }
-func (mp *Peer) Get(key string) interface{} {
- if value, ok := mp.kv[key]; ok {
- return value
+ service.BaseService
+
+ FlushStopFn flushStopDelegate
+ IDFn idDelegate
+ RemoteIPFn remoteIPDelegate
+ RemoteAddrFn remoteAddrDelegate
+ IsOutboundFn isOutboundDelegate
+ IsPersistentFn isPersistentDelegate
+ IsPrivateFn isPrivateDelegate
+ CloseConnFn closeConnDelegate
+ NodeInfoFn nodeInfoDelegate
+ StopFn stopDelegate
+ StatusFn statusDelegate
+ SocketAddrFn socketAddrDelegate
+ SendFn sendDelegate
+ TrySendFn trySendDelegate
+ SetFn setDelegate
+ GetFn getDelegate
+}
+
+func (m *Peer) FlushStop() {
+ if m.FlushStopFn != nil {
+ m.FlushStopFn()
+ }
+}
+
+func (m *Peer) ID() types.ID {
+ if m.IDFn != nil {
+ return m.IDFn()
+ }
+
+ return ""
+}
+
+func (m *Peer) RemoteIP() net.IP {
+ if m.RemoteIPFn != nil {
+ return m.RemoteIPFn()
+ }
+
+ return nil
+}
+
+func (m *Peer) RemoteAddr() net.Addr {
+ if m.RemoteAddrFn != nil {
+ return m.RemoteAddrFn()
+ }
+
+ return nil
+}
+
+func (m *Peer) Stop() error {
+ if m.StopFn != nil {
+ return m.StopFn()
+ }
+
+ return nil
+}
+
+func (m *Peer) IsOutbound() bool {
+ if m.IsOutboundFn != nil {
+ return m.IsOutboundFn()
+ }
+
+ return false
+}
+
+func (m *Peer) IsPersistent() bool {
+ if m.IsPersistentFn != nil {
+ return m.IsPersistentFn()
+ }
+
+ return false
+}
+
+func (m *Peer) IsPrivate() bool {
+ if m.IsPrivateFn != nil {
+ return m.IsPrivateFn()
+ }
+
+ return false
+}
+
+func (m *Peer) CloseConn() error {
+ if m.CloseConnFn != nil {
+ return m.CloseConnFn()
+ }
+
+ return nil
+}
+
+func (m *Peer) NodeInfo() types.NodeInfo {
+ if m.NodeInfoFn != nil {
+ return m.NodeInfoFn()
+ }
+
+ return types.NodeInfo{}
+}
+
+func (m *Peer) Status() conn.ConnectionStatus {
+ if m.StatusFn != nil {
+ return m.StatusFn()
+ }
+
+ return conn.ConnectionStatus{}
+}
+
+func (m *Peer) SocketAddr() *types.NetAddress {
+ if m.SocketAddrFn != nil {
+ return m.SocketAddrFn()
+ }
+
+ return nil
+}
+
+func (m *Peer) Send(classifier byte, data []byte) bool {
+ if m.SendFn != nil {
+ return m.SendFn(classifier, data)
+ }
+
+ return false
+}
+
+func (m *Peer) TrySend(classifier byte, data []byte) bool {
+ if m.TrySendFn != nil {
+ return m.TrySendFn(classifier, data)
+ }
+
+ return false
+}
+
+func (m *Peer) Set(key string, data any) {
+ if m.SetFn != nil {
+ m.SetFn(key, data)
+ }
+}
+
+func (m *Peer) Get(key string) any {
+ if m.GetFn != nil {
+ return m.GetFn(key)
+ }
+
+ return nil
+}
+
+type (
+ readDelegate func([]byte) (int, error)
+ writeDelegate func([]byte) (int, error)
+ closeDelegate func() error
+ localAddrDelegate func() net.Addr
+ setDeadlineDelegate func(time.Time) error
+)
+
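+// Conn is a configurable mock net.Conn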
+type Conn struct {
+ ReadFn readDelegate
+ WriteFn writeDelegate
+ CloseFn closeDelegate
+ LocalAddrFn localAddrDelegate
+ RemoteAddrFn remoteAddrDelegate
+ SetDeadlineFn setDeadlineDelegate
+ SetReadDeadlineFn setDeadlineDelegate
+ SetWriteDeadlineFn setDeadlineDelegate
+}
+
+func (m *Conn) Read(b []byte) (int, error) {
+ if m.ReadFn != nil {
+ return m.ReadFn(b)
+ }
+
+ return 0, nil
+}
+
+func (m *Conn) Write(b []byte) (int, error) {
+ if m.WriteFn != nil {
+ return m.WriteFn(b)
}
+
+ return 0, nil
+}
+
+func (m *Conn) Close() error {
+ if m.CloseFn != nil {
+ return m.CloseFn()
+ }
+
return nil
}
-func (mp *Peer) Set(key string, value interface{}) {
- mp.kv[key] = value
+func (m *Conn) LocalAddr() net.Addr {
+ if m.LocalAddrFn != nil {
+ return m.LocalAddrFn()
+ }
+
+ return nil
+}
+
+func (m *Conn) RemoteAddr() net.Addr {
+ if m.RemoteAddrFn != nil {
+ return m.RemoteAddrFn()
+ }
+
+ return nil
+}
+
+func (m *Conn) SetDeadline(t time.Time) error {
+ if m.SetDeadlineFn != nil {
+ return m.SetDeadlineFn(t)
+ }
+
+ return nil
+}
+
+func (m *Conn) SetReadDeadline(t time.Time) error {
+ if m.SetReadDeadlineFn != nil {
+ return m.SetReadDeadlineFn(t)
+ }
+
+ return nil
+}
+
+func (m *Conn) SetWriteDeadline(t time.Time) error {
+ if m.SetWriteDeadlineFn != nil {
+ return m.SetWriteDeadlineFn(t)
+ }
+
+ return nil
+}
+
+type (
+ startDelegate func() error
+ stringDelegate func() string
+)
+
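+// MConn is a configurable mock multiplex connection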
+type MConn struct {
+ FlushFn flushStopDelegate
+ StartFn startDelegate
+ StopFn stopDelegate
+ SendFn sendDelegate
+ TrySendFn trySendDelegate
+ StatusFn statusDelegate
+ StringFn stringDelegate
+}
+
+func (m *MConn) FlushStop() {
+ if m.FlushFn != nil {
+ m.FlushFn()
+ }
+}
+
+func (m *MConn) Start() error {
+ if m.StartFn != nil {
+ return m.StartFn()
+ }
+
+ return nil
+}
+
+func (m *MConn) Stop() error {
+ if m.StopFn != nil {
+ return m.StopFn()
+ }
+
+ return nil
+}
+
+func (m *MConn) Send(ch byte, data []byte) bool {
+ if m.SendFn != nil {
+ return m.SendFn(ch, data)
+ }
+
+ return false
+}
+
+func (m *MConn) TrySend(ch byte, data []byte) bool {
+ if m.TrySendFn != nil {
+ return m.TrySendFn(ch, data)
+ }
+
+ return false
+}
+
+func (m *MConn) SetLogger(_ *slog.Logger) {}
+
+func (m *MConn) Status() conn.ConnectionStatus {
+ if m.StatusFn != nil {
+ return m.StatusFn()
+ }
+
+ return conn.ConnectionStatus{}
+}
+
+func (m *MConn) String() string {
+ if m.StringFn != nil {
+ return m.StringFn()
+ }
+
+ return ""
}
-func (mp *Peer) RemoteIP() net.IP { return mp.ip }
-func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr }
-func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
-func (mp *Peer) CloseConn() error { return nil }
diff --git a/tm2/pkg/p2p/mock/reactor.go b/tm2/pkg/p2p/mock/reactor.go
deleted file mode 100644
index fe123fdc0b2..00000000000
--- a/tm2/pkg/p2p/mock/reactor.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package mock
-
-import (
- "github.com/gnolang/gno/tm2/pkg/log"
- "github.com/gnolang/gno/tm2/pkg/p2p"
- "github.com/gnolang/gno/tm2/pkg/p2p/conn"
-)
-
-type Reactor struct {
- p2p.BaseReactor
-}
-
-func NewReactor() *Reactor {
- r := &Reactor{}
- r.BaseReactor = *p2p.NewBaseReactor("Reactor", r)
- r.SetLogger(log.NewNoopLogger())
- return r
-}
-
-func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return []*conn.ChannelDescriptor{} }
-func (r *Reactor) AddPeer(peer p2p.Peer) {}
-func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {}
-func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {}
diff --git a/tm2/pkg/p2p/mock_test.go b/tm2/pkg/p2p/mock_test.go
new file mode 100644
index 00000000000..5fd7f947b71
--- /dev/null
+++ b/tm2/pkg/p2p/mock_test.go
@@ -0,0 +1,404 @@
+package p2p
+
+import (
+ "context"
+ "log/slog"
+ "net"
+ "time"
+
+ "github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+)
+
+type (
+ netAddressDelegate func() types.NetAddress
+ acceptDelegate func(context.Context, PeerBehavior) (PeerConn, error)
+ dialDelegate func(context.Context, types.NetAddress, PeerBehavior) (PeerConn, error)
+ removeDelegate func(PeerConn)
+)
+
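+// mockTransport is a configurable transport mock used in tests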
+type mockTransport struct {
+ netAddressFn netAddressDelegate
+ acceptFn acceptDelegate
+ dialFn dialDelegate
+ removeFn removeDelegate
+}
+
+func (m *mockTransport) NetAddress() types.NetAddress {
+ if m.netAddressFn != nil {
+ return m.netAddressFn()
+ }
+
+ return types.NetAddress{}
+}
+
+func (m *mockTransport) Accept(ctx context.Context, behavior PeerBehavior) (PeerConn, error) {
+ if m.acceptFn != nil {
+ return m.acceptFn(ctx, behavior)
+ }
+
+ return nil, nil
+}
+
+func (m *mockTransport) Dial(ctx context.Context, address types.NetAddress, behavior PeerBehavior) (PeerConn, error) {
+ if m.dialFn != nil {
+ return m.dialFn(ctx, address, behavior)
+ }
+
+ return nil, nil
+}
+
+func (m *mockTransport) Remove(p PeerConn) {
+ if m.removeFn != nil {
+ m.removeFn(p)
+ }
+}
+
+type (
+ addDelegate func(PeerConn)
+ removePeerDelegate func(types.ID) bool
+ hasDelegate func(types.ID) bool
+ hasIPDelegate func(net.IP) bool
+ getDelegate func(types.ID) PeerConn
+ listDelegate func() []PeerConn
+ numInboundDelegate func() uint64
+ numOutboundDelegate func() uint64
+)
+
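+// mockSet is a configurable peer set mock used in tests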
+type mockSet struct {
+ addFn addDelegate
+ removeFn removePeerDelegate
+ hasFn hasDelegate
+ hasIPFn hasIPDelegate
+ listFn listDelegate
+ getFn getDelegate
+ numInboundFn numInboundDelegate
+ numOutboundFn numOutboundDelegate
+}
+
+func (m *mockSet) Add(peer PeerConn) {
+ if m.addFn != nil {
+ m.addFn(peer)
+ }
+}
+
+func (m *mockSet) Remove(key types.ID) bool {
+ if m.removeFn != nil {
+ return m.removeFn(key)
+ }
+
+ return false
+}
+
+func (m *mockSet) Has(key types.ID) bool {
+ if m.hasFn != nil {
+ return m.hasFn(key)
+ }
+
+ return false
+}
+
+func (m *mockSet) Get(key types.ID) PeerConn {
+ if m.getFn != nil {
+ return m.getFn(key)
+ }
+
+ return nil
+}
+
+func (m *mockSet) List() []PeerConn {
+ if m.listFn != nil {
+ return m.listFn()
+ }
+
+ return nil
+}
+
+func (m *mockSet) NumInbound() uint64 {
+ if m.numInboundFn != nil {
+ return m.numInboundFn()
+ }
+
+ return 0
+}
+
+func (m *mockSet) NumOutbound() uint64 {
+ if m.numOutboundFn != nil {
+ return m.numOutboundFn()
+ }
+
+ return 0
+}
+
+type (
+ listenerAcceptDelegate func() (net.Conn, error)
+ closeDelegate func() error
+ addrDelegate func() net.Addr
+)
+
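+// mockListener is a configurable mock net.Listener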
+type mockListener struct {
+ acceptFn listenerAcceptDelegate
+ closeFn closeDelegate
+ addrFn addrDelegate
+}
+
+func (m *mockListener) Accept() (net.Conn, error) {
+ if m.acceptFn != nil {
+ return m.acceptFn()
+ }
+
+ return nil, nil
+}
+
+func (m *mockListener) Close() error {
+ if m.closeFn != nil {
+ return m.closeFn()
+ }
+
+ return nil
+}
+
+func (m *mockListener) Addr() net.Addr {
+ if m.addrFn != nil {
+ return m.addrFn()
+ }
+
+ return nil
+}
+
+type (
+ readDelegate func([]byte) (int, error)
+ writeDelegate func([]byte) (int, error)
+ localAddrDelegate func() net.Addr
+ remoteAddrDelegate func() net.Addr
+ setDeadlineDelegate func(time.Time) error
+)
+
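+// mockConn is a configurable mock net.Conn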
+type mockConn struct {
+ readFn readDelegate
+ writeFn writeDelegate
+ closeFn closeDelegate
+ localAddrFn localAddrDelegate
+ remoteAddrFn remoteAddrDelegate
+ setDeadlineFn setDeadlineDelegate
+ setReadDeadlineFn setDeadlineDelegate
+ setWriteDeadlineFn setDeadlineDelegate
+}
+
+func (m *mockConn) Read(buff []byte) (int, error) {
+ if m.readFn != nil {
+ return m.readFn(buff)
+ }
+
+ return 0, nil
+}
+
+func (m *mockConn) Write(buff []byte) (int, error) {
+ if m.writeFn != nil {
+ return m.writeFn(buff)
+ }
+
+ return 0, nil
+}
+
+func (m *mockConn) Close() error {
+ if m.closeFn != nil {
+ return m.closeFn()
+ }
+
+ return nil
+}
+
+func (m *mockConn) LocalAddr() net.Addr {
+ if m.localAddrFn != nil {
+ return m.localAddrFn()
+ }
+
+ return nil
+}
+
+func (m *mockConn) RemoteAddr() net.Addr {
+ if m.remoteAddrFn != nil {
+ return m.remoteAddrFn()
+ }
+
+ return nil
+}
+
+func (m *mockConn) SetDeadline(t time.Time) error {
+ if m.setDeadlineFn != nil {
+ return m.setDeadlineFn(t)
+ }
+
+ return nil
+}
+
+func (m *mockConn) SetReadDeadline(t time.Time) error {
+ if m.setReadDeadlineFn != nil {
+ return m.setReadDeadlineFn(t)
+ }
+
+ return nil
+}
+
+func (m *mockConn) SetWriteDeadline(t time.Time) error {
+ if m.setWriteDeadlineFn != nil {
+ return m.setWriteDeadlineFn(t)
+ }
+
+ return nil
+}
+
+type (
+ startDelegate func() error
+ onStartDelegate func() error
+ stopDelegate func() error
+ onStopDelegate func()
+ resetDelegate func() error
+ onResetDelegate func() error
+ isRunningDelegate func() bool
+ quitDelegate func() <-chan struct{}
+ stringDelegate func() string
+ setLoggerDelegate func(*slog.Logger)
+ setSwitchDelegate func(Switch)
+ getChannelsDelegate func() []*conn.ChannelDescriptor
+ initPeerDelegate func(PeerConn)
+ addPeerDelegate func(PeerConn)
+ removeSwitchPeerDelegate func(PeerConn, any)
+ receiveDelegate func(byte, PeerConn, []byte)
+)
+
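+// mockReactor is a configurable reactor mock used in tests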
+type mockReactor struct {
+ startFn startDelegate
+ onStartFn onStartDelegate
+ stopFn stopDelegate
+ onStopFn onStopDelegate
+ resetFn resetDelegate
+ onResetFn onResetDelegate
+ isRunningFn isRunningDelegate
+ quitFn quitDelegate
+ stringFn stringDelegate
+ setLoggerFn setLoggerDelegate
+ setSwitchFn setSwitchDelegate
+ getChannelsFn getChannelsDelegate
+ initPeerFn initPeerDelegate
+ addPeerFn addPeerDelegate
+ removePeerFn removeSwitchPeerDelegate
+ receiveFn receiveDelegate
+}
+
+func (m *mockReactor) Start() error {
+ if m.startFn != nil {
+ return m.startFn()
+ }
+
+ return nil
+}
+
+func (m *mockReactor) OnStart() error {
+ if m.onStartFn != nil {
+ return m.onStartFn()
+ }
+
+ return nil
+}
+
+func (m *mockReactor) Stop() error {
+ if m.stopFn != nil {
+ return m.stopFn()
+ }
+
+ return nil
+}
+
+func (m *mockReactor) OnStop() {
+ if m.onStopFn != nil {
+ m.onStopFn()
+ }
+}
+
+func (m *mockReactor) Reset() error {
+ if m.resetFn != nil {
+ return m.resetFn()
+ }
+
+ return nil
+}
+
+func (m *mockReactor) OnReset() error {
+ if m.onResetFn != nil {
+ return m.onResetFn()
+ }
+
+ return nil
+}
+
+func (m *mockReactor) IsRunning() bool {
+ if m.isRunningFn != nil {
+ return m.isRunningFn()
+ }
+
+ return false
+}
+
+func (m *mockReactor) Quit() <-chan struct{} {
+ if m.quitFn != nil {
+ return m.quitFn()
+ }
+
+ return nil
+}
+
+func (m *mockReactor) String() string {
+ if m.stringFn != nil {
+ return m.stringFn()
+ }
+
+ return ""
+}
+
+func (m *mockReactor) SetLogger(logger *slog.Logger) {
+ if m.setLoggerFn != nil {
+ m.setLoggerFn(logger)
+ }
+}
+
+func (m *mockReactor) SetSwitch(s Switch) {
+ if m.setSwitchFn != nil {
+ m.setSwitchFn(s)
+ }
+}
+
+func (m *mockReactor) GetChannels() []*conn.ChannelDescriptor {
+ if m.getChannelsFn != nil {
+ return m.getChannelsFn()
+ }
+
+ return nil
+}
+
+func (m *mockReactor) InitPeer(peer PeerConn) PeerConn {
+ if m.initPeerFn != nil {
+ m.initPeerFn(peer)
+ }
+
+ return nil
+}
+
+func (m *mockReactor) AddPeer(peer PeerConn) {
+ if m.addPeerFn != nil {
+ m.addPeerFn(peer)
+ }
+}
+
+func (m *mockReactor) RemovePeer(peer PeerConn, reason any) {
+ if m.removePeerFn != nil {
+ m.removePeerFn(peer, reason)
+ }
+}
+
+func (m *mockReactor) Receive(chID byte, peer PeerConn, msgBytes []byte) {
+ if m.receiveFn != nil {
+ m.receiveFn(chID, peer, msgBytes)
+ }
+}
diff --git a/tm2/pkg/p2p/netaddress_test.go b/tm2/pkg/p2p/netaddress_test.go
deleted file mode 100644
index 413d020c153..00000000000
--- a/tm2/pkg/p2p/netaddress_test.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package p2p
-
-import (
- "encoding/hex"
- "net"
- "testing"
-
- "github.com/gnolang/gno/tm2/pkg/crypto"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestAddress2ID(t *testing.T) {
- t.Parallel()
-
- idbz, _ := hex.DecodeString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
- id := crypto.AddressFromBytes(idbz).ID()
- assert.Equal(t, crypto.ID("g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6"), id)
-
- idbz, _ = hex.DecodeString("deadbeefdeadbeefdeadbeefdeadbeefdead0000")
- id = crypto.AddressFromBytes(idbz).ID()
- assert.Equal(t, crypto.ID("g1m6kmam774klwlh4dhmhaatd7al026qqqq9c22r"), id)
-}
-
-func TestNewNetAddress(t *testing.T) {
- t.Parallel()
-
- tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
- require.Nil(t, err)
-
- assert.Panics(t, func() {
- NewNetAddress("", tcpAddr)
- })
-
- idbz, _ := hex.DecodeString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
- id := crypto.AddressFromBytes(idbz).ID()
- // ^-- is "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6"
-
- addr := NewNetAddress(id, tcpAddr)
- assert.Equal(t, "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", addr.String())
-
- assert.NotPanics(t, func() {
- NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000})
- }, "Calling NewNetAddress with UDPAddr should not panic in testing")
-}
-
-func TestNewNetAddressFromString(t *testing.T) {
- t.Parallel()
-
- testCases := []struct {
- name string
- addr string
- expected string
- correct bool
- }{
- {"no node id and no protocol", "127.0.0.1:8080", "", false},
- {"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false},
- {"no node id w/ udp input", "udp://127.0.0.1:8080", "", false},
-
- {"no protocol", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", true},
- {"tcp input", "tcp://g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", true},
- {"udp input", "udp://g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", true},
- {"malformed tcp input", "tcp//g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "", false},
- {"malformed udp input", "udp//g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "", false},
-
- // {"127.0.0:8080", false},
- {"invalid host", "notahost", "", false},
- {"invalid port", "127.0.0.1:notapath", "", false},
- {"invalid host w/ port", "notahost:8080", "", false},
- {"just a port", "8082", "", false},
- {"non-existent port", "127.0.0:8080000", "", false},
-
- {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false},
- {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false},
- {"not bech32 nodeId", "xxxm6kmam774klwlh4dhmhaatd7al02m0h0hdap9l@127.0.0.1:8080", "", false},
-
- {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false},
- {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false},
- {"not bech32 nodeId w/tcp", "tcp://xxxxm6kmam774klwlh4dhmhaatd7al02m0h0hdap9l@127.0.0.1:8080", "", false},
- {"correct nodeId w/tcp", "tcp://g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", true},
-
- {"no node id", "tcp://@127.0.0.1:8080", "", false},
- {"no node id or IP", "tcp://@", "", false},
- {"tcp no host, w/ port", "tcp://:26656", "", false},
- {"empty", "", "", false},
- {"node id delimiter 1", "@", "", false},
- {"node id delimiter 2", " @", "", false},
- {"node id delimiter 3", " @ ", "", false},
- }
-
- for _, tc := range testCases {
- tc := tc
- t.Run(tc.name, func(t *testing.T) {
- t.Parallel()
-
- addr, err := NewNetAddressFromString(tc.addr)
- if tc.correct {
- if assert.Nil(t, err, tc.addr) {
- assert.Equal(t, tc.expected, addr.String())
- }
- } else {
- assert.NotNil(t, err, tc.addr)
- }
- })
- }
-}
-
-func TestNewNetAddressFromStrings(t *testing.T) {
- t.Parallel()
-
- addrs, errs := NewNetAddressFromStrings([]string{
- "127.0.0.1:8080",
- "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080",
- "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.2:8080",
- })
- assert.Len(t, errs, 1)
- assert.Equal(t, 2, len(addrs))
-}
-
-func TestNewNetAddressFromIPPort(t *testing.T) {
- t.Parallel()
-
- addr := NewNetAddressFromIPPort("", net.ParseIP("127.0.0.1"), 8080)
- assert.Equal(t, "127.0.0.1:8080", addr.String())
-}
-
-func TestNetAddressProperties(t *testing.T) {
- t.Parallel()
-
- // TODO add more test cases
- testCases := []struct {
- addr string
- valid bool
- local bool
- routable bool
- }{
- {"g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", true, true, false},
- {"g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@ya.ru:80", true, false, true},
- }
-
- for _, tc := range testCases {
- addr, err := NewNetAddressFromString(tc.addr)
- require.Nil(t, err)
-
- err = addr.Validate()
- if tc.valid {
- assert.NoError(t, err)
- } else {
- assert.Error(t, err)
- }
- assert.Equal(t, tc.local, addr.Local())
- assert.Equal(t, tc.routable, addr.Routable())
- }
-}
-
-func TestNetAddressReachabilityTo(t *testing.T) {
- t.Parallel()
-
- // TODO add more test cases
- testCases := []struct {
- addr string
- other string
- reachability int
- }{
- {"g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8081", 0},
- {"g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@ya.ru:80", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", 1},
- }
-
- for _, tc := range testCases {
- addr, err := NewNetAddressFromString(tc.addr)
- require.Nil(t, err)
-
- other, err := NewNetAddressFromString(tc.other)
- require.Nil(t, err)
-
- assert.Equal(t, tc.reachability, addr.ReachabilityTo(other))
- }
-}
diff --git a/tm2/pkg/p2p/node_info.go b/tm2/pkg/p2p/node_info.go
deleted file mode 100644
index 48ba8f7776b..00000000000
--- a/tm2/pkg/p2p/node_info.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package p2p
-
-import (
- "fmt"
-
- "github.com/gnolang/gno/tm2/pkg/bft/state/eventstore"
- "github.com/gnolang/gno/tm2/pkg/strings"
- "github.com/gnolang/gno/tm2/pkg/versionset"
-)
-
-const (
- maxNodeInfoSize = 10240 // 10KB
- maxNumChannels = 16 // plenty of room for upgrades, for now
-)
-
-// Max size of the NodeInfo struct
-func MaxNodeInfoSize() int {
- return maxNodeInfoSize
-}
-
-// -------------------------------------------------------------
-
-// NodeInfo is the basic node information exchanged
-// between two peers during the Tendermint P2P handshake.
-type NodeInfo struct {
- // Set of protocol versions
- VersionSet versionset.VersionSet `json:"version_set"`
-
- // Authenticate
- NetAddress *NetAddress `json:"net_address"`
-
- // Check compatibility.
- // Channels are HexBytes so easier to read as JSON
- Network string `json:"network"` // network/chain ID
- Software string `json:"software"` // name of immediate software
- Version string `json:"version"` // software major.minor.revision
- Channels []byte `json:"channels"` // channels this node knows about
-
- // ASCIIText fields
- Moniker string `json:"moniker"` // arbitrary moniker
- Other NodeInfoOther `json:"other"` // other application specific data
-}
-
-// NodeInfoOther is the misc. application specific data
-type NodeInfoOther struct {
- TxIndex string `json:"tx_index"`
- RPCAddress string `json:"rpc_address"`
-}
-
-// Validate checks the self-reported NodeInfo is safe.
-// It returns an error if there
-// are too many Channels, if there are any duplicate Channels,
-// if the ListenAddr is malformed, or if the ListenAddr is a host name
-// that can not be resolved to some IP.
-// TODO: constraints for Moniker/Other? Or is that for the UI ?
-// JAE: It needs to be done on the client, but to prevent ambiguous
-// unicode characters, maybe it's worth sanitizing it here.
-// In the future we might want to validate these, once we have a
-// name-resolution system up.
-// International clients could then use punycode (or we could use
-// url-encoding), and we just need to be careful with how we handle that in our
-// clients. (e.g. off by default).
-func (info NodeInfo) Validate() error {
- // ID is already validated. TODO validate
-
- // Validate ListenAddr.
- if info.NetAddress == nil {
- return fmt.Errorf("info.NetAddress cannot be nil")
- }
- if err := info.NetAddress.ValidateLocal(); err != nil {
- return err
- }
-
- // Network is validated in CompatibleWith.
-
- // Validate Version
- if len(info.Version) > 0 &&
- (!strings.IsASCIIText(info.Version) || strings.ASCIITrim(info.Version) == "") {
- return fmt.Errorf("info.Version must be valid ASCII text without tabs, but got %v", info.Version)
- }
-
- // Validate Channels - ensure max and check for duplicates.
- if len(info.Channels) > maxNumChannels {
- return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels)
- }
- channels := make(map[byte]struct{})
- for _, ch := range info.Channels {
- _, ok := channels[ch]
- if ok {
- return fmt.Errorf("info.Channels contains duplicate channel id %v", ch)
- }
- channels[ch] = struct{}{}
- }
-
- // Validate Moniker.
- if !strings.IsASCIIText(info.Moniker) || strings.ASCIITrim(info.Moniker) == "" {
- return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v", info.Moniker)
- }
-
- // Validate Other.
- other := info.Other
- txIndex := other.TxIndex
- switch txIndex {
- case "", eventstore.StatusOn, eventstore.StatusOff:
- default:
- return fmt.Errorf("info.Other.TxIndex should be either 'on', 'off', or empty string, got '%v'", txIndex)
- }
- // XXX: Should we be more strict about address formats?
- rpcAddr := other.RPCAddress
- if len(rpcAddr) > 0 && (!strings.IsASCIIText(rpcAddr) || strings.ASCIITrim(rpcAddr) == "") {
- return fmt.Errorf("info.Other.RPCAddress=%v must be valid ASCII text without tabs", rpcAddr)
- }
-
- return nil
-}
-
-func (info NodeInfo) ID() ID {
- return info.NetAddress.ID
-}
-
-// CompatibleWith checks if two NodeInfo are compatible with eachother.
-// CONTRACT: two nodes are compatible if the Block version and network match
-// and they have at least one channel in common.
-func (info NodeInfo) CompatibleWith(other NodeInfo) error {
- // check protocol versions
- _, err := info.VersionSet.CompatibleWith(other.VersionSet)
- if err != nil {
- return err
- }
-
- // nodes must be on the same network
- if info.Network != other.Network {
- return fmt.Errorf("Peer is on a different network. Got %v, expected %v", other.Network, info.Network)
- }
-
- // if we have no channels, we're just testing
- if len(info.Channels) == 0 {
- return nil
- }
-
- // for each of our channels, check if they have it
- found := false
-OUTER_LOOP:
- for _, ch1 := range info.Channels {
- for _, ch2 := range other.Channels {
- if ch1 == ch2 {
- found = true
- break OUTER_LOOP // only need one
- }
- }
- }
- if !found {
- return fmt.Errorf("Peer has no common channels. Our channels: %v ; Peer channels: %v", info.Channels, other.Channels)
- }
- return nil
-}
diff --git a/tm2/pkg/p2p/node_info_test.go b/tm2/pkg/p2p/node_info_test.go
deleted file mode 100644
index 58f1dab8854..00000000000
--- a/tm2/pkg/p2p/node_info_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package p2p
-
-import (
- "fmt"
- "net"
- "testing"
-
- "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
- "github.com/gnolang/gno/tm2/pkg/versionset"
- "github.com/stretchr/testify/assert"
-)
-
-func TestNodeInfoValidate(t *testing.T) {
- t.Parallel()
-
- // empty fails
- ni := NodeInfo{}
- assert.Error(t, ni.Validate())
-
- channels := make([]byte, maxNumChannels)
- for i := 0; i < maxNumChannels; i++ {
- channels[i] = byte(i)
- }
- dupChannels := make([]byte, 5)
- copy(dupChannels, channels[:5])
- dupChannels = append(dupChannels, testCh)
-
- nonAscii := "¢§µ"
- emptyTab := fmt.Sprintf("\t")
- emptySpace := fmt.Sprintf(" ")
-
- testCases := []struct {
- testName string
- malleateNodeInfo func(*NodeInfo)
- expectErr bool
- }{
- {"Too Many Channels", func(ni *NodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, true}, //nolint: gocritic
- {"Duplicate Channel", func(ni *NodeInfo) { ni.Channels = dupChannels }, true},
- {"Good Channels", func(ni *NodeInfo) { ni.Channels = ni.Channels[:5] }, false},
-
- {"Nil NetAddress", func(ni *NodeInfo) { ni.NetAddress = nil }, true},
- {"Zero NetAddress ID", func(ni *NodeInfo) { ni.NetAddress.ID = "" }, true},
- {"Invalid NetAddress IP", func(ni *NodeInfo) { ni.NetAddress.IP = net.IP([]byte{0x00}) }, true},
-
- {"Non-ASCII Version", func(ni *NodeInfo) { ni.Version = nonAscii }, true},
- {"Empty tab Version", func(ni *NodeInfo) { ni.Version = emptyTab }, true},
- {"Empty space Version", func(ni *NodeInfo) { ni.Version = emptySpace }, true},
- {"Empty Version", func(ni *NodeInfo) { ni.Version = "" }, false},
-
- {"Non-ASCII Moniker", func(ni *NodeInfo) { ni.Moniker = nonAscii }, true},
- {"Empty tab Moniker", func(ni *NodeInfo) { ni.Moniker = emptyTab }, true},
- {"Empty space Moniker", func(ni *NodeInfo) { ni.Moniker = emptySpace }, true},
- {"Empty Moniker", func(ni *NodeInfo) { ni.Moniker = "" }, true},
- {"Good Moniker", func(ni *NodeInfo) { ni.Moniker = "hey its me" }, false},
-
- {"Non-ASCII TxIndex", func(ni *NodeInfo) { ni.Other.TxIndex = nonAscii }, true},
- {"Empty tab TxIndex", func(ni *NodeInfo) { ni.Other.TxIndex = emptyTab }, true},
- {"Empty space TxIndex", func(ni *NodeInfo) { ni.Other.TxIndex = emptySpace }, true},
- {"Empty TxIndex", func(ni *NodeInfo) { ni.Other.TxIndex = "" }, false},
- {"Off TxIndex", func(ni *NodeInfo) { ni.Other.TxIndex = "off" }, false},
-
- {"Non-ASCII RPCAddress", func(ni *NodeInfo) { ni.Other.RPCAddress = nonAscii }, true},
- {"Empty tab RPCAddress", func(ni *NodeInfo) { ni.Other.RPCAddress = emptyTab }, true},
- {"Empty space RPCAddress", func(ni *NodeInfo) { ni.Other.RPCAddress = emptySpace }, true},
- {"Empty RPCAddress", func(ni *NodeInfo) { ni.Other.RPCAddress = "" }, false},
- {"Good RPCAddress", func(ni *NodeInfo) { ni.Other.RPCAddress = "0.0.0.0:26657" }, false},
- }
-
- nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()}
- name := "testing"
-
- // test case passes
- ni = testNodeInfo(nodeKey.ID(), name)
- ni.Channels = channels
- assert.NoError(t, ni.Validate())
-
- for _, tc := range testCases {
- ni := testNodeInfo(nodeKey.ID(), name)
- ni.Channels = channels
- tc.malleateNodeInfo(&ni)
- err := ni.Validate()
- if tc.expectErr {
- assert.Error(t, err, tc.testName)
- } else {
- assert.NoError(t, err, tc.testName)
- }
- }
-}
-
-func TestNodeInfoCompatible(t *testing.T) {
- t.Parallel()
-
- nodeKey1 := NodeKey{PrivKey: ed25519.GenPrivKey()}
- nodeKey2 := NodeKey{PrivKey: ed25519.GenPrivKey()}
- name := "testing"
-
- var newTestChannel byte = 0x2
-
- // test NodeInfo is compatible
- ni1 := testNodeInfo(nodeKey1.ID(), name)
- ni2 := testNodeInfo(nodeKey2.ID(), name)
- assert.NoError(t, ni1.CompatibleWith(ni2))
-
- // add another channel; still compatible
- ni2.Channels = []byte{newTestChannel, testCh}
- assert.NoError(t, ni1.CompatibleWith(ni2))
-
- // wrong NodeInfo type is not compatible
- _, netAddr := CreateRoutableAddr()
- ni3 := NodeInfo{NetAddress: netAddr}
- assert.Error(t, ni1.CompatibleWith(ni3))
-
- testCases := []struct {
- testName string
- malleateNodeInfo func(*NodeInfo)
- }{
- {"Bad block version", func(ni *NodeInfo) {
- ni.VersionSet.Set(versionset.VersionInfo{Name: "Block", Version: "badversion"})
- }},
- {"Wrong block version", func(ni *NodeInfo) {
- ni.VersionSet.Set(versionset.VersionInfo{Name: "Block", Version: "v999.999.999-wrong"})
- }},
- {"Wrong network", func(ni *NodeInfo) { ni.Network += "-wrong" }},
- {"No common channels", func(ni *NodeInfo) { ni.Channels = []byte{newTestChannel} }},
- }
-
- for i, tc := range testCases {
- t.Logf("case #%v", i)
- ni := testNodeInfo(nodeKey2.ID(), name)
- tc.malleateNodeInfo(&ni)
- fmt.Printf("case #%v\n", i)
- assert.Error(t, ni1.CompatibleWith(ni))
- }
-}
diff --git a/tm2/pkg/p2p/peer.go b/tm2/pkg/p2p/peer.go
index ef2ddcf2c25..135bf4b250c 100644
--- a/tm2/pkg/p2p/peer.go
+++ b/tm2/pkg/p2p/peer.go
@@ -6,169 +6,132 @@ import (
"net"
"github.com/gnolang/gno/tm2/pkg/cmap"
- connm "github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/gnolang/gno/tm2/pkg/service"
)
-// Peer is an interface representing a peer connected on a reactor.
-type Peer interface {
- service.Service
- FlushStop()
-
- ID() ID // peer's cryptographic ID
- RemoteIP() net.IP // remote IP of the connection
- RemoteAddr() net.Addr // remote address of the connection
-
- IsOutbound() bool // did we dial the peer
- IsPersistent() bool // do we redial this peer when we disconnect
-
- CloseConn() error // close original connection
-
- NodeInfo() NodeInfo // peer's info
- Status() connm.ConnectionStatus
- SocketAddr() *NetAddress // actual address of the socket
-
- Send(byte, []byte) bool
- TrySend(byte, []byte) bool
-
- Set(string, interface{})
- Get(string) interface{}
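+// ConnConfig holds the configuration for the peer's multiplex connection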
+type ConnConfig struct {
+ MConfig conn.MConnConfig
+ ReactorsByCh map[byte]Reactor
+ ChDescs []*conn.ChannelDescriptor
+ OnPeerError func(PeerConn, error)
}
-// ----------------------------------------------------------
-
-// peerConn contains the raw connection and its config.
-type peerConn struct {
- outbound bool
- persistent bool
- conn net.Conn // source connection
-
- socketAddr *NetAddress
-
- // cached RemoteIP()
- ip net.IP
+// ConnInfo wraps the remote peer connection
+type ConnInfo struct {
+ Outbound bool // flag indicating if the connection is dialed
+ Persistent bool // flag indicating if the connection is persistent
+ Private bool // flag indicating if the peer is private (not shared)
+ Conn net.Conn // the source connection
+ RemoteIP net.IP // the remote IP of the peer
+ SocketAddr *types.NetAddress
}
-func newPeerConn(
- outbound, persistent bool,
- conn net.Conn,
- socketAddr *NetAddress,
-) peerConn {
- return peerConn{
- outbound: outbound,
- persistent: persistent,
- conn: conn,
- socketAddr: socketAddr,
- }
-}
-
-// ID only exists for SecretConnection.
-// NOTE: Will panic if conn is not *SecretConnection.
-func (pc peerConn) ID() ID {
- return (pc.conn.(*connm.SecretConnection).RemotePubKey()).Address().ID()
-}
-
-// Return the IP from the connection RemoteAddr
-func (pc peerConn) RemoteIP() net.IP {
- if pc.ip != nil {
- return pc.ip
- }
-
- host, _, err := net.SplitHostPort(pc.conn.RemoteAddr().String())
- if err != nil {
- panic(err)
- }
-
- ips, err := net.LookupIP(host)
- if err != nil {
- panic(err)
- }
-
- pc.ip = ips[0]
-
- return pc.ip
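+// multiplexConn defines the methods the peer expects from its multiplexed connection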
+type multiplexConn interface {
+ FlushStop()
+ Start() error
+ Stop() error
+ Send(byte, []byte) bool
+ TrySend(byte, []byte) bool
+ SetLogger(*slog.Logger)
+ Status() conn.ConnectionStatus
+ String() string
}
-// peer implements Peer.
-//
+// peer is a wrapper for a remote peer
// Before using a peer, you will need to perform a handshake on connection.
type peer struct {
service.BaseService
- // raw peerConn and the multiplex connection
- peerConn
- mconn *connm.MConnection
-
- // peer's node info and the channel it knows about
- // channels = nodeInfo.Channels
- // cached to avoid copying nodeInfo in hasChannel
- nodeInfo NodeInfo
- channels []byte
+ connInfo *ConnInfo // Metadata about the connection
+ nodeInfo types.NodeInfo // Information about the peer's node
+ mConn multiplexConn // The multiplexed connection
- // User data
- Data *cmap.CMap
+ data *cmap.CMap // Arbitrary data store associated with the peer
}
-type PeerOption func(*peer)
-
+// newPeer creates an uninitialized peer instance
func newPeer(
- pc peerConn,
- mConfig connm.MConnConfig,
- nodeInfo NodeInfo,
- reactorsByCh map[byte]Reactor,
- chDescs []*connm.ChannelDescriptor,
- onPeerError func(Peer, interface{}),
- options ...PeerOption,
-) *peer {
+ connInfo *ConnInfo,
+ nodeInfo types.NodeInfo,
+ mConfig *ConnConfig,
+) PeerConn {
p := &peer{
- peerConn: pc,
+ connInfo: connInfo,
nodeInfo: nodeInfo,
- channels: nodeInfo.Channels, // TODO
- Data: cmap.NewCMap(),
+ data: cmap.NewCMap(),
}
- p.mconn = createMConnection(
- pc.conn,
- p,
- reactorsByCh,
- chDescs,
- onPeerError,
+ p.mConn = p.createMConnection(
+ connInfo.Conn,
mConfig,
)
+
p.BaseService = *service.NewBaseService(nil, "Peer", p)
- for _, option := range options {
- option(p)
- }
return p
}
-// String representation.
+// RemoteIP returns the IP from the remote connection
+func (p *peer) RemoteIP() net.IP {
+ return p.connInfo.RemoteIP
+}
+
+// RemoteAddr returns the address from the remote connection
+func (p *peer) RemoteAddr() net.Addr {
+ return p.connInfo.Conn.RemoteAddr()
+}
+
func (p *peer) String() string {
- if p.outbound {
- return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.ID())
+ if p.connInfo.Outbound {
+ return fmt.Sprintf("Peer{%s %s out}", p.mConn, p.ID())
}
- return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID())
+ return fmt.Sprintf("Peer{%s %s in}", p.mConn, p.ID())
}
-// ---------------------------------------------------
-// Implements service.Service
+// IsOutbound returns true if the connection is outbound, false otherwise.
+func (p *peer) IsOutbound() bool {
+ return p.connInfo.Outbound
+}
+
+// IsPersistent returns true if the peer is persistent, false otherwise.
+func (p *peer) IsPersistent() bool {
+ return p.connInfo.Persistent
+}
+
+// IsPrivate returns true if the peer is private, false otherwise.
+func (p *peer) IsPrivate() bool {
+ return p.connInfo.Private
+}
+
+// SocketAddr returns the address of the socket.
+// For outbound peers, it's the address dialed (after DNS resolution).
+// For inbound peers, it's the address returned by the underlying connection
+// (not what's reported in the peer's NodeInfo).
+func (p *peer) SocketAddr() *types.NetAddress {
+ return p.connInfo.SocketAddr
+}
+
+// CloseConn closes the original connection.
+// Used for cleaning up in cases where the peer had not been started at all.
+func (p *peer) CloseConn() error {
+ return p.connInfo.Conn.Close()
+}
-// SetLogger implements BaseService.
func (p *peer) SetLogger(l *slog.Logger) {
p.Logger = l
- p.mconn.SetLogger(l)
+ p.mConn.SetLogger(l)
}
-// OnStart implements BaseService.
func (p *peer) OnStart() error {
if err := p.BaseService.OnStart(); err != nil {
- return err
+ return fmt.Errorf("unable to start base service, %w", err)
}
- if err := p.mconn.Start(); err != nil {
- return err
+ if err := p.mConn.Start(); err != nil {
+ return fmt.Errorf("unable to start multiplex connection, %w", err)
}
return nil
@@ -179,164 +142,105 @@ func (p *peer) OnStart() error {
// NOTE: it is not safe to call this method more than once.
func (p *peer) FlushStop() {
p.BaseService.OnStop()
- p.mconn.FlushStop() // stop everything and close the conn
+ p.mConn.FlushStop() // stop everything and close the conn
}
// OnStop implements BaseService.
func (p *peer) OnStop() {
p.BaseService.OnStop()
- p.mconn.Stop() // stop everything and close the conn
-}
-// ---------------------------------------------------
-// Implements Peer
-
-// ID returns the peer's ID - the hex encoded hash of its pubkey.
-func (p *peer) ID() ID {
- return p.nodeInfo.NetAddress.ID
-}
-
-// IsOutbound returns true if the connection is outbound, false otherwise.
-func (p *peer) IsOutbound() bool {
- return p.peerConn.outbound
+ if err := p.mConn.Stop(); err != nil {
+ p.Logger.Error(
+ "unable to gracefully close mConn",
+ "err",
+ err,
+ )
+ }
}
-// IsPersistent returns true if the peer is persistent, false otherwise.
-func (p *peer) IsPersistent() bool {
- return p.peerConn.persistent
+// ID returns the peer's ID - the hex encoded hash of its pubkey.
+func (p *peer) ID() types.ID {
+ return p.nodeInfo.PeerID
}
// NodeInfo returns a copy of the peer's NodeInfo.
-func (p *peer) NodeInfo() NodeInfo {
+func (p *peer) NodeInfo() types.NodeInfo {
return p.nodeInfo
}
-// SocketAddr returns the address of the socket.
-// For outbound peers, it's the address dialed (after DNS resolution).
-// For inbound peers, it's the address returned by the underlying connection
-// (not what's reported in the peer's NodeInfo).
-func (p *peer) SocketAddr() *NetAddress {
- return p.peerConn.socketAddr
-}
-
// Status returns the peer's ConnectionStatus.
-func (p *peer) Status() connm.ConnectionStatus {
- return p.mconn.Status()
+func (p *peer) Status() conn.ConnectionStatus {
+ return p.mConn.Status()
}
// Send msg bytes to the channel identified by chID byte. Returns false if the
// send queue is full after timeout, specified by MConnection.
func (p *peer) Send(chID byte, msgBytes []byte) bool {
- if !p.IsRunning() {
- // see Switch#Broadcast, where we fetch the list of peers and loop over
+ if !p.IsRunning() || !p.hasChannel(chID) {
+ // see MultiplexSwitch#Broadcast, where we fetch the list of peers and loop over
// them - while we're looping, one peer may be removed and stopped.
return false
- } else if !p.hasChannel(chID) {
- return false
}
- res := p.mconn.Send(chID, msgBytes)
- return res
+
+ return p.mConn.Send(chID, msgBytes)
}
// TrySend msg bytes to the channel identified by chID byte. Immediately returns
// false if the send queue is full.
func (p *peer) TrySend(chID byte, msgBytes []byte) bool {
- if !p.IsRunning() {
- return false
- } else if !p.hasChannel(chID) {
+ if !p.IsRunning() || !p.hasChannel(chID) {
return false
}
- res := p.mconn.TrySend(chID, msgBytes)
- return res
+
+ return p.mConn.TrySend(chID, msgBytes)
}
// Get the data for a given key.
-func (p *peer) Get(key string) interface{} {
- return p.Data.Get(key)
+func (p *peer) Get(key string) any {
+ return p.data.Get(key)
}
// Set sets the data for the given key.
-func (p *peer) Set(key string, data interface{}) {
- p.Data.Set(key, data)
+func (p *peer) Set(key string, data any) {
+ p.data.Set(key, data)
}
// hasChannel returns true if the peer reported
// knowing about the given chID.
func (p *peer) hasChannel(chID byte) bool {
- for _, ch := range p.channels {
+ for _, ch := range p.nodeInfo.Channels {
if ch == chID {
return true
}
}
- // NOTE: probably will want to remove this
- // but could be helpful while the feature is new
- p.Logger.Debug(
- "Unknown channel for peer",
- "channel",
- chID,
- "channels",
- p.channels,
- )
- return false
-}
-
-// CloseConn closes original connection. Used for cleaning up in cases where the peer had not been started at all.
-func (p *peer) CloseConn() error {
- return p.peerConn.conn.Close()
-}
-
-// ---------------------------------------------------
-// methods only used for testing
-// TODO: can we remove these?
-
-// CloseConn closes the underlying connection
-func (pc *peerConn) CloseConn() {
- pc.conn.Close() //nolint: errcheck
-}
-// RemoteAddr returns peer's remote network address.
-func (p *peer) RemoteAddr() net.Addr {
- return p.peerConn.conn.RemoteAddr()
-}
-
-// CanSend returns true if the send queue is not full, false otherwise.
-func (p *peer) CanSend(chID byte) bool {
- if !p.IsRunning() {
- return false
- }
- return p.mconn.CanSend(chID)
+ return false
}
-// ------------------------------------------------------------------
-// helper funcs
-
-func createMConnection(
- conn net.Conn,
- p *peer,
- reactorsByCh map[byte]Reactor,
- chDescs []*connm.ChannelDescriptor,
- onPeerError func(Peer, interface{}),
- config connm.MConnConfig,
-) *connm.MConnection {
+func (p *peer) createMConnection(
+ c net.Conn,
+ config *ConnConfig,
+) *conn.MConnection {
onReceive := func(chID byte, msgBytes []byte) {
- reactor := reactorsByCh[chID]
+ reactor := config.ReactorsByCh[chID]
if reactor == nil {
// Note that its ok to panic here as it's caught in the connm._recover,
// which does onPeerError.
panic(fmt.Sprintf("Unknown channel %X", chID))
}
+
reactor.Receive(chID, p, msgBytes)
}
- onError := func(r interface{}) {
- onPeerError(p, r)
+ onError := func(r error) {
+ config.OnPeerError(p, r)
}
- return connm.NewMConnectionWithConfig(
- conn,
- chDescs,
+ return conn.NewMConnectionWithConfig(
+ c,
+ config.ChDescs,
onReceive,
onError,
- config,
+ config.MConfig,
)
}
diff --git a/tm2/pkg/p2p/peer_set.go b/tm2/pkg/p2p/peer_set.go
deleted file mode 100644
index 396ba56da11..00000000000
--- a/tm2/pkg/p2p/peer_set.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package p2p
-
-import (
- "net"
- "sync"
-)
-
-// IPeerSet has a (immutable) subset of the methods of PeerSet.
-type IPeerSet interface {
- Has(key ID) bool
- HasIP(ip net.IP) bool
- Get(key ID) Peer
- List() []Peer
- Size() int
-}
-
-// -----------------------------------------------------------------------------
-
-// PeerSet is a special structure for keeping a table of peers.
-// Iteration over the peers is super fast and thread-safe.
-type PeerSet struct {
- mtx sync.Mutex
- lookup map[ID]*peerSetItem
- list []Peer
-}
-
-type peerSetItem struct {
- peer Peer
- index int
-}
-
-// NewPeerSet creates a new peerSet with a list of initial capacity of 256 items.
-func NewPeerSet() *PeerSet {
- return &PeerSet{
- lookup: make(map[ID]*peerSetItem),
- list: make([]Peer, 0, 256),
- }
-}
-
-// Add adds the peer to the PeerSet.
-// It returns an error carrying the reason, if the peer is already present.
-func (ps *PeerSet) Add(peer Peer) error {
- ps.mtx.Lock()
- defer ps.mtx.Unlock()
-
- if ps.lookup[peer.ID()] != nil {
- return SwitchDuplicatePeerIDError{peer.ID()}
- }
-
- index := len(ps.list)
- // Appending is safe even with other goroutines
- // iterating over the ps.list slice.
- ps.list = append(ps.list, peer)
- ps.lookup[peer.ID()] = &peerSetItem{peer, index}
- return nil
-}
-
-// Has returns true if the set contains the peer referred to by this
-// peerKey, otherwise false.
-func (ps *PeerSet) Has(peerKey ID) bool {
- ps.mtx.Lock()
- _, ok := ps.lookup[peerKey]
- ps.mtx.Unlock()
- return ok
-}
-
-// HasIP returns true if the set contains the peer referred to by this IP
-// address, otherwise false.
-func (ps *PeerSet) HasIP(peerIP net.IP) bool {
- ps.mtx.Lock()
- defer ps.mtx.Unlock()
-
- return ps.hasIP(peerIP)
-}
-
-// hasIP does not acquire a lock so it can be used in public methods which
-// already lock.
-func (ps *PeerSet) hasIP(peerIP net.IP) bool {
- for _, item := range ps.lookup {
- if item.peer.RemoteIP().Equal(peerIP) {
- return true
- }
- }
-
- return false
-}
-
-// Get looks up a peer by the provided peerKey. Returns nil if peer is not
-// found.
-func (ps *PeerSet) Get(peerKey ID) Peer {
- ps.mtx.Lock()
- defer ps.mtx.Unlock()
- item, ok := ps.lookup[peerKey]
- if ok {
- return item.peer
- }
- return nil
-}
-
-// Remove discards peer by its Key, if the peer was previously memoized.
-// Returns true if the peer was removed, and false if it was not found.
-// in the set.
-func (ps *PeerSet) Remove(peer Peer) bool {
- ps.mtx.Lock()
- defer ps.mtx.Unlock()
-
- item := ps.lookup[peer.ID()]
- if item == nil {
- return false
- }
-
- index := item.index
- // Create a new copy of the list but with one less item.
- // (we must copy because we'll be mutating the list).
- newList := make([]Peer, len(ps.list)-1)
- copy(newList, ps.list)
- // If it's the last peer, that's an easy special case.
- if index == len(ps.list)-1 {
- ps.list = newList
- delete(ps.lookup, peer.ID())
- return true
- }
-
- // Replace the popped item with the last item in the old list.
- lastPeer := ps.list[len(ps.list)-1]
- lastPeerKey := lastPeer.ID()
- lastPeerItem := ps.lookup[lastPeerKey]
- newList[index] = lastPeer
- lastPeerItem.index = index
- ps.list = newList
- delete(ps.lookup, peer.ID())
- return true
-}
-
-// Size returns the number of unique items in the peerSet.
-func (ps *PeerSet) Size() int {
- ps.mtx.Lock()
- defer ps.mtx.Unlock()
- return len(ps.list)
-}
-
-// List returns the threadsafe list of peers.
-func (ps *PeerSet) List() []Peer {
- ps.mtx.Lock()
- defer ps.mtx.Unlock()
- return ps.list
-}
diff --git a/tm2/pkg/p2p/peer_set_test.go b/tm2/pkg/p2p/peer_set_test.go
deleted file mode 100644
index 7aca84d59b0..00000000000
--- a/tm2/pkg/p2p/peer_set_test.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package p2p
-
-import (
- "net"
- "sync"
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
- "github.com/gnolang/gno/tm2/pkg/service"
-)
-
-// mockPeer for testing the PeerSet
-type mockPeer struct {
- service.BaseService
- ip net.IP
- id ID
-}
-
-func (mp *mockPeer) FlushStop() { mp.Stop() }
-func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true }
-func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true }
-func (mp *mockPeer) NodeInfo() NodeInfo { return NodeInfo{} }
-func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} }
-func (mp *mockPeer) ID() ID { return mp.id }
-func (mp *mockPeer) IsOutbound() bool { return false }
-func (mp *mockPeer) IsPersistent() bool { return true }
-func (mp *mockPeer) Get(s string) interface{} { return s }
-func (mp *mockPeer) Set(string, interface{}) {}
-func (mp *mockPeer) RemoteIP() net.IP { return mp.ip }
-func (mp *mockPeer) SocketAddr() *NetAddress { return nil }
-func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
-func (mp *mockPeer) CloseConn() error { return nil }
-
-// Returns a mock peer
-func newMockPeer(ip net.IP) *mockPeer {
- if ip == nil {
- ip = net.IP{127, 0, 0, 1}
- }
- nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()}
- return &mockPeer{
- ip: ip,
- id: nodeKey.ID(),
- }
-}
-
-func TestPeerSetAddRemoveOne(t *testing.T) {
- t.Parallel()
-
- peerSet := NewPeerSet()
-
- var peerList []Peer
- for i := 0; i < 5; i++ {
- p := newMockPeer(net.IP{127, 0, 0, byte(i)})
- if err := peerSet.Add(p); err != nil {
- t.Error(err)
- }
- peerList = append(peerList, p)
- }
-
- n := len(peerList)
- // 1. Test removing from the front
- for i, peerAtFront := range peerList {
- removed := peerSet.Remove(peerAtFront)
- assert.True(t, removed)
- wantSize := n - i - 1
- for j := 0; j < 2; j++ {
- assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j)
- assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j)
- // Test the route of removing the now non-existent element
- removed := peerSet.Remove(peerAtFront)
- assert.False(t, removed)
- }
- }
-
- // 2. Next we are testing removing the peer at the end
- // a) Replenish the peerSet
- for _, peer := range peerList {
- if err := peerSet.Add(peer); err != nil {
- t.Error(err)
- }
- }
-
- // b) In reverse, remove each element
- for i := n - 1; i >= 0; i-- {
- peerAtEnd := peerList[i]
- removed := peerSet.Remove(peerAtEnd)
- assert.True(t, removed)
- assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i)
- assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i)
- }
-}
-
-func TestPeerSetAddRemoveMany(t *testing.T) {
- t.Parallel()
- peerSet := NewPeerSet()
-
- peers := []Peer{}
- N := 100
- for i := 0; i < N; i++ {
- peer := newMockPeer(net.IP{127, 0, 0, byte(i)})
- if err := peerSet.Add(peer); err != nil {
- t.Errorf("Failed to add new peer")
- }
- if peerSet.Size() != i+1 {
- t.Errorf("Failed to add new peer and increment size")
- }
- peers = append(peers, peer)
- }
-
- for i, peer := range peers {
- removed := peerSet.Remove(peer)
- assert.True(t, removed)
- if peerSet.Has(peer.ID()) {
- t.Errorf("Failed to remove peer")
- }
- if peerSet.Size() != len(peers)-i-1 {
- t.Errorf("Failed to remove peer and decrement size")
- }
- }
-}
-
-func TestPeerSetAddDuplicate(t *testing.T) {
- t.Parallel()
- peerSet := NewPeerSet()
- peer := newMockPeer(nil)
-
- n := 20
- errsChan := make(chan error)
- // Add the same asynchronously to test the
- // concurrent guarantees of our APIs, and
- // our expectation in the end is that only
- // one addition succeeded, but the rest are
- // instances of ErrSwitchDuplicatePeer.
- for i := 0; i < n; i++ {
- go func() {
- errsChan <- peerSet.Add(peer)
- }()
- }
-
- // Now collect and tally the results
- errsTally := make(map[string]int)
- for i := 0; i < n; i++ {
- err := <-errsChan
-
- switch err.(type) {
- case SwitchDuplicatePeerIDError:
- errsTally["duplicateID"]++
- default:
- errsTally["other"]++
- }
- }
-
- // Our next procedure is to ensure that only one addition
- // succeeded and that the rest are each ErrSwitchDuplicatePeer.
- wantErrCount, gotErrCount := n-1, errsTally["duplicateID"]
- assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count")
-
- wantNilErrCount, gotNilErrCount := 1, errsTally["other"]
- assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount")
-}
-
-func TestPeerSetGet(t *testing.T) {
- t.Parallel()
-
- var (
- peerSet = NewPeerSet()
- peer = newMockPeer(nil)
- )
-
- assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add")
-
- if err := peerSet.Add(peer); err != nil {
- t.Fatalf("Failed to add new peer: %v", err)
- }
-
- var wg sync.WaitGroup
- for i := 0; i < 10; i++ {
- // Add them asynchronously to test the
- // concurrent guarantees of our APIs.
- wg.Add(1)
- go func(i int) {
- defer wg.Done()
- have, want := peerSet.Get(peer.ID()), peer
- assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want)
- }(i)
- }
- wg.Wait()
-}
diff --git a/tm2/pkg/p2p/peer_test.go b/tm2/pkg/p2p/peer_test.go
index 28217c4486e..a74ea9e96a4 100644
--- a/tm2/pkg/p2p/peer_test.go
+++ b/tm2/pkg/p2p/peer_test.go
@@ -1,233 +1,630 @@
package p2p
import (
+ "errors"
"fmt"
- golog "log"
+ "io"
+ "log/slog"
"net"
"testing"
"time"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/gnolang/gno/tm2/pkg/crypto"
- "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
- "github.com/gnolang/gno/tm2/pkg/errors"
- "github.com/gnolang/gno/tm2/pkg/log"
+ "github.com/gnolang/gno/tm2/pkg/cmap"
"github.com/gnolang/gno/tm2/pkg/p2p/config"
"github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/mock"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ "github.com/gnolang/gno/tm2/pkg/service"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
-func TestPeerBasic(t *testing.T) {
+func TestPeer_Properties(t *testing.T) {
t.Parallel()
- assert, require := assert.New(t), require.New(t)
+ t.Run("connection info", func(t *testing.T) {
+ t.Parallel()
- // simulate remote peer
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- defer rp.Stop()
+ t.Run("remote IP", func(t *testing.T) {
+ t.Parallel()
- p, err := createOutboundPeerAndPerformHandshake(t, rp.Addr(), cfg, conn.DefaultMConnConfig())
- require.Nil(err)
+ var (
+ info = &ConnInfo{
+ RemoteIP: net.IP{127, 0, 0, 1},
+ }
- err = p.Start()
- require.Nil(err)
- defer p.Stop()
+ p = &peer{
+ connInfo: info,
+ }
+ )
- assert.True(p.IsRunning())
- assert.True(p.IsOutbound())
- assert.False(p.IsPersistent())
- p.persistent = true
- assert.True(p.IsPersistent())
- assert.Equal(rp.Addr().DialString(), p.RemoteAddr().String())
- assert.Equal(rp.ID(), p.ID())
-}
+ assert.Equal(t, info.RemoteIP, p.RemoteIP())
+ })
-func TestPeerSend(t *testing.T) {
- t.Parallel()
+ t.Run("remote address", func(t *testing.T) {
+ t.Parallel()
- assert, require := assert.New(t), require.New(t)
+ tcpAddr, err := net.ResolveTCPAddr("tcp", "localhost:8080")
+ require.NoError(t, err)
- config := cfg
+ var (
+ info = &ConnInfo{
+ Conn: &mock.Conn{
+ RemoteAddrFn: func() net.Addr {
+ return tcpAddr
+ },
+ },
+ }
- // simulate remote peer
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config}
- rp.Start()
- defer rp.Stop()
+ p = &peer{
+ connInfo: info,
+ }
+ )
- p, err := createOutboundPeerAndPerformHandshake(t, rp.Addr(), config, conn.DefaultMConnConfig())
- require.Nil(err)
+ assert.Equal(t, tcpAddr.String(), p.RemoteAddr().String())
+ })
- err = p.Start()
- require.Nil(err)
+ t.Run("socket address", func(t *testing.T) {
+ t.Parallel()
- defer p.Stop()
+ tcpAddr, err := net.ResolveTCPAddr("tcp", "localhost:8080")
+ require.NoError(t, err)
- assert.True(p.CanSend(testCh))
- assert.True(p.Send(testCh, []byte("Asylum")))
-}
+ netAddr, err := types.NewNetAddress(types.GenerateNodeKey().ID(), tcpAddr)
+ require.NoError(t, err)
-func createOutboundPeerAndPerformHandshake(
- t *testing.T,
- addr *NetAddress,
- config *config.P2PConfig,
- mConfig conn.MConnConfig,
-) (*peer, error) {
- t.Helper()
-
- chDescs := []*conn.ChannelDescriptor{
- {ID: testCh, Priority: 1},
- }
- reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)}
- pk := ed25519.GenPrivKey()
- pc, err := testOutboundPeerConn(addr, config, false, pk)
- if err != nil {
- return nil, err
- }
- timeout := 1 * time.Second
- ourNodeInfo := testNodeInfo(addr.ID, "host_peer")
- peerNodeInfo, err := handshake(pc.conn, timeout, ourNodeInfo)
- if err != nil {
- return nil, err
- }
-
- p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {})
- p.SetLogger(log.NewTestingLogger(t).With("peer", addr))
- return p, nil
-}
+ var (
+ info = &ConnInfo{
+ SocketAddr: netAddr,
+ }
+
+ p = &peer{
+ connInfo: info,
+ }
+ )
+
+ assert.Equal(t, netAddr.String(), p.SocketAddr().String())
+ })
+
+ t.Run("set logger", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ l = slog.New(slog.NewTextHandler(io.Discard, nil))
+
+ p = &peer{
+ mConn: &mock.MConn{},
+ }
+ )
+
+ p.SetLogger(l)
+
+ assert.Equal(t, l, p.Logger)
+ })
+
+ t.Run("peer start", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ expectedErr = errors.New("some error")
+
+ mConn = &mock.MConn{
+ StartFn: func() error {
+ return expectedErr
+ },
+ }
+
+ p = &peer{
+ mConn: mConn,
+ }
+ )
+
+ assert.ErrorIs(t, p.OnStart(), expectedErr)
+ })
+
+ t.Run("peer stop", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ stopCalled = false
+ expectedErr = errors.New("some error")
+
+ mConn = &mock.MConn{
+ StopFn: func() error {
+ stopCalled = true
+
+ return expectedErr
+ },
+ }
+
+ p = &peer{
+ mConn: mConn,
+ }
+ )
+
+ p.BaseService = *service.NewBaseService(nil, "Peer", p)
+
+ p.OnStop()
+
+ assert.True(t, stopCalled)
+ })
+
+ t.Run("flush stop", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ stopCalled = false
+
+ mConn = &mock.MConn{
+ FlushFn: func() {
+ stopCalled = true
+ },
+ }
+
+ p = &peer{
+ mConn: mConn,
+ }
+ )
+
+ p.BaseService = *service.NewBaseService(nil, "Peer", p)
+
+ p.FlushStop()
+
+ assert.True(t, stopCalled)
+ })
-func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) {
- if cfg.TestDialFail {
- return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)")
- }
+ t.Run("node info fetch", func(t *testing.T) {
+ t.Parallel()
- conn, err := addr.DialTimeout(cfg.DialTimeout)
- if err != nil {
- return nil, err
- }
- return conn, nil
+ var (
+ info = types.NodeInfo{
+ Network: "gnoland",
+ }
+
+ p = &peer{
+ nodeInfo: info,
+ }
+ )
+
+ assert.Equal(t, info, p.NodeInfo())
+ })
+
+ t.Run("node status fetch", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ status = conn.ConnectionStatus{
+ Duration: 5 * time.Second,
+ }
+
+ mConn = &mock.MConn{
+ StatusFn: func() conn.ConnectionStatus {
+ return status
+ },
+ }
+
+ p = &peer{
+ mConn: mConn,
+ }
+ )
+
+ assert.Equal(t, status, p.Status())
+ })
+
+ t.Run("string representation", func(t *testing.T) {
+ t.Parallel()
+
+ testTable := []struct {
+ name string
+ outbound bool
+ }{
+ {
+ "outbound",
+ true,
+ },
+ {
+ "inbound",
+ false,
+ },
+ }
+
+ for _, testCase := range testTable {
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ id = types.GenerateNodeKey().ID()
+ mConnStr = "description"
+
+ p = &peer{
+ mConn: &mock.MConn{
+ StringFn: func() string {
+ return mConnStr
+ },
+ },
+ nodeInfo: types.NodeInfo{
+ PeerID: id,
+ },
+ connInfo: &ConnInfo{
+ Outbound: testCase.outbound,
+ },
+ }
+
+ direction = "in"
+ )
+
+ if testCase.outbound {
+ direction = "out"
+ }
+
+ assert.Contains(
+ t,
+ p.String(),
+ fmt.Sprintf(
+ "Peer{%s %s %s}",
+ mConnStr,
+ id,
+ direction,
+ ),
+ )
+ })
+ }
+ })
+
+ t.Run("outbound information", func(t *testing.T) {
+ t.Parallel()
+
+ p := &peer{
+ connInfo: &ConnInfo{
+ Outbound: true,
+ },
+ }
+
+ assert.True(
+ t,
+ p.IsOutbound(),
+ )
+ })
+
+ t.Run("persistent information", func(t *testing.T) {
+ t.Parallel()
+
+ p := &peer{
+ connInfo: &ConnInfo{
+ Persistent: true,
+ },
+ }
+
+ assert.True(t, p.IsPersistent())
+ })
+
+ t.Run("initial conn close", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ closeErr = errors.New("close error")
+
+ mockConn = &mock.Conn{
+ CloseFn: func() error {
+ return closeErr
+ },
+ }
+
+ p = &peer{
+ connInfo: &ConnInfo{
+ Conn: mockConn,
+ },
+ }
+ )
+
+ assert.ErrorIs(t, p.CloseConn(), closeErr)
+ })
+ })
}
-func testOutboundPeerConn(
- addr *NetAddress,
- config *config.P2PConfig,
- persistent bool,
- ourNodePrivKey crypto.PrivKey,
-) (peerConn, error) {
- var pc peerConn
- conn, err := testDial(addr, config)
- if err != nil {
- return pc, errors.Wrap(err, "Error creating peer")
- }
-
- pc, err = testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr)
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- return pc, errors.Wrap(err, cerr.Error())
- }
- return pc, err
- }
+func TestPeer_GetSet(t *testing.T) {
+ t.Parallel()
- // ensure dialed ID matches connection ID
- if addr.ID != pc.ID() {
- if cerr := conn.Close(); cerr != nil {
- return pc, errors.Wrap(err, cerr.Error())
+ var (
+ key = "key"
+ data = []byte("random")
+
+ p = &peer{
+ data: cmap.NewCMap(),
}
- return pc, SwitchAuthenticationFailureError{addr, pc.ID()}
- }
+ )
- return pc, nil
-}
+ assert.Nil(t, p.Get(key))
-type remotePeer struct {
- PrivKey crypto.PrivKey
- Config *config.P2PConfig
- addr *NetAddress
- channels []byte
- listenAddr string
- listener net.Listener
-}
+ // Set the key
+ p.Set(key, data)
-func (rp *remotePeer) Addr() *NetAddress {
- return rp.addr
+ assert.Equal(t, data, p.Get(key))
}
-func (rp *remotePeer) ID() ID {
- return rp.PrivKey.PubKey().Address().ID()
-}
+func TestPeer_Send(t *testing.T) {
+ t.Parallel()
-func (rp *remotePeer) Start() {
- if rp.listenAddr == "" {
- rp.listenAddr = "127.0.0.1:0"
- }
-
- l, e := net.Listen("tcp", rp.listenAddr) // any available address
- if e != nil {
- golog.Fatalf("net.Listen tcp :0: %+v", e)
- }
- rp.listener = l
- rp.addr = NewNetAddress(rp.PrivKey.PubKey().Address().ID(), l.Addr())
- if rp.channels == nil {
- rp.channels = []byte{testCh}
- }
- go rp.accept()
-}
+ t.Run("peer not running", func(t *testing.T) {
+ t.Parallel()
-func (rp *remotePeer) Stop() {
- rp.listener.Close()
-}
+ var (
+ chID = byte(10)
+ data = []byte("random")
+
+ capturedSendID byte
+ capturedSendData []byte
+
+ mockConn = &mock.MConn{
+ SendFn: func(c byte, d []byte) bool {
+ capturedSendID = c
+ capturedSendData = d
+
+ return true
+ },
+ }
+
+ p = &peer{
+ nodeInfo: types.NodeInfo{
+ Channels: []byte{
+ chID,
+ },
+ },
+ mConn: mockConn,
+ }
+ )
+
+ p.BaseService = *service.NewBaseService(nil, "Peer", p)
+
+ // Make sure the send fails
+ require.False(t, p.Send(chID, data))
+
+ assert.Empty(t, capturedSendID)
+ assert.Nil(t, capturedSendData)
+ })
+
+ t.Run("peer doesn't have channel", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ chID = byte(10)
+ data = []byte("random")
+
+ capturedSendID byte
+ capturedSendData []byte
+
+ mockConn = &mock.MConn{
+ SendFn: func(c byte, d []byte) bool {
+ capturedSendID = c
+ capturedSendData = d
+
+ return true
+ },
+ }
+
+ p = &peer{
+ nodeInfo: types.NodeInfo{
+ Channels: []byte{},
+ },
+ mConn: mockConn,
+ }
+ )
+
+ p.BaseService = *service.NewBaseService(nil, "Peer", p)
+
+ // Start the peer "multiplexing"
+ require.NoError(t, p.Start())
+ t.Cleanup(func() {
+ require.NoError(t, p.Stop())
+ })
+
+ // Make sure the send fails
+ require.False(t, p.Send(chID, data))
+
+ assert.Empty(t, capturedSendID)
+ assert.Nil(t, capturedSendData)
+ })
-func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) {
- conn, err := addr.DialTimeout(1 * time.Second)
- if err != nil {
- return nil, err
- }
- pc, err := testInboundPeerConn(conn, rp.Config, rp.PrivKey)
- if err != nil {
- return nil, err
- }
- _, err = handshake(pc.conn, time.Second, rp.nodeInfo())
- if err != nil {
- return nil, err
- }
- return conn, err
+ t.Run("valid peer data send", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ chID = byte(10)
+ data = []byte("random")
+
+ capturedSendID byte
+ capturedSendData []byte
+
+ mockConn = &mock.MConn{
+ SendFn: func(c byte, d []byte) bool {
+ capturedSendID = c
+ capturedSendData = d
+
+ return true
+ },
+ }
+
+ p = &peer{
+ nodeInfo: types.NodeInfo{
+ Channels: []byte{
+ chID,
+ },
+ },
+ mConn: mockConn,
+ }
+ )
+
+ p.BaseService = *service.NewBaseService(nil, "Peer", p)
+
+ // Start the peer "multiplexing"
+ require.NoError(t, p.Start())
+ t.Cleanup(func() {
+ require.NoError(t, p.Stop())
+ })
+
+ // Make sure the send is valid
+ require.True(t, p.Send(chID, data))
+
+ assert.Equal(t, chID, capturedSendID)
+ assert.Equal(t, data, capturedSendData)
+ })
}
-func (rp *remotePeer) accept() {
- conns := []net.Conn{}
+func TestPeer_TrySend(t *testing.T) {
+ t.Parallel()
+
+ t.Run("peer not running", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ chID = byte(10)
+ data = []byte("random")
- for {
- conn, err := rp.listener.Accept()
- if err != nil {
- golog.Printf("Failed to accept conn: %+v", err)
- for _, conn := range conns {
- _ = conn.Close()
+ capturedSendID byte
+ capturedSendData []byte
+
+ mockConn = &mock.MConn{
+ TrySendFn: func(c byte, d []byte) bool {
+ capturedSendID = c
+ capturedSendData = d
+
+ return true
+ },
}
- return
- }
- pc, err := testInboundPeerConn(conn, rp.Config, rp.PrivKey)
- if err != nil {
- golog.Fatalf("Failed to create a peer: %+v", err)
- }
+ p = &peer{
+ nodeInfo: types.NodeInfo{
+ Channels: []byte{
+ chID,
+ },
+ },
+ mConn: mockConn,
+ }
+ )
- _, err = handshake(pc.conn, time.Second, rp.nodeInfo())
- if err != nil {
- golog.Fatalf("Failed to perform handshake: %+v", err)
- }
+ p.BaseService = *service.NewBaseService(nil, "Peer", p)
+
+ // Make sure the send fails
+ require.False(t, p.TrySend(chID, data))
+
+ assert.Empty(t, capturedSendID)
+ assert.Nil(t, capturedSendData)
+ })
+
+ t.Run("peer doesn't have channel", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ chID = byte(10)
+ data = []byte("random")
+
+ capturedSendID byte
+ capturedSendData []byte
+
+ mockConn = &mock.MConn{
+ TrySendFn: func(c byte, d []byte) bool {
+ capturedSendID = c
+ capturedSendData = d
+
+ return true
+ },
+ }
+
+ p = &peer{
+ nodeInfo: types.NodeInfo{
+ Channels: []byte{},
+ },
+ mConn: mockConn,
+ }
+ )
+
+ p.BaseService = *service.NewBaseService(nil, "Peer", p)
+
+ // Start the peer "multiplexing"
+ require.NoError(t, p.Start())
+ t.Cleanup(func() {
+ require.NoError(t, p.Stop())
+ })
+
+ // Make sure the send fails
+ require.False(t, p.TrySend(chID, data))
+
+ assert.Empty(t, capturedSendID)
+ assert.Nil(t, capturedSendData)
+ })
+
+ t.Run("valid peer data send", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ chID = byte(10)
+ data = []byte("random")
- conns = append(conns, conn)
- }
+ capturedSendID byte
+ capturedSendData []byte
+
+ mockConn = &mock.MConn{
+ TrySendFn: func(c byte, d []byte) bool {
+ capturedSendID = c
+ capturedSendData = d
+
+ return true
+ },
+ }
+
+ p = &peer{
+ nodeInfo: types.NodeInfo{
+ Channels: []byte{
+ chID,
+ },
+ },
+ mConn: mockConn,
+ }
+ )
+
+ p.BaseService = *service.NewBaseService(nil, "Peer", p)
+
+ // Start the peer "multiplexing"
+ require.NoError(t, p.Start())
+ t.Cleanup(func() {
+ require.NoError(t, p.Stop())
+ })
+
+ // Make sure the send is valid
+ require.True(t, p.TrySend(chID, data))
+
+ assert.Equal(t, chID, capturedSendID)
+ assert.Equal(t, data, capturedSendData)
+ })
}
-func (rp *remotePeer) nodeInfo() NodeInfo {
- return NodeInfo{
- VersionSet: testVersionSet(),
- NetAddress: rp.Addr(),
- Network: "testing",
- Version: "1.2.3-rc0-deadbeef",
- Channels: rp.channels,
- Moniker: "remote_peer",
- }
+func TestPeer_NewPeer(t *testing.T) {
+ t.Parallel()
+
+ tcpAddr, err := net.ResolveTCPAddr("tcp", "localhost:8080")
+ require.NoError(t, err)
+
+ netAddr, err := types.NewNetAddress(types.GenerateNodeKey().ID(), tcpAddr)
+ require.NoError(t, err)
+
+ var (
+ connInfo = &ConnInfo{
+ Outbound: false,
+ Persistent: true,
+ Conn: &mock.Conn{},
+ RemoteIP: tcpAddr.IP,
+ SocketAddr: netAddr,
+ }
+
+ mConfig = &ConnConfig{
+ MConfig: conn.MConfigFromP2P(config.DefaultP2PConfig()),
+ ReactorsByCh: make(map[byte]Reactor),
+ ChDescs: make([]*conn.ChannelDescriptor, 0),
+ OnPeerError: nil,
+ }
+ )
+
+ assert.NotPanics(t, func() {
+ _ = newPeer(connInfo, types.NodeInfo{}, mConfig)
+ })
}
diff --git a/tm2/pkg/p2p/set.go b/tm2/pkg/p2p/set.go
new file mode 100644
index 00000000000..b347c480b7e
--- /dev/null
+++ b/tm2/pkg/p2p/set.go
@@ -0,0 +1,121 @@
+package p2p
+
+import (
+ "sync"
+
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+)
+
+type set struct {
+ mux sync.RWMutex
+
+ peers map[types.ID]PeerConn
+ outbound uint64
+ inbound uint64
+}
+
+// newSet creates an empty peer set
+func newSet() *set {
+ return &set{
+ peers: make(map[types.ID]PeerConn),
+ outbound: 0,
+ inbound: 0,
+ }
+}
+
+// Add adds the peer to the set
+func (s *set) Add(peer PeerConn) {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+
+ s.peers[peer.ID()] = peer
+
+ if peer.IsOutbound() {
+ s.outbound += 1
+
+ return
+ }
+
+ s.inbound += 1
+}
+
+// Has returns true if the set contains the peer referred to by this
+// peerKey, otherwise false.
+func (s *set) Has(peerKey types.ID) bool {
+ s.mux.RLock()
+ defer s.mux.RUnlock()
+
+ _, exists := s.peers[peerKey]
+
+ return exists
+}
+
+// Get looks up a peer by the peer ID. Returns nil if peer is not
+// found.
+func (s *set) Get(key types.ID) PeerConn {
+ s.mux.RLock()
+ defer s.mux.RUnlock()
+
+ p, found := s.peers[key]
+ if !found {
+ // TODO change this to an error, it doesn't make
+ // sense to propagate an implementation detail like this
+ return nil
+ }
+
+ return p.(PeerConn)
+}
+
+// Remove discards peer by its Key, if the peer was previously memoized.
+// Returns true if the peer was removed, and false if it was not
+// found in the set.
+func (s *set) Remove(key types.ID) bool {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+
+ p, found := s.peers[key]
+ if !found {
+ return false
+ }
+
+ delete(s.peers, key)
+
+ if p.(PeerConn).IsOutbound() {
+ s.outbound -= 1
+
+ return true
+ }
+
+ s.inbound -= 1
+
+ return true
+}
+
+// NumInbound returns the number of inbound peers
+func (s *set) NumInbound() uint64 {
+ s.mux.RLock()
+ defer s.mux.RUnlock()
+
+ return s.inbound
+}
+
+// NumOutbound returns the number of outbound peers
+func (s *set) NumOutbound() uint64 {
+ s.mux.RLock()
+ defer s.mux.RUnlock()
+
+ return s.outbound
+}
+
+// List returns the list of peers
+func (s *set) List() []PeerConn {
+ s.mux.RLock()
+ defer s.mux.RUnlock()
+
+ peers := make([]PeerConn, 0)
+ for _, p := range s.peers {
+ peers = append(peers, p.(PeerConn))
+ }
+
+ return peers
+}
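For orientation while reviewing this file: the unexported set above backs the PeerSet value that (*MultiplexSwitch).Peers() returns later in this diff. The sketch below is a hypothetical read-side consumer, not part of the change; it assumes the PeerSet interface exposes the same accessors the set implements (Has, Get, List, NumInbound, NumOutbound) and that PeerConn exposes RemoteAddr(), as used elsewhere in this diff. The logPeerSummary helper and its arguments are illustrative only.

```go
package peersketch // hypothetical illustration, not part of this diff

import (
	"fmt"

	"github.com/gnolang/gno/tm2/pkg/p2p"
	"github.com/gnolang/gno/tm2/pkg/p2p/types"
)

// logPeerSummary reads the live peer table through the PeerSet view
// exposed by the switch. Every accessor below takes the set's read
// lock, so calling this concurrently with the accept/dial loops is safe.
func logPeerSummary(sw *p2p.MultiplexSwitch, id types.ID) {
	ps := sw.Peers()

	// Has/Get are keyed by the peer ID
	if ps.Has(id) {
		fmt.Println("connected to", ps.Get(id).RemoteAddr())
	}

	// The inbound/outbound counters are what the switch later uses
	// to enforce its connection limits
	fmt.Printf(
		"inbound=%d outbound=%d total=%d\n",
		ps.NumInbound(), ps.NumOutbound(), len(ps.List()),
	)
}
```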
diff --git a/tm2/pkg/p2p/set_test.go b/tm2/pkg/p2p/set_test.go
new file mode 100644
index 00000000000..ced35538a9b
--- /dev/null
+++ b/tm2/pkg/p2p/set_test.go
@@ -0,0 +1,146 @@
+package p2p
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/gnolang/gno/tm2/pkg/p2p/mock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSet_Add(t *testing.T) {
+ t.Parallel()
+
+ var (
+ numPeers = 100
+ peers = mock.GeneratePeers(t, numPeers)
+
+ s = newSet()
+ )
+
+ for _, peer := range peers {
+ // Add the peer
+ s.Add(peer)
+
+ // Make sure the peer is present
+ assert.True(t, s.Has(peer.ID()))
+ }
+
+ assert.EqualValues(t, numPeers, s.NumInbound()+s.NumOutbound())
+}
+
+func TestSet_Remove(t *testing.T) {
+ t.Parallel()
+
+ var (
+ numPeers = 100
+ peers = mock.GeneratePeers(t, numPeers)
+
+ s = newSet()
+ )
+
+ // Add the initial peers
+ for _, peer := range peers {
+ // Add the peer
+ s.Add(peer)
+
+ // Make sure the peer is present
+ require.True(t, s.Has(peer.ID()))
+ }
+
+ require.EqualValues(t, numPeers, s.NumInbound()+s.NumOutbound())
+
+ // Remove the peers
+ for _, peer := range peers {
+ // Remove the peer
+ s.Remove(peer.ID())
+
+ // Make sure the peer is no longer present
+ assert.False(t, s.Has(peer.ID()))
+ }
+}
+
+func TestSet_Get(t *testing.T) {
+ t.Parallel()
+
+ t.Run("existing peer", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ peers = mock.GeneratePeers(t, 100)
+ s = newSet()
+ )
+
+ for _, peer := range peers {
+ id := peer.ID()
+ s.Add(peer)
+
+ assert.True(t, s.Get(id).ID() == id)
+ }
+ })
+
+ t.Run("missing peer", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ peers = mock.GeneratePeers(t, 100)
+ s = newSet()
+ )
+
+ for _, peer := range peers {
+ s.Add(peer)
+ }
+
+ p := s.Get("random ID")
+ assert.Nil(t, p)
+ })
+}
+
+func TestSet_List(t *testing.T) {
+ t.Parallel()
+
+ t.Run("empty peer set", func(t *testing.T) {
+ t.Parallel()
+
+ // Empty set
+ s := newSet()
+
+ // Linearize the set
+ assert.Len(t, s.List(), 0)
+ })
+
+ t.Run("existing peer set", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ peers = mock.GeneratePeers(t, 100)
+ s = newSet()
+ )
+
+ for _, peer := range peers {
+ s.Add(peer)
+ }
+
+ // Linearize the set
+ listedPeers := s.List()
+
+ require.Len(t, listedPeers, len(peers))
+
+ // Make sure the lists are sorted
+ // for easier comparison
+ sort.Slice(listedPeers, func(i, j int) bool {
+ return listedPeers[i].ID() < listedPeers[j].ID()
+ })
+
+ sort.Slice(peers, func(i, j int) bool {
+ return peers[i].ID() < peers[j].ID()
+ })
+
+ // Compare the lists
+ for index, listedPeer := range listedPeers {
+ assert.Equal(t, listedPeer.ID(), peers[index].ID())
+ }
+ })
+}
diff --git a/tm2/pkg/p2p/switch.go b/tm2/pkg/p2p/switch.go
index 317f34e496b..5c1c37f7729 100644
--- a/tm2/pkg/p2p/switch.go
+++ b/tm2/pkg/p2p/switch.go
@@ -2,226 +2,164 @@ package p2p
import (
"context"
+ "crypto/rand"
"fmt"
"math"
+ "math/big"
"sync"
"time"
- "github.com/gnolang/gno/tm2/pkg/cmap"
- "github.com/gnolang/gno/tm2/pkg/errors"
"github.com/gnolang/gno/tm2/pkg/p2p/config"
"github.com/gnolang/gno/tm2/pkg/p2p/conn"
- "github.com/gnolang/gno/tm2/pkg/random"
+ "github.com/gnolang/gno/tm2/pkg/p2p/dial"
+ "github.com/gnolang/gno/tm2/pkg/p2p/events"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/gnolang/gno/tm2/pkg/service"
"github.com/gnolang/gno/tm2/pkg/telemetry"
"github.com/gnolang/gno/tm2/pkg/telemetry/metrics"
)
-const (
- // wait a random amount of time from this interval
- // before dialing peers or reconnecting to help prevent DoS
- dialRandomizerIntervalMilliseconds = 3000
+// defaultDialTimeout is the default wait time for a dial to succeed
+var defaultDialTimeout = 3 * time.Second
- // repeatedly try to reconnect for a few minutes
- // ie. 5 * 20 = 100s
- reconnectAttempts = 20
- reconnectInterval = 5 * time.Second
+type reactorPeerBehavior struct {
+ chDescs []*conn.ChannelDescriptor
+ reactorsByCh map[byte]Reactor
- // then move into exponential backoff mode for ~1day
- // ie. 3**10 = 16hrs
- reconnectBackOffAttempts = 10
- reconnectBackOffBaseSeconds = 3
-)
+ handlePeerErrFn func(PeerConn, error)
+ isPersistentPeerFn func(types.ID) bool
+ isPrivatePeerFn func(types.ID) bool
+}
+
+func (r *reactorPeerBehavior) ReactorChDescriptors() []*conn.ChannelDescriptor {
+ return r.chDescs
+}
+
+func (r *reactorPeerBehavior) Reactors() map[byte]Reactor {
+ return r.reactorsByCh
+}
-// MConnConfig returns an MConnConfig with fields updated
-// from the P2PConfig.
-func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig {
- mConfig := conn.DefaultMConnConfig()
- mConfig.FlushThrottle = cfg.FlushThrottleTimeout
- mConfig.SendRate = cfg.SendRate
- mConfig.RecvRate = cfg.RecvRate
- mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize
- return mConfig
+func (r *reactorPeerBehavior) HandlePeerError(p PeerConn, err error) {
+ r.handlePeerErrFn(p, err)
}
-// PeerFilterFunc to be implemented by filter hooks after a new Peer has been
-// fully setup.
-type PeerFilterFunc func(IPeerSet, Peer) error
+func (r *reactorPeerBehavior) IsPersistentPeer(id types.ID) bool {
+ return r.isPersistentPeerFn(id)
+}
-// -----------------------------------------------------------------------------
+func (r *reactorPeerBehavior) IsPrivatePeer(id types.ID) bool {
+ return r.isPrivatePeerFn(id)
+}
-// Switch handles peer connections and exposes an API to receive incoming messages
+// MultiplexSwitch handles peer connections and exposes an API to receive incoming messages
// on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
// or more `Channels`. So while sending outgoing messages is typically performed on the peer,
// incoming messages are received on the reactor.
-type Switch struct {
+type MultiplexSwitch struct {
service.BaseService
- config *config.P2PConfig
- reactors map[string]Reactor
- chDescs []*conn.ChannelDescriptor
- reactorsByCh map[byte]Reactor
- peers *PeerSet
- dialing *cmap.CMap
- reconnecting *cmap.CMap
- nodeInfo NodeInfo // our node info
- nodeKey *NodeKey // our node privkey
- // peers addresses with whom we'll maintain constant connection
- persistentPeersAddrs []*NetAddress
+ ctx context.Context
+ cancelFn context.CancelFunc
- transport Transport
+ maxInboundPeers uint64
+ maxOutboundPeers uint64
- filterTimeout time.Duration
- peerFilters []PeerFilterFunc
+ reactors map[string]Reactor
+ peerBehavior *reactorPeerBehavior
- rng *random.Rand // seed for randomizing dial times and orders
-}
+ peers PeerSet // currently active peer set (live connections)
+ persistentPeers sync.Map // ID -> *NetAddress; peers whose connections are constant
+ privatePeers sync.Map // ID -> nothing; lookup table of peers who are not shared
+ transport Transport
-// NetAddress returns the address the switch is listening on.
-func (sw *Switch) NetAddress() *NetAddress {
- addr := sw.transport.NetAddress()
- return &addr
+ dialQueue *dial.Queue
+ events *events.Events
}
-// SwitchOption sets an optional parameter on the Switch.
-type SwitchOption func(*Switch)
-
-// NewSwitch creates a new Switch with the given config.
-func NewSwitch(
- cfg *config.P2PConfig,
+// NewMultiplexSwitch creates a new MultiplexSwitch with the given config.
+func NewMultiplexSwitch(
transport Transport,
- options ...SwitchOption,
-) *Switch {
- sw := &Switch{
- config: cfg,
- reactors: make(map[string]Reactor),
- chDescs: make([]*conn.ChannelDescriptor, 0),
- reactorsByCh: make(map[byte]Reactor),
- peers: NewPeerSet(),
- dialing: cmap.NewCMap(),
- reconnecting: cmap.NewCMap(),
- transport: transport,
- filterTimeout: defaultFilterTimeout,
- persistentPeersAddrs: make([]*NetAddress, 0),
- }
-
- // Ensure we have a completely undeterministic PRNG.
- sw.rng = random.NewRand()
-
- sw.BaseService = *service.NewBaseService(nil, "P2P Switch", sw)
-
- for _, option := range options {
- option(sw)
- }
-
- return sw
-}
+ opts ...SwitchOption,
+) *MultiplexSwitch {
+ defaultCfg := config.DefaultP2PConfig()
-// SwitchFilterTimeout sets the timeout used for peer filters.
-func SwitchFilterTimeout(timeout time.Duration) SwitchOption {
- return func(sw *Switch) { sw.filterTimeout = timeout }
-}
-
-// SwitchPeerFilters sets the filters for rejection of new peers.
-func SwitchPeerFilters(filters ...PeerFilterFunc) SwitchOption {
- return func(sw *Switch) { sw.peerFilters = filters }
-}
-
-// ---------------------------------------------------------------------
-// Switch setup
-
-// AddReactor adds the given reactor to the switch.
-// NOTE: Not goroutine safe.
-func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
- for _, chDesc := range reactor.GetChannels() {
- chID := chDesc.ID
- // No two reactors can share the same channel.
- if sw.reactorsByCh[chID] != nil {
- panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor))
- }
- sw.chDescs = append(sw.chDescs, chDesc)
- sw.reactorsByCh[chID] = reactor
+ sw := &MultiplexSwitch{
+ reactors: make(map[string]Reactor),
+ peers: newSet(),
+ transport: transport,
+ dialQueue: dial.NewQueue(),
+ events: events.New(),
+ maxInboundPeers: defaultCfg.MaxNumInboundPeers,
+ maxOutboundPeers: defaultCfg.MaxNumOutboundPeers,
}
- sw.reactors[name] = reactor
- reactor.SetSwitch(sw)
- return reactor
-}
-// RemoveReactor removes the given Reactor from the Switch.
-// NOTE: Not goroutine safe.
-func (sw *Switch) RemoveReactor(name string, reactor Reactor) {
- for _, chDesc := range reactor.GetChannels() {
- // remove channel description
- for i := 0; i < len(sw.chDescs); i++ {
- if chDesc.ID == sw.chDescs[i].ID {
- sw.chDescs = append(sw.chDescs[:i], sw.chDescs[i+1:]...)
- break
- }
- }
- delete(sw.reactorsByCh, chDesc.ID)
+ // Set up the peer dial behavior
+ sw.peerBehavior = &reactorPeerBehavior{
+ chDescs: make([]*conn.ChannelDescriptor, 0),
+ reactorsByCh: make(map[byte]Reactor),
+ handlePeerErrFn: sw.StopPeerForError,
+ isPersistentPeerFn: func(id types.ID) bool {
+ return sw.isPersistentPeer(id)
+ },
+ isPrivatePeerFn: func(id types.ID) bool {
+ return sw.isPrivatePeer(id)
+ },
}
- delete(sw.reactors, name)
- reactor.SetSwitch(nil)
-}
-// Reactors returns a map of reactors registered on the switch.
-// NOTE: Not goroutine safe.
-func (sw *Switch) Reactors() map[string]Reactor {
- return sw.reactors
-}
+ sw.BaseService = *service.NewBaseService(nil, "P2P MultiplexSwitch", sw)
-// Reactor returns the reactor with the given name.
-// NOTE: Not goroutine safe.
-func (sw *Switch) Reactor(name string) Reactor {
- return sw.reactors[name]
-}
+ // Set up the context
+ sw.ctx, sw.cancelFn = context.WithCancel(context.Background())
-// SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes.
-// NOTE: Not goroutine safe.
-func (sw *Switch) SetNodeInfo(nodeInfo NodeInfo) {
- sw.nodeInfo = nodeInfo
-}
+ // Apply the options
+ for _, opt := range opts {
+ opt(sw)
+ }
-// NodeInfo returns the switch's NodeInfo.
-// NOTE: Not goroutine safe.
-func (sw *Switch) NodeInfo() NodeInfo {
- return sw.nodeInfo
+ return sw
}
-// SetNodeKey sets the switch's private key for authenticated encryption.
-// NOTE: Not goroutine safe.
-func (sw *Switch) SetNodeKey(nodeKey *NodeKey) {
- sw.nodeKey = nodeKey
+// Subscribe registers to live events happening on the p2p Switch.
+// Returns the notification channel, along with an unsubscribe method
+func (sw *MultiplexSwitch) Subscribe(filterFn events.EventFilter) (<-chan events.Event, func()) {
+ return sw.events.Subscribe(filterFn)
}
// ---------------------------------------------------------------------
// Service start/stop
// OnStart implements BaseService. It starts all the reactors and peers.
-func (sw *Switch) OnStart() error {
+func (sw *MultiplexSwitch) OnStart() error {
// Start reactors
for _, reactor := range sw.reactors {
- err := reactor.Start()
- if err != nil {
- return errors.Wrapf(err, "failed to start %v", reactor)
+ if err := reactor.Start(); err != nil {
+ return fmt.Errorf("unable to start reactor %w", err)
}
}
- // Start accepting Peers.
- go sw.acceptRoutine()
+ // Run the peer accept routine.
+ // The accept routine asynchronously accepts
+ // and processes incoming peer connections
+ go sw.runAcceptLoop(sw.ctx)
+
+ // Run the dial routine.
+ // The dial routine parses items in the dial queue
+ // and initiates outbound peer connections
+ go sw.runDialLoop(sw.ctx)
+
+ // Run the redial routine.
+ // The redial routine monitors for important
+ // peer disconnects, and attempts to reconnect
+ // to them
+ go sw.runRedialLoop(sw.ctx)
return nil
}
// OnStop implements BaseService. It stops all peers and reactors.
-func (sw *Switch) OnStop() {
- // Stop transport
- if t, ok := sw.transport.(TransportLifecycle); ok {
- err := t.Close()
- if err != nil {
- sw.Logger.Error("Error stopping transport on stop: ", "error", err)
- }
- }
+func (sw *MultiplexSwitch) OnStop() {
+ // Close all hanging threads
+ sw.cancelFn()
// Stop peers
for _, p := range sw.peers.List() {
@@ -229,465 +167,504 @@ func (sw *Switch) OnStop() {
}
// Stop reactors
- sw.Logger.Debug("Switch: Stopping reactors")
for _, reactor := range sw.reactors {
- reactor.Stop()
- }
-}
-
-// ---------------------------------------------------------------------
-// Peers
-
-// Broadcast runs a go routine for each attempted send, which will block trying
-// to send for defaultSendTimeoutSeconds. Returns a channel which receives
-// success values for each attempted send (false if times out). Channel will be
-// closed once msg bytes are sent to all peers (or time out).
-//
-// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
-func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool {
- startTime := time.Now()
-
- sw.Logger.Debug(
- "Broadcast",
- "channel", chID,
- "value", fmt.Sprintf("%X", msgBytes),
- )
-
- peers := sw.peers.List()
- var wg sync.WaitGroup
- wg.Add(len(peers))
- successChan := make(chan bool, len(peers))
-
- for _, peer := range peers {
- go func(p Peer) {
- defer wg.Done()
- success := p.Send(chID, msgBytes)
- successChan <- success
- }(peer)
- }
-
- go func() {
- wg.Wait()
- close(successChan)
- if telemetry.MetricsEnabled() {
- metrics.BroadcastTxTimer.Record(context.Background(), time.Since(startTime).Milliseconds())
- }
- }()
-
- return successChan
-}
-
-// NumPeers returns the count of outbound/inbound and outbound-dialing peers.
-func (sw *Switch) NumPeers() (outbound, inbound, dialing int) {
- peers := sw.peers.List()
- for _, peer := range peers {
- if peer.IsOutbound() {
- outbound++
- } else {
- inbound++
+ if err := reactor.Stop(); err != nil {
+ sw.Logger.Error("unable to gracefully stop reactor", "err", err)
}
}
- dialing = sw.dialing.Size()
- return
}
-// MaxNumOutboundPeers returns a maximum number of outbound peers.
-func (sw *Switch) MaxNumOutboundPeers() int {
- return sw.config.MaxNumOutboundPeers
+// Broadcast broadcasts the given data to the given channel,
+// across the entire switch peer set, without blocking
+func (sw *MultiplexSwitch) Broadcast(chID byte, data []byte) {
+ for _, p := range sw.peers.List() {
+ go func() {
+ // This send context is managed internally
+ // by the Peer's underlying connection implementation
+ if !p.Send(chID, data) {
+ sw.Logger.Error(
+ "unable to perform broadcast",
+ "chID", chID,
+ "peerID", p.ID(),
+ )
+ }
+ }()
+ }
}
// Peers returns the set of peers that are connected to the switch.
-func (sw *Switch) Peers() IPeerSet {
+func (sw *MultiplexSwitch) Peers() PeerSet {
return sw.peers
}
// StopPeerForError disconnects from a peer due to external error.
-// If the peer is persistent, it will attempt to reconnect.
-// TODO: make record depending on reason.
-func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) {
- sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason)
- sw.stopAndRemovePeer(peer, reason)
-
- if peer.IsPersistent() {
- var addr *NetAddress
- if peer.IsOutbound() { // socket address for outbound peers
- addr = peer.SocketAddr()
- } else { // self-reported address for inbound peers
- addr = peer.NodeInfo().NetAddress
- }
- go sw.reconnectToPeer(addr)
+// If the peer is persistent, it will attempt to reconnect
+func (sw *MultiplexSwitch) StopPeerForError(peer PeerConn, err error) {
+ sw.Logger.Error("Stopping peer for error", "peer", peer, "err", err)
+
+ sw.stopAndRemovePeer(peer, err)
+
+ if !peer.IsPersistent() {
+ // Peer is not a persistent peer,
+ // no need to initiate a redial
+ return
}
-}
-// StopPeerGracefully disconnects from a peer gracefully.
-// TODO: handle graceful disconnects.
-func (sw *Switch) StopPeerGracefully(peer Peer) {
- sw.Logger.Info("Stopping peer gracefully")
- sw.stopAndRemovePeer(peer, nil)
+ // Add the peer to the dial queue
+ sw.DialPeers(peer.SocketAddr())
}
-func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
- sw.transport.Cleanup(peer)
- peer.Stop()
+func (sw *MultiplexSwitch) stopAndRemovePeer(peer PeerConn, err error) {
+ // Remove the peer from the transport
+ sw.transport.Remove(peer)
+
+ // Close the (original) peer connection
+ if closeErr := peer.CloseConn(); closeErr != nil {
+ sw.Logger.Error(
+ "unable to gracefully close peer connection",
+ "peer", peer,
+ "err", closeErr,
+ )
+ }
+
+ // Stop the peer connection multiplexing
+ if stopErr := peer.Stop(); stopErr != nil {
+ sw.Logger.Error(
+ "unable to gracefully stop peer",
+ "peer", peer,
+ "err", stopErr,
+ )
+ }
+ // Alert the reactors of a peer removal
for _, reactor := range sw.reactors {
- reactor.RemovePeer(peer, reason)
+ reactor.RemovePeer(peer, err)
}
// Removing a peer should go last to avoid a situation where a peer
// reconnect to our node and the switch calls InitPeer before
// RemovePeer is finished.
// https://github.com/tendermint/classic/issues/3338
- sw.peers.Remove(peer)
+ sw.peers.Remove(peer.ID())
+
+ sw.events.Notify(events.PeerDisconnectedEvent{
+ Address: peer.RemoteAddr(),
+ PeerID: peer.ID(),
+ Reason: err,
+ })
}
-// reconnectToPeer tries to reconnect to the addr, first repeatedly
-// with a fixed interval, then with exponential backoff.
-// If no success after all that, it stops trying.
-// NOTE: this will keep trying even if the handshake or auth fails.
-// TODO: be more explicit with error types so we only retry on certain failures
-// - ie. if we're getting ErrDuplicatePeer we can stop
-func (sw *Switch) reconnectToPeer(addr *NetAddress) {
- if sw.reconnecting.Has(addr.ID.String()) {
- return
- }
- sw.reconnecting.Set(addr.ID.String(), addr)
- defer sw.reconnecting.Delete(addr.ID.String())
+// ---------------------------------------------------------------------
+// Dialing
- start := time.Now()
- sw.Logger.Info("Reconnecting to peer", "addr", addr)
- for i := 0; i < reconnectAttempts; i++ {
- if !sw.IsRunning() {
- return
- }
+func (sw *MultiplexSwitch) runDialLoop(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ sw.Logger.Debug("dial context canceled")
- err := sw.DialPeerWithAddress(addr)
- if err == nil {
- return // success
- } else if _, ok := err.(CurrentlyDialingOrExistingAddressError); ok {
return
- }
+ default:
+ // Grab a dial item
+ item := sw.dialQueue.Peek()
+ if item == nil {
+ // Nothing to dial
+ continue
+ }
- sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr)
- // sleep a set amount
- sw.randomSleep(reconnectInterval)
- continue
- }
+ // Check if the dial time is right
+ // for the item
+ if time.Now().Before(item.Time) {
+ // Nothing to dial
+ continue
+ }
- sw.Logger.Error("Failed to reconnect to peer. Beginning exponential backoff",
- "addr", addr, "elapsed", time.Since(start))
- for i := 0; i < reconnectBackOffAttempts; i++ {
- if !sw.IsRunning() {
- return
- }
+ // Pop the item from the dial queue
+ item = sw.dialQueue.Pop()
- // sleep an exponentially increasing amount
- sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i))
- sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second)
+ // Dial the peer
+ sw.Logger.Info(
+ "dialing peer",
+ "address", item.Address.String(),
+ )
- err := sw.DialPeerWithAddress(addr)
- if err == nil {
- return // success
- } else if _, ok := err.(CurrentlyDialingOrExistingAddressError); ok {
- return
- }
- sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr)
- }
- sw.Logger.Error("Failed to reconnect to peer. Giving up", "addr", addr, "elapsed", time.Since(start))
-}
+ peerAddr := item.Address
-// ---------------------------------------------------------------------
-// Dialing
+ // Check if the peer is already connected
+ ps := sw.Peers()
+ if ps.Has(peerAddr.ID) {
+ sw.Logger.Warn(
+ "ignoring dial request for existing peer",
+ "id", peerAddr.ID,
+ )
-// DialPeersAsync dials a list of peers asynchronously in random order.
-// Used to dial peers from config on startup or from unsafe-RPC (trusted sources).
-// It ignores NetAddressLookupError. However, if there are other errors, first
-// encounter is returned.
-// Nop if there are no peers.
-func (sw *Switch) DialPeersAsync(peers []string) error {
- netAddrs, errs := NewNetAddressFromStrings(peers)
- // report all the errors
- for _, err := range errs {
- sw.Logger.Error("Error in peer's address", "err", err)
- }
- // return first non-NetAddressLookupError error
- for _, err := range errs {
- if _, ok := err.(NetAddressLookupError); ok {
- continue
- }
- return err
- }
- sw.dialPeersAsync(netAddrs)
- return nil
-}
+ continue
+ }
-func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) {
- ourAddr := sw.NetAddress()
+ // Create a dial context
+ dialCtx, cancelFn := context.WithTimeout(ctx, defaultDialTimeout)
+ defer cancelFn()
- // permute the list, dial them in random order.
- perm := sw.rng.Perm(len(netAddrs))
- for i := 0; i < len(perm); i++ {
- go func(i int) {
- j := perm[i]
- addr := netAddrs[j]
+ p, err := sw.transport.Dial(dialCtx, *peerAddr, sw.peerBehavior)
+ if err != nil {
+ sw.Logger.Error(
+ "unable to dial peer",
+ "peer", peerAddr,
+ "err", err,
+ )
- if addr.Same(ourAddr) {
- sw.Logger.Debug("Ignore attempt to connect to ourselves", "addr", addr, "ourAddr", ourAddr)
- return
+ continue
}
- sw.randomSleep(0)
+ // Register the peer with the switch
+ if err = sw.addPeer(p); err != nil {
+ sw.Logger.Error(
+ "unable to add peer",
+ "peer", p,
+ "err", err,
+ )
- err := sw.DialPeerWithAddress(addr)
- if err != nil {
- switch err.(type) {
- case SwitchConnectToSelfError, SwitchDuplicatePeerIDError, CurrentlyDialingOrExistingAddressError:
- sw.Logger.Debug("Error dialing peer", "err", err)
- default:
- sw.Logger.Error("Error dialing peer", "err", err)
+ sw.transport.Remove(p)
+
+ if !p.IsRunning() {
+ continue
+ }
+
+ if stopErr := p.Stop(); stopErr != nil {
+ sw.Logger.Error(
+ "unable to gracefully stop peer",
+ "peer", p,
+ "err", stopErr,
+ )
}
}
- }(i)
+
+ // Log the telemetry
+ sw.logTelemetry()
+ }
}
}
-// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects
-// and authenticates successfully.
-// If we're currently dialing this address or it belongs to an existing peer,
-// CurrentlyDialingOrExistingAddressError is returned.
-func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error {
- if sw.IsDialingOrExistingAddress(addr) {
- return CurrentlyDialingOrExistingAddressError{addr.String()}
+// runRedialLoop starts the persistent peer redial loop
+func (sw *MultiplexSwitch) runRedialLoop(ctx context.Context) {
+ ticker := time.NewTicker(time.Second * 5)
+ defer ticker.Stop()
+
+ type backoffItem struct {
+ lastDialTime time.Time
+ attempts int
}
- sw.dialing.Set(addr.ID.String(), addr)
- defer sw.dialing.Delete(addr.ID.String())
+ var (
+ backoffMap = make(map[types.ID]*backoffItem)
- return sw.addOutboundPeerWithConfig(addr, sw.config)
-}
+ mux sync.RWMutex
+ )
-// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds]
-func (sw *Switch) randomSleep(interval time.Duration) {
- r := time.Duration(sw.rng.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond
- time.Sleep(r + interval)
-}
+ setBackoffItem := func(id types.ID, item *backoffItem) {
+ mux.Lock()
+ defer mux.Unlock()
-// IsDialingOrExistingAddress returns true if switch has a peer with the given
-// address or dialing it at the moment.
-func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool {
- return sw.dialing.Has(addr.ID.String()) ||
- sw.peers.Has(addr.ID) ||
- (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP))
-}
+ backoffMap[id] = item
+ }
-// AddPersistentPeers allows you to set persistent peers. It ignores
-// NetAddressLookupError. However, if there are other errors, first encounter is
-// returned.
-func (sw *Switch) AddPersistentPeers(addrs []string) error {
- sw.Logger.Info("Adding persistent peers", "addrs", addrs)
- netAddrs, errs := NewNetAddressFromStrings(addrs)
- // report all the errors
- for _, err := range errs {
- sw.Logger.Error("Error in peer's address", "err", err)
- }
- // return first non-NetAddressLookupError error
- for _, err := range errs {
- if _, ok := err.(NetAddressLookupError); ok {
- continue
- }
- return err
+ getBackoffItem := func(id types.ID) *backoffItem {
+ mux.RLock()
+ defer mux.RUnlock()
+
+ return backoffMap[id]
}
- sw.persistentPeersAddrs = netAddrs
- return nil
-}
-func (sw *Switch) isPeerPersistentFn() func(*NetAddress) bool {
- return func(na *NetAddress) bool {
- for _, pa := range sw.persistentPeersAddrs {
- if pa.Equals(na) {
+ clearBackoffItem := func(id types.ID) {
+ mux.Lock()
+ defer mux.Unlock()
+
+ delete(backoffMap, id)
+ }
+
+ subCh, unsubFn := sw.Subscribe(func(event events.Event) bool {
+ if event.Type() != events.PeerConnected {
+ return false
+ }
+
+ ev := event.(events.PeerConnectedEvent)
+
+ return sw.isPersistentPeer(ev.PeerID)
+ })
+ defer unsubFn()
+
+ // redialFn goes through the persistent peer list
+ // and dials missing peers
+ redialFn := func() {
+ var (
+ peers = sw.Peers()
+ peersToDial = make([]*types.NetAddress, 0)
+ )
+
+ sw.persistentPeers.Range(func(key, value any) bool {
+ var (
+ id = key.(types.ID)
+ addr = value.(*types.NetAddress)
+ )
+
+ // Check if the peer is part of the peer set
+ // or is scheduled for dialing
+ if peers.Has(id) || sw.dialQueue.Has(addr) {
return true
}
- }
- return false
- }
-}
-func (sw *Switch) acceptRoutine() {
- for {
- p, err := sw.transport.Accept(peerConfig{
- chDescs: sw.chDescs,
- onPeerError: sw.StopPeerForError,
- reactorsByCh: sw.reactorsByCh,
- isPersistent: sw.isPeerPersistentFn(),
+ peersToDial = append(peersToDial, addr)
+
+ return true
})
- if err != nil {
- switch err := err.(type) {
- case RejectedError:
- if err.IsSelf() {
- // TODO: warn?
- }
- sw.Logger.Info(
- "Inbound Peer rejected",
- "err", err,
- "numPeers", sw.peers.Size(),
- )
+ if len(peersToDial) == 0 {
+ // No persistent peers are missing
+ return
+ }
- continue
- case FilterTimeoutError:
- sw.Logger.Error(
- "Peer filter timed out",
- "err", err,
- )
+ // Calculate the dial items
+ dialItems := make([]dial.Item, 0, len(peersToDial))
+ for _, p := range peersToDial {
+ item := getBackoffItem(p.ID)
+ if item == nil {
+ dialItem := dial.Item{
+ Time: time.Now(),
+ Address: p,
+ }
+
+ dialItems = append(dialItems, dialItem)
+ setBackoffItem(p.ID, &backoffItem{dialItem.Time, 0})
continue
- case TransportClosedError:
- sw.Logger.Error(
- "Stopped accept routine, as transport is closed",
- "numPeers", sw.peers.Size(),
- )
- default:
- sw.Logger.Error(
- "Accept on transport errored",
- "err", err,
- "numPeers", sw.peers.Size(),
- )
- // We could instead have a retry loop around the acceptRoutine,
- // but that would need to stop and let the node shutdown eventually.
- // So might as well panic and let process managers restart the node.
- // There's no point in letting the node run without the acceptRoutine,
- // since it won't be able to accept new connections.
- panic(fmt.Errorf("accept routine exited: %w", err))
}
- break
+ setBackoffItem(p.ID, &backoffItem{
+ lastDialTime: time.Now().Add(
+ calculateBackoff(
+ item.attempts,
+ time.Second,
+ 10*time.Minute,
+ ),
+ ),
+ attempts: item.attempts + 1,
+ })
}
- // Ignore connection if we already have enough peers.
- _, in, _ := sw.NumPeers()
- if in >= sw.config.MaxNumInboundPeers {
- sw.Logger.Info(
- "Ignoring inbound connection: already have enough inbound peers",
- "address", p.SocketAddr(),
- "have", in,
- "max", sw.config.MaxNumInboundPeers,
- )
+ // Add the peers to the dial queue
+ sw.dialItems(dialItems...)
+ }
+
+ // Run the initial redial loop on start,
+ // in case persistent peer connections are not
+ // active
+ redialFn()
+
+ for {
+ select {
+ case <-ctx.Done():
+ sw.Logger.Debug("redial crawl context canceled")
+
+ return
+ case <-ticker.C:
+ redialFn()
+ case event := <-subCh:
+ // A persistent peer reconnected,
+ // clear their redial queue
+ ev := event.(events.PeerConnectedEvent)
+
+ clearBackoffItem(ev.PeerID)
+ }
+ }
+}
+
+// calculateBackoff calculates a backoff time,
+// based on the number of attempts and range limits
+func calculateBackoff(
+ attempts int,
+ minTimeout time.Duration,
+ maxTimeout time.Duration,
+) time.Duration {
+ var (
+ minTime = time.Second * 1
+ maxTime = time.Second * 60
+ multiplier = float64(2) // exponential
+ )
+
+ // Check the min limit
+ if minTimeout > 0 {
+ minTime = minTimeout
+ }
+
+ // Check the max limit
+ if maxTimeout > 0 {
+ maxTime = maxTimeout
+ }
+
+ // Sanity check the range
+ if minTime >= maxTime {
+ return maxTime
+ }
+
+ // Calculate the backoff duration
+ var (
+ base = float64(minTime)
+ calculated = base * math.Pow(multiplier, float64(attempts))
+ )
+
+ // Attempt to calculate the jitter factor
+ n, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
+ if err == nil {
+ jitterFactor := float64(n.Int64()) / float64(math.MaxInt64) // range [0, 1]
+
+ calculated = jitterFactor*(calculated-base) + base
+ }
- sw.transport.Cleanup(p)
+ // Prevent overflow for int64 (duration) cast
+ if calculated > float64(math.MaxInt64) {
+ return maxTime
+ }
+
+ duration := time.Duration(calculated)
+
+ // Clamp the duration within bounds
+ if duration < minTime {
+ return minTime
+ }
+ if duration > maxTime {
+ return maxTime
+ }
+
+ return duration
+}
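To make the schedule above concrete, here is a small, self-contained sketch (not part of the diff) that reproduces calculateBackoff with the crypto/rand jitter pinned to its maximum; in the real function the wait lands uniformly between the minimum and the exponential value shown here, and is then clamped to the [min, max] range.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// backoffNoJitter mirrors calculateBackoff with a fixed jitter factor of 1:
// wait = minTimeout * 2^attempts, clamped to maxTimeout.
func backoffNoJitter(attempts int, minTimeout, maxTimeout time.Duration) time.Duration {
	calculated := float64(minTimeout) * math.Pow(2, float64(attempts))

	if calculated > float64(maxTimeout) {
		return maxTimeout
	}

	return time.Duration(calculated)
}

func main() {
	// The redial loop above calls calculateBackoff(attempts, time.Second, 10*time.Minute)
	for attempts := 0; attempts <= 10; attempts++ {
		fmt.Printf("attempt %2d -> wait up to %v\n",
			attempts, backoffNoJitter(attempts, time.Second, 10*time.Minute))
	}
	// attempt 0 -> 1s, 1 -> 2s, 2 -> 4s, ..., 9 -> 8m32s, 10 -> 10m0s (clamped)
}
```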
+
+// DialPeers adds the peers to the dial queue for async dialing.
+// To monitor dial progress, subscribe to the relevant p2p MultiplexSwitch events
+func (sw *MultiplexSwitch) DialPeers(peerAddrs ...*types.NetAddress) {
+ for _, peerAddr := range peerAddrs {
+ // Check if this is our address
+ if peerAddr.Same(sw.transport.NetAddress()) {
continue
}
- if err := sw.addPeer(p); err != nil {
- sw.transport.Cleanup(p)
- if p.IsRunning() {
- _ = p.Stop()
- }
- sw.Logger.Info(
- "Ignoring inbound connection: error while adding peer",
- "err", err,
- "id", p.ID(),
+ // Ignore dial if the limit is reached
+ if out := sw.Peers().NumOutbound(); out >= sw.maxOutboundPeers {
+ sw.Logger.Warn(
+ "ignoring dial request: already have max outbound peers",
+ "have", out,
+ "max", sw.maxOutboundPeers,
)
+
+ continue
}
+
+ item := dial.Item{
+ Time: time.Now(),
+ Address: peerAddr,
+ }
+
+ sw.dialQueue.Push(item)
}
}
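Since dialing is now fully asynchronous, callers observe progress through the event subsystem rather than a return value. A hedged sketch of that pattern follows; dialAndWait, its timeout, and the surrounding package are illustrative, while Subscribe, DialPeers, and the PeerConnected event types are taken from this diff.

```go
package dialsketch // hypothetical illustration, not part of this diff

import (
	"errors"
	"time"

	"github.com/gnolang/gno/tm2/pkg/p2p"
	"github.com/gnolang/gno/tm2/pkg/p2p/events"
	"github.com/gnolang/gno/tm2/pkg/p2p/types"
)

// dialAndWait queues a single dial on the switch, then blocks until the
// matching PeerConnected event fires or an (illustrative) timeout expires.
func dialAndWait(sw *p2p.MultiplexSwitch, addr *types.NetAddress) error {
	// Subscribe before dialing so the event cannot be missed
	subCh, unsubFn := sw.Subscribe(func(ev events.Event) bool {
		return ev.Type() == events.PeerConnected
	})
	defer unsubFn()

	// DialPeers only pushes to the dial queue; the dial loop does the rest
	sw.DialPeers(addr)

	timeout := time.After(30 * time.Second)

	for {
		select {
		case ev := <-subCh:
			connected := ev.(events.PeerConnectedEvent)
			if connected.PeerID == addr.ID {
				return nil
			}
			// Some other peer connected; keep waiting
		case <-timeout:
			return errors.New("timed out waiting for peer to connect")
		}
	}
}
```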
-// dial the peer; make secret connection; authenticate against the dialed ID;
-// add the peer.
-// if dialing fails, start the reconnect loop. If handshake fails, it's over.
-// If peer is started successfully, reconnectLoop will start when
-// StopPeerForError is called.
-func (sw *Switch) addOutboundPeerWithConfig(
- addr *NetAddress,
- cfg *config.P2PConfig,
-) error {
- sw.Logger.Info("Dialing peer", "address", addr)
-
- // XXX(xla): Remove the leakage of test concerns in implementation.
- if cfg.TestDialFail {
- go sw.reconnectToPeer(addr)
- return fmt.Errorf("dial err (peerConfig.DialFail == true)")
- }
-
- p, err := sw.transport.Dial(*addr, peerConfig{
- chDescs: sw.chDescs,
- onPeerError: sw.StopPeerForError,
- isPersistent: sw.isPeerPersistentFn(),
- reactorsByCh: sw.reactorsByCh,
- })
- if err != nil {
- if e, ok := err.(RejectedError); ok {
- if e.IsSelf() {
- // TODO: warn?
- return err
- }
+// dialItems adds custom dial items for the multiplex switch
+func (sw *MultiplexSwitch) dialItems(dialItems ...dial.Item) {
+ for _, dialItem := range dialItems {
+ // Check if this is our address
+ if dialItem.Address.Same(sw.transport.NetAddress()) {
+ continue
}
- // retry persistent peers after
- // any dial error besides IsSelf()
- if sw.isPeerPersistentFn()(addr) {
- go sw.reconnectToPeer(addr)
+ // Ignore dial if the limit is reached
+ if out := sw.Peers().NumOutbound(); out >= sw.maxOutboundPeers {
+ sw.Logger.Warn(
+ "ignoring dial request: already have max outbound peers",
+ "have", out,
+ "max", sw.maxOutboundPeers,
+ )
+
+ continue
}
- return err
+ sw.dialQueue.Push(dialItem)
}
+}
- if err := sw.addPeer(p); err != nil {
- sw.transport.Cleanup(p)
- if p.IsRunning() {
- _ = p.Stop()
- }
- return err
- }
+// isPersistentPeer returns a flag indicating if a peer
+// is present in the persistent peer set
+func (sw *MultiplexSwitch) isPersistentPeer(id types.ID) bool {
+ _, persistent := sw.persistentPeers.Load(id)
- return nil
+ return persistent
}
-func (sw *Switch) filterPeer(p Peer) error {
- // Avoid duplicate
- if sw.peers.Has(p.ID()) {
- return RejectedError{id: p.ID(), isDuplicate: true}
- }
-
- errc := make(chan error, len(sw.peerFilters))
+// isPrivatePeer returns a flag indicating if a peer
+// is present in the private peer set
+func (sw *MultiplexSwitch) isPrivatePeer(id types.ID) bool {
+ _, private := sw.privatePeers.Load(id)
- for _, f := range sw.peerFilters {
- go func(f PeerFilterFunc, p Peer, errc chan<- error) {
- errc <- f(sw.peers, p)
- }(f, p, errc)
- }
+ return private
+}
- for i := 0; i < cap(errc); i++ {
+// runAcceptLoop is the main loop for accepting incoming
+// peer connections, filtering them, and adding them
+// to the switch peer set
+func (sw *MultiplexSwitch) runAcceptLoop(ctx context.Context) {
+ for {
select {
- case err := <-errc:
+ case <-ctx.Done():
+ sw.Logger.Debug("switch context close received")
+
+ return
+ default:
+ p, err := sw.transport.Accept(ctx, sw.peerBehavior)
if err != nil {
- return RejectedError{id: p.ID(), err: err, isFiltered: true}
+ sw.Logger.Error(
+ "error encountered during peer connection accept",
+ "err", err,
+ )
+
+ continue
+ }
+
+ // Ignore connection if we already have enough peers.
+ if in := sw.Peers().NumInbound(); in >= sw.maxInboundPeers {
+ sw.Logger.Info(
+ "Ignoring inbound connection: already have enough inbound peers",
+ "address", p.SocketAddr(),
+ "have", in,
+ "max", sw.maxInboundPeers,
+ )
+
+ sw.transport.Remove(p)
+
+ continue
+ }
+
+ // There are open peer slots, add the peer
+ if err := sw.addPeer(p); err != nil {
+ sw.transport.Remove(p)
+
+ if p.IsRunning() {
+ _ = p.Stop()
+ }
+
+ sw.Logger.Info(
+ "Ignoring inbound connection: error while adding peer",
+ "err", err,
+ "id", p.ID(),
+ )
}
- case <-time.After(sw.filterTimeout):
- return FilterTimeoutError{}
}
}
-
- return nil
}
-// addPeer starts up the Peer and adds it to the Switch. Error is returned if
+// addPeer starts up the Peer and adds it to the MultiplexSwitch. Error is returned if
-// the peer is filtered out or failed to start or can't be added.
+// the peer failed to start or can't be added.
-func (sw *Switch) addPeer(p Peer) error {
- if err := sw.filterPeer(p); err != nil {
- return err
- }
-
+func (sw *MultiplexSwitch) addPeer(p PeerConn) error {
p.SetLogger(sw.Logger.With("peer", p.SocketAddr()))
- // Handle the shut down case where the switch has stopped but we're
- // concurrently trying to add a peer.
- if !sw.IsRunning() {
- // XXX should this return an error or just log and terminate?
- sw.Logger.Error("Won't start a peer - switch is not running", "peer", p)
- return nil
- }
-
// Add some data to the peer, which is required by reactors.
for _, reactor := range sw.reactors {
p = reactor.InitPeer(p)
@@ -696,19 +673,15 @@ func (sw *Switch) addPeer(p Peer) error {
// Start the peer's send/recv routines.
// Must start it before adding it to the peer set
// to prevent Start and Stop from being called concurrently.
- err := p.Start()
- if err != nil {
- // Should never happen
+ if err := p.Start(); err != nil {
sw.Logger.Error("Error starting peer", "err", err, "peer", p)
+
return err
}
- // Add the peer to PeerSet. Do this before starting the reactors
+ // Add the peer to the peer set. Do this before starting the reactors
// so that if Receive errors, we will find the peer and remove it.
- // Add should not err since we already checked peers.Has().
- if err := sw.peers.Add(p); err != nil {
- return err
- }
+ sw.peers.Add(p)
// Start all the reactor protocols on the peer.
for _, reactor := range sw.reactors {
@@ -717,29 +690,28 @@ func (sw *Switch) addPeer(p Peer) error {
sw.Logger.Info("Added peer", "peer", p)
- // Update the telemetry data
- sw.logTelemetry()
+ sw.events.Notify(events.PeerConnectedEvent{
+ Address: p.RemoteAddr(),
+ PeerID: p.ID(),
+ })
return nil
}
// logTelemetry logs the switch telemetry data
// to global metrics funnels
-func (sw *Switch) logTelemetry() {
+func (sw *MultiplexSwitch) logTelemetry() {
// Update the telemetry data
if !telemetry.MetricsEnabled() {
return
}
// Fetch the number of peers
- outbound, inbound, dialing := sw.NumPeers()
+ outbound, inbound := sw.peers.NumOutbound(), sw.peers.NumInbound()
// Log the outbound peer count
metrics.OutboundPeers.Record(context.Background(), int64(outbound))
// Log the inbound peer count
metrics.InboundPeers.Record(context.Background(), int64(inbound))
-
- // Log the dialing peer count
- metrics.DialingPeers.Record(context.Background(), int64(dialing))
}
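
The redial backoff computed at the start of this hunk follows a standard pattern: grow a base delay exponentially with the attempt count, spread it with a random jitter factor, then guard the int64 duration cast against overflow and clamp the result to [minTime, maxTime]. The sketch below reconstructs that calculation as a standalone helper; since the head of the original function sits outside the quoted hunk, the name, signature, and exact jitter formula here are assumptions.

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// calculateBackoff is a sketch of the clamped, jittered exponential backoff
// used by the redial logic. Signature and jitter formula are illustrative.
func calculateBackoff(attempt int, base, minTime, maxTime time.Duration) time.Duration {
	// Grow the base delay exponentially with the attempt number
	calculated := float64(base) * math.Pow(2, float64(attempt))

	// Spread the delay with a random jitter factor in [0, 1),
	// keeping it anchored at the base delay
	jitterFactor := rand.Float64()
	calculated = jitterFactor*(calculated-float64(base)) + float64(base)

	// Prevent overflow for the int64 (duration) cast
	if calculated > float64(math.MaxInt64) {
		return maxTime
	}

	duration := time.Duration(calculated)

	// Clamp the duration within bounds
	if duration < minTime {
		return minTime
	}
	if duration > maxTime {
		return maxTime
	}

	return duration
}

func main() {
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Println(calculateBackoff(attempt, time.Second, time.Second, time.Minute))
	}
}
```

Running it for increasing attempts shows the delay growing roughly geometrically while never leaving the configured bounds.
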
diff --git a/tm2/pkg/p2p/switch_option.go b/tm2/pkg/p2p/switch_option.go
new file mode 100644
index 00000000000..83a6920f2cd
--- /dev/null
+++ b/tm2/pkg/p2p/switch_option.go
@@ -0,0 +1,61 @@
+package p2p
+
+import (
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+)
+
+// SwitchOption is a callback used for configuring the p2p MultiplexSwitch
+type SwitchOption func(*MultiplexSwitch)
+
+// WithReactor sets the p2p switch reactors
+func WithReactor(name string, reactor Reactor) SwitchOption {
+ return func(sw *MultiplexSwitch) {
+ for _, chDesc := range reactor.GetChannels() {
+ chID := chDesc.ID
+
+ // No two reactors can share the same channel
+ if sw.peerBehavior.reactorsByCh[chID] != nil {
+ continue
+ }
+
+ sw.peerBehavior.chDescs = append(sw.peerBehavior.chDescs, chDesc)
+ sw.peerBehavior.reactorsByCh[chID] = reactor
+ }
+
+ sw.reactors[name] = reactor
+
+ reactor.SetSwitch(sw)
+ }
+}
+
+// WithPersistentPeers sets the p2p switch's persistent peer set
+func WithPersistentPeers(peerAddrs []*types.NetAddress) SwitchOption {
+ return func(sw *MultiplexSwitch) {
+ for _, addr := range peerAddrs {
+ sw.persistentPeers.Store(addr.ID, addr)
+ }
+ }
+}
+
+// WithPrivatePeers sets the p2p switch's private peer set
+func WithPrivatePeers(peerIDs []types.ID) SwitchOption {
+ return func(sw *MultiplexSwitch) {
+ for _, id := range peerIDs {
+ sw.privatePeers.Store(id, struct{}{})
+ }
+ }
+}
+
+// WithMaxInboundPeers sets the p2p switch's maximum inbound peer limit
+func WithMaxInboundPeers(maxInbound uint64) SwitchOption {
+ return func(sw *MultiplexSwitch) {
+ sw.maxInboundPeers = maxInbound
+ }
+}
+
+// WithMaxOutboundPeers sets the p2p switch's maximum outbound peer limit
+func WithMaxOutboundPeers(maxOutbound uint64) SwitchOption {
+ return func(sw *MultiplexSwitch) {
+ sw.maxOutboundPeers = maxOutbound
+ }
+}
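
Taken together, the new transport constructor, the functional options above, and DialPeers suggest a wiring pattern along the lines of the sketch below. It is illustrative only: the listen address, reactor, peer lists, and limit values are placeholders supplied by the caller, and the "pex" reactor name is hypothetical.

```go
package node

import (
	"log/slog"

	"github.com/gnolang/gno/tm2/pkg/p2p"
	"github.com/gnolang/gno/tm2/pkg/p2p/conn"
	"github.com/gnolang/gno/tm2/pkg/p2p/types"
)

// startSwitch sketches how the new transport, switch options, and DialPeers
// fit together. All arguments are assumed to be prepared by the caller.
func startSwitch(
	listenAddr types.NetAddress,
	nodeInfo types.NodeInfo,
	nodeKey types.NodeKey,
	mcfg conn.MConnConfig,
	logger *slog.Logger,
	pex p2p.Reactor,
	persistentAddrs []*types.NetAddress,
	privateIDs []types.ID,
) (*p2p.MultiplexSwitch, error) {
	transport := p2p.NewMultiplexTransport(nodeInfo, nodeKey, mcfg, logger)

	// Bind the transport to the node's listen address before starting the switch
	if err := transport.Listen(listenAddr); err != nil {
		return nil, err
	}

	sw := p2p.NewMultiplexSwitch(
		transport,
		p2p.WithReactor("pex", pex),              // register the reactor and its channels
		p2p.WithPersistentPeers(persistentAddrs), // redialed whenever they drop
		p2p.WithPrivatePeers(privateIDs),         // marked as private peers
		p2p.WithMaxInboundPeers(40),              // illustrative limits
		p2p.WithMaxOutboundPeers(10),
	)

	if err := sw.OnStart(); err != nil {
		return nil, err
	}

	// Queue the persistent peers for async dialing
	sw.DialPeers(persistentAddrs...)

	return sw, nil
}
```

Because DialPeers only queues addresses, connection progress is observed through the switch's event subsystem (see the events.PeerConnectedEvent notification in addPeer above) rather than through a return value.
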
diff --git a/tm2/pkg/p2p/switch_test.go b/tm2/pkg/p2p/switch_test.go
index a7033b466fe..19a5db2efa5 100644
--- a/tm2/pkg/p2p/switch_test.go
+++ b/tm2/pkg/p2p/switch_test.go
@@ -1,704 +1,825 @@
package p2p
import (
- "bytes"
- "errors"
- "fmt"
- "io"
+ "context"
"net"
"sync"
- "sync/atomic"
"testing"
"time"
+ "github.com/gnolang/gno/tm2/pkg/errors"
+ "github.com/gnolang/gno/tm2/pkg/p2p/dial"
+ "github.com/gnolang/gno/tm2/pkg/p2p/mock"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
- "github.com/gnolang/gno/tm2/pkg/log"
- "github.com/gnolang/gno/tm2/pkg/p2p/config"
- "github.com/gnolang/gno/tm2/pkg/p2p/conn"
- "github.com/gnolang/gno/tm2/pkg/testutils"
)
-var cfg *config.P2PConfig
+func TestMultiplexSwitch_Options(t *testing.T) {
+ t.Parallel()
-func init() {
- cfg = config.DefaultP2PConfig()
- cfg.PexReactor = true
- cfg.AllowDuplicateIP = true
-}
+ t.Run("custom reactors", func(t *testing.T) {
+ t.Parallel()
-type PeerMessage struct {
- PeerID ID
- Bytes []byte
- Counter int
-}
+ var (
+ name = "custom reactor"
+ mockReactor = &mockReactor{
+ setSwitchFn: func(s Switch) {
+ require.NotNil(t, s)
+ },
+ }
+ )
-type TestReactor struct {
- BaseReactor
+ sw := NewMultiplexSwitch(nil, WithReactor(name, mockReactor))
- mtx sync.Mutex
- channels []*conn.ChannelDescriptor
- logMessages bool
- msgsCounter int
- msgsReceived map[byte][]PeerMessage
-}
+ assert.Equal(t, mockReactor, sw.reactors[name])
+ })
-func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor {
- tr := &TestReactor{
- channels: channels,
- logMessages: logMessages,
- msgsReceived: make(map[byte][]PeerMessage),
- }
- tr.BaseReactor = *NewBaseReactor("TestReactor", tr)
- tr.SetLogger(log.NewNoopLogger())
- return tr
-}
+ t.Run("persistent peers", func(t *testing.T) {
+ t.Parallel()
-func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor {
- return tr.channels
-}
+ peers := generateNetAddr(t, 10)
-func (tr *TestReactor) AddPeer(peer Peer) {}
+ sw := NewMultiplexSwitch(nil, WithPersistentPeers(peers))
-func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {}
+ for _, p := range peers {
+ assert.True(t, sw.isPersistentPeer(p.ID))
+ }
+ })
-func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
- if tr.logMessages {
- tr.mtx.Lock()
- defer tr.mtx.Unlock()
- // fmt.Printf("Received: %X, %X\n", chID, msgBytes)
- tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter})
- tr.msgsCounter++
- }
-}
+ t.Run("private peers", func(t *testing.T) {
+ t.Parallel()
-func (tr *TestReactor) getMsgs(chID byte) []PeerMessage {
- tr.mtx.Lock()
- defer tr.mtx.Unlock()
- return tr.msgsReceived[chID]
-}
+ var (
+ peers = generateNetAddr(t, 10)
+ ids = make([]types.ID, 0, len(peers))
+ )
-// -----------------------------------------------------------------------------
+ for _, p := range peers {
+ ids = append(ids, p.ID)
+ }
-// convenience method for creating two switches connected to each other.
-// XXX: note this uses net.Pipe and not a proper TCP conn
-func MakeSwitchPair(_ testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) {
- // Create two switches that will be interconnected.
- switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches)
- return switches[0], switches[1]
-}
+ sw := NewMultiplexSwitch(nil, WithPrivatePeers(ids))
-func initSwitchFunc(i int, sw *Switch) *Switch {
- // Make two reactors of two channels each
- sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
- {ID: byte(0x00), Priority: 10},
- {ID: byte(0x01), Priority: 10},
- }, true))
- sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
- {ID: byte(0x02), Priority: 10},
- {ID: byte(0x03), Priority: 10},
- }, true))
-
- return sw
-}
+ for _, p := range peers {
+ assert.True(t, sw.isPrivatePeer(p.ID))
+ }
+ })
-func TestSwitches(t *testing.T) {
- t.Parallel()
+ t.Run("max inbound peers", func(t *testing.T) {
+ t.Parallel()
- s1, s2 := MakeSwitchPair(t, initSwitchFunc)
- defer s1.Stop()
- defer s2.Stop()
+ maxInbound := uint64(500)
- if s1.Peers().Size() != 1 {
- t.Errorf("Expected exactly 1 peer in s1, got %v", s1.Peers().Size())
- }
- if s2.Peers().Size() != 1 {
- t.Errorf("Expected exactly 1 peer in s2, got %v", s2.Peers().Size())
- }
+ sw := NewMultiplexSwitch(nil, WithMaxInboundPeers(maxInbound))
- // Lets send some messages
- ch0Msg := []byte("channel zero")
- ch1Msg := []byte("channel foo")
- ch2Msg := []byte("channel bar")
+ assert.Equal(t, maxInbound, sw.maxInboundPeers)
+ })
+
+ t.Run("max outbound peers", func(t *testing.T) {
+ t.Parallel()
- s1.Broadcast(byte(0x00), ch0Msg)
- s1.Broadcast(byte(0x01), ch1Msg)
- s1.Broadcast(byte(0x02), ch2Msg)
+ maxOutbound := uint64(500)
- assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
- assertMsgReceivedWithTimeout(t, ch1Msg, byte(0x01), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
- assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
+ sw := NewMultiplexSwitch(nil, WithMaxOutboundPeers(maxOutbound))
+
+ assert.Equal(t, maxOutbound, sw.maxOutboundPeers)
+ })
}
-func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
- t.Helper()
+func TestMultiplexSwitch_Broadcast(t *testing.T) {
+ t.Parallel()
- ticker := time.NewTicker(checkPeriod)
- for {
- select {
- case <-ticker.C:
- msgs := reactor.getMsgs(channel)
- if len(msgs) > 0 {
- if !bytes.Equal(msgs[0].Bytes, msgBytes) {
- t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes)
- }
- return
- }
+ var (
+ wg sync.WaitGroup
+
+ expectedChID = byte(10)
+ expectedData = []byte("broadcast data")
- case <-time.After(timeout):
- t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel)
+ mockTransport = &mockTransport{
+ acceptFn: func(_ context.Context, _ PeerBehavior) (PeerConn, error) {
+ return nil, errors.New("constant error")
+ },
}
- }
-}
-func TestSwitchFiltersOutItself(t *testing.T) {
- t.Parallel()
+ peers = mock.GeneratePeers(t, 10)
+ sw = NewMultiplexSwitch(mockTransport)
+ )
- s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc)
+ require.NoError(t, sw.OnStart())
+ t.Cleanup(sw.OnStop)
- // simulate s1 having a public IP by creating a remote peer with the same ID
- rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg}
- rp.Start()
+ // Create a new peer set
+ sw.peers = newSet()
- // addr should be rejected in addPeer based on the same ID
- err := s1.DialPeerWithAddress(rp.Addr())
- if assert.Error(t, err) {
- if err, ok := err.(RejectedError); ok {
- if !err.IsSelf() {
- t.Errorf("expected self to be rejected")
- }
- } else {
- t.Errorf("expected RejectedError")
+ for _, p := range peers {
+ wg.Add(1)
+
+ p.SendFn = func(chID byte, data []byte) bool {
+ wg.Done()
+
+ require.Equal(t, expectedChID, chID)
+ assert.Equal(t, expectedData, data)
+
+ return false
}
+
+ // Load it up with peers
+ sw.peers.Add(p)
}
- rp.Stop()
+ // Broadcast the data
+ sw.Broadcast(expectedChID, expectedData)
- assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond)
+ wg.Wait()
}
-func TestSwitchPeerFilter(t *testing.T) {
+func TestMultiplexSwitch_Peers(t *testing.T) {
t.Parallel()
var (
- filters = []PeerFilterFunc{
- func(_ IPeerSet, _ Peer) error { return nil },
- func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied!") },
- func(_ IPeerSet, _ Peer) error { return nil },
- }
- sw = MakeSwitch(
- cfg,
- 1,
- "testing",
- "123.123.123",
- initSwitchFunc,
- SwitchPeerFilters(filters...),
- )
+ peers = mock.GeneratePeers(t, 10)
+ sw = NewMultiplexSwitch(nil)
)
- defer sw.Stop()
-
- // simulate remote peer
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- defer rp.Stop()
-
- p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
- chDescs: sw.chDescs,
- onPeerError: sw.StopPeerForError,
- isPersistent: sw.isPeerPersistentFn(),
- reactorsByCh: sw.reactorsByCh,
- })
- if err != nil {
- t.Fatal(err)
- }
- err = sw.addPeer(p)
- if err, ok := err.(RejectedError); ok {
- if !err.IsFiltered() {
- t.Errorf("expected peer to be filtered")
- }
- } else {
- t.Errorf("expected RejectedError")
+ // Create a new peer set
+ sw.peers = newSet()
+
+ for _, p := range peers {
+ // Load it up with peers
+ sw.peers.Add(p)
}
-}
-func TestSwitchPeerFilterTimeout(t *testing.T) {
- t.Parallel()
+ // Fetch the peer set
+ ps := sw.Peers()
- var (
- filters = []PeerFilterFunc{
- func(_ IPeerSet, _ Peer) error {
- time.Sleep(10 * time.Millisecond)
- return nil
- },
- }
- sw = MakeSwitch(
- cfg,
- 1,
- "testing",
- "123.123.123",
- initSwitchFunc,
- SwitchFilterTimeout(5*time.Millisecond),
- SwitchPeerFilters(filters...),
- )
+ require.EqualValues(
+ t,
+ len(peers),
+ ps.NumInbound()+ps.NumOutbound(),
)
- defer sw.Stop()
-
- // simulate remote peer
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- defer rp.Stop()
-
- p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
- chDescs: sw.chDescs,
- onPeerError: sw.StopPeerForError,
- isPersistent: sw.isPeerPersistentFn(),
- reactorsByCh: sw.reactorsByCh,
- })
- if err != nil {
- t.Fatal(err)
- }
- err = sw.addPeer(p)
- if _, ok := err.(FilterTimeoutError); !ok {
- t.Errorf("expected FilterTimeoutError")
+ for _, p := range peers {
+ assert.True(t, ps.Has(p.ID()))
}
}
-func TestSwitchPeerFilterDuplicate(t *testing.T) {
+func TestMultiplexSwitch_StopPeer(t *testing.T) {
t.Parallel()
- sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
- sw.Start()
- defer sw.Stop()
+ t.Run("peer not persistent", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ p = mock.GeneratePeers(t, 1)[0]
+ mockTransport = &mockTransport{
+ removeFn: func(removedPeer PeerConn) {
+ assert.Equal(t, p.ID(), removedPeer.ID())
+ },
+ }
+
+ sw = NewMultiplexSwitch(mockTransport)
+ )
+
+ // Create a new peer set
+ sw.peers = newSet()
- // simulate remote peer
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- defer rp.Stop()
+ // Save the single peer
+ sw.peers.Add(p)
- p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
- chDescs: sw.chDescs,
- onPeerError: sw.StopPeerForError,
- isPersistent: sw.isPeerPersistentFn(),
- reactorsByCh: sw.reactorsByCh,
+ // Stop and remove the peer
+ sw.StopPeerForError(p, nil)
+
+ // Make sure the peer is removed
+ assert.False(t, sw.peers.Has(p.ID()))
})
- if err != nil {
- t.Fatal(err)
- }
- if err := sw.addPeer(p); err != nil {
- t.Fatal(err)
- }
+ t.Run("persistent peer", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ p = mock.GeneratePeers(t, 1)[0]
+ mockTransport = &mockTransport{
+ removeFn: func(removedPeer PeerConn) {
+ assert.Equal(t, p.ID(), removedPeer.ID())
+ },
+ netAddressFn: func() types.NetAddress {
+ return types.NetAddress{}
+ },
+ }
- err = sw.addPeer(p)
- if errRej, ok := err.(RejectedError); ok {
- if !errRej.IsDuplicate() {
- t.Errorf("expected peer to be duplicate. got %v", errRej)
+ sw = NewMultiplexSwitch(mockTransport)
+ )
+
+ // Make sure the peer is persistent
+ p.IsPersistentFn = func() bool {
+ return true
+ }
+
+ p.IsOutboundFn = func() bool {
+ return false
}
- } else {
- t.Errorf("expected RejectedError, got %v", err)
- }
-}
-func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) {
- t.Helper()
+ // Create a new peer set
+ sw.peers = newSet()
- time.Sleep(timeout)
- if sw.Peers().Size() != 0 {
- t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size())
- }
+ // Save the single peer
+ sw.peers.Add(p)
+
+ // Stop and remove the peer
+ sw.StopPeerForError(p, nil)
+
+ // Make sure the peer is removed
+ assert.False(t, sw.peers.Has(p.ID()))
+
+ // Make sure the peer is in the dial queue
+ assert.True(t, sw.dialQueue.Has(p.SocketAddr()))
+ })
}
-func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
+func TestMultiplexSwitch_DialLoop(t *testing.T) {
t.Parallel()
- assert, require := assert.New(t), require.New(t)
+ t.Run("peer already connected", func(t *testing.T) {
+ t.Parallel()
- sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
- err := sw.Start()
- if err != nil {
- t.Error(err)
- }
- defer sw.Stop()
-
- // simulate remote peer
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- defer rp.Stop()
-
- p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
- chDescs: sw.chDescs,
- onPeerError: sw.StopPeerForError,
- isPersistent: sw.isPeerPersistentFn(),
- reactorsByCh: sw.reactorsByCh,
+ ctx, cancelFn := context.WithTimeout(
+ context.Background(),
+ 5*time.Second,
+ )
+ defer cancelFn()
+
+ var (
+ ch = make(chan struct{}, 1)
+
+ peerDialed bool
+
+ p = mock.GeneratePeers(t, 1)[0]
+ dialTime = time.Now().Add(-5 * time.Second) // in the past
+
+ mockSet = &mockSet{
+ hasFn: func(id types.ID) bool {
+ require.Equal(t, p.ID(), id)
+
+ cancelFn()
+
+ ch <- struct{}{}
+
+ return true
+ },
+ }
+
+ mockTransport = &mockTransport{
+ dialFn: func(
+ _ context.Context,
+ _ types.NetAddress,
+ _ PeerBehavior,
+ ) (PeerConn, error) {
+ peerDialed = true
+
+ return nil, nil
+ },
+ }
+
+ sw = NewMultiplexSwitch(mockTransport)
+ )
+
+ sw.peers = mockSet
+
+ // Prepare the dial queue
+ sw.dialQueue.Push(dial.Item{
+ Time: dialTime,
+ Address: p.SocketAddr(),
+ })
+
+ // Run the dial loop
+ go sw.runDialLoop(ctx)
+
+ select {
+ case <-ch:
+ case <-time.After(5 * time.Second):
+ }
+
+ assert.False(t, peerDialed)
})
- require.Nil(err)
- err = sw.addPeer(p)
- require.Nil(err)
+ t.Run("peer undialable", func(t *testing.T) {
+ t.Parallel()
- require.NotNil(sw.Peers().Get(rp.ID()))
+ ctx, cancelFn := context.WithTimeout(
+ context.Background(),
+ 5*time.Second,
+ )
+ defer cancelFn()
- // simulate failure by closing connection
- p.(*peer).CloseConn()
+ var (
+ ch = make(chan struct{}, 1)
- assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond)
- assert.False(p.IsRunning())
-}
+ peerDialed bool
-func TestSwitchStopPeerForError(t *testing.T) {
- t.Parallel()
+ p = mock.GeneratePeers(t, 1)[0]
+ dialTime = time.Now().Add(-5 * time.Second) // in the past
+
+ mockSet = &mockSet{
+ hasFn: func(id types.ID) bool {
+ require.Equal(t, p.ID(), id)
+
+ return false
+ },
+ }
+
+ mockTransport = &mockTransport{
+ dialFn: func(
+ _ context.Context,
+ _ types.NetAddress,
+ _ PeerBehavior,
+ ) (PeerConn, error) {
+ peerDialed = true
+
+ cancelFn()
+
+ ch <- struct{}{}
+
+ return nil, errors.New("invalid dial")
+ },
+ }
+
+ sw = NewMultiplexSwitch(mockTransport)
+ )
+
+ sw.peers = mockSet
- // make two connected switches
- sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch {
- return initSwitchFunc(i, sw)
+ // Prepare the dial queue
+ sw.dialQueue.Push(dial.Item{
+ Time: dialTime,
+ Address: p.SocketAddr(),
+ })
+
+ // Run the dial loop
+ go sw.runDialLoop(ctx)
+
+ select {
+ case <-ch:
+ case <-time.After(5 * time.Second):
+ }
+
+ assert.True(t, peerDialed)
})
- assert.Equal(t, len(sw1.Peers().List()), 1)
+ t.Run("peer dialed and added", func(t *testing.T) {
+ t.Parallel()
- // send messages to the peer from sw1
- p := sw1.Peers().List()[0]
- p.Send(0x1, []byte("here's a message to send"))
+ ctx, cancelFn := context.WithTimeout(
+ context.Background(),
+ 5*time.Second,
+ )
+ defer cancelFn()
- // stop sw2. this should cause the p to fail,
- // which results in calling StopPeerForError internally
- sw2.Stop()
+ var (
+ ch = make(chan struct{}, 1)
- // now call StopPeerForError explicitly, eg. from a reactor
- sw1.StopPeerForError(p, fmt.Errorf("some err"))
+ peerDialed bool
- assert.Equal(t, len(sw1.Peers().List()), 0)
-}
+ p = mock.GeneratePeers(t, 1)[0]
+ dialTime = time.Now().Add(-5 * time.Second) // in the past
-func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) {
- t.Parallel()
+ mockTransport = &mockTransport{
+ dialFn: func(
+ _ context.Context,
+ _ types.NetAddress,
+ _ PeerBehavior,
+ ) (PeerConn, error) {
+ peerDialed = true
- sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
- err := sw.Start()
- require.NoError(t, err)
- defer sw.Stop()
-
- // 1. simulate failure by closing connection
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- defer rp.Stop()
-
- err = sw.AddPersistentPeers([]string{rp.Addr().String()})
- require.NoError(t, err)
-
- err = sw.DialPeerWithAddress(rp.Addr())
- require.Nil(t, err)
- require.NotNil(t, sw.Peers().Get(rp.ID()))
-
- p := sw.Peers().List()[0]
- p.(*peer).CloseConn()
-
- waitUntilSwitchHasAtLeastNPeers(sw, 1)
- assert.False(t, p.IsRunning()) // old peer instance
- assert.Equal(t, 1, sw.Peers().Size()) // new peer instance
-
- // 2. simulate first time dial failure
- rp = &remotePeer{
- PrivKey: ed25519.GenPrivKey(),
- Config: cfg,
- // Use different interface to prevent duplicate IP filter, this will break
- // beyond two peers.
- listenAddr: "127.0.0.1:0",
- }
- rp.Start()
- defer rp.Stop()
-
- conf := config.DefaultP2PConfig()
- conf.TestDialFail = true // will trigger a reconnect
- err = sw.addOutboundPeerWithConfig(rp.Addr(), conf)
- require.NotNil(t, err)
- // DialPeerWithAddres - sw.peerConfig resets the dialer
- waitUntilSwitchHasAtLeastNPeers(sw, 2)
- assert.Equal(t, 2, sw.Peers().Size())
-}
+ cancelFn()
-func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) {
- t.Parallel()
+ ch <- struct{}{}
+
+ return p, nil
+ },
+ }
- sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
- err := sw.Start()
- require.NoError(t, err)
- defer sw.Stop()
+ sw = NewMultiplexSwitch(mockTransport)
+ )
- // 1. simulate failure by closing the connection
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- defer rp.Stop()
+ // Prepare the dial queue
+ sw.dialQueue.Push(dial.Item{
+ Time: dialTime,
+ Address: p.SocketAddr(),
+ })
- err = sw.AddPersistentPeers([]string{rp.Addr().String()})
- require.NoError(t, err)
+ // Run the dial loop
+ go sw.runDialLoop(ctx)
- conn, err := rp.Dial(sw.NetAddress())
- require.NoError(t, err)
- time.Sleep(100 * time.Millisecond)
- require.NotNil(t, sw.Peers().Get(rp.ID()))
+ select {
+ case <-ch:
+ case <-time.After(5 * time.Second):
+ }
- conn.Close()
+ require.True(t, sw.Peers().Has(p.ID()))
- waitUntilSwitchHasAtLeastNPeers(sw, 1)
- assert.Equal(t, 1, sw.Peers().Size())
+ assert.True(t, peerDialed)
+ })
}
-func TestSwitchDialPeersAsync(t *testing.T) {
+func TestMultiplexSwitch_AcceptLoop(t *testing.T) {
t.Parallel()
- if testing.Short() {
- return
- }
+ t.Run("inbound limit reached", func(t *testing.T) {
+ t.Parallel()
- sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
- err := sw.Start()
- require.NoError(t, err)
- defer sw.Stop()
+ ctx, cancelFn := context.WithTimeout(
+ context.Background(),
+ 5*time.Second,
+ )
+ defer cancelFn()
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- defer rp.Stop()
+ var (
+ ch = make(chan struct{}, 1)
+ maxInbound = uint64(10)
- err = sw.DialPeersAsync([]string{rp.Addr().String()})
- require.NoError(t, err)
- time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond)
- require.NotNil(t, sw.Peers().Get(rp.ID()))
-}
+ peerRemoved bool
+
+ p = mock.GeneratePeers(t, 1)[0]
+
+ mockTransport = &mockTransport{
+ acceptFn: func(_ context.Context, _ PeerBehavior) (PeerConn, error) {
+ return p, nil
+ },
+ removeFn: func(removedPeer PeerConn) {
+ require.Equal(t, p.ID(), removedPeer.ID())
+
+ peerRemoved = true
+
+ ch <- struct{}{}
+ },
+ }
-func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) {
- for i := 0; i < 20; i++ {
- time.Sleep(250 * time.Millisecond)
- has := sw.Peers().Size()
- if has >= n {
- break
+ ps = &mockSet{
+ numInboundFn: func() uint64 {
+ return maxInbound
+ },
+ }
+
+ sw = NewMultiplexSwitch(
+ mockTransport,
+ WithMaxInboundPeers(maxInbound),
+ )
+ )
+
+ // Set the peer set
+ sw.peers = ps
+
+ // Run the accept loop
+ go sw.runAcceptLoop(ctx)
+
+ select {
+ case <-ch:
+ case <-time.After(5 * time.Second):
}
- }
+
+ assert.True(t, peerRemoved)
+ })
+
+ t.Run("peer accepted", func(t *testing.T) {
+ t.Parallel()
+
+ ctx, cancelFn := context.WithTimeout(
+ context.Background(),
+ 5*time.Second,
+ )
+ defer cancelFn()
+
+ var (
+ ch = make(chan struct{}, 1)
+ maxInbound = uint64(10)
+
+ peerAdded bool
+
+ p = mock.GeneratePeers(t, 1)[0]
+
+ mockTransport = &mockTransport{
+ acceptFn: func(_ context.Context, _ PeerBehavior) (PeerConn, error) {
+ return p, nil
+ },
+ }
+
+ ps = &mockSet{
+ numInboundFn: func() uint64 {
+ return maxInbound - 1 // available slot
+ },
+ addFn: func(peer PeerConn) {
+ require.Equal(t, p.ID(), peer.ID())
+
+ peerAdded = true
+
+ ch <- struct{}{}
+ },
+ }
+
+ sw = NewMultiplexSwitch(
+ mockTransport,
+ WithMaxInboundPeers(maxInbound),
+ )
+ )
+
+ // Set the peer set
+ sw.peers = ps
+
+ // Run the accept loop
+ go sw.runAcceptLoop(ctx)
+
+ select {
+ case <-ch:
+ case <-time.After(5 * time.Second):
+ }
+
+ assert.True(t, peerAdded)
+ })
}
-func TestSwitchFullConnectivity(t *testing.T) {
+func TestMultiplexSwitch_RedialLoop(t *testing.T) {
t.Parallel()
- switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches)
- defer func() {
- for _, sw := range switches {
- sw.Stop()
+ t.Run("no peers to dial", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ ch = make(chan struct{}, 1)
+
+ peersChecked = 0
+ peers = mock.GeneratePeers(t, 10)
+
+ ps = &mockSet{
+ hasFn: func(id types.ID) bool {
+ exists := false
+ for _, p := range peers {
+ if p.ID() == id {
+ exists = true
+
+ break
+ }
+ }
+
+ require.True(t, exists)
+
+ peersChecked++
+
+ if peersChecked == len(peers) {
+ ch <- struct{}{}
+ }
+
+ return true
+ },
+ }
+ )
+
+ // Make sure the peers are the
+ // switch persistent peers
+ addrs := make([]*types.NetAddress, 0, len(peers))
+
+ for _, p := range peers {
+ addrs = append(addrs, p.SocketAddr())
}
- }()
- for i, sw := range switches {
- if sw.Peers().Size() != 2 {
- t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i)
+ // Create the switch
+ sw := NewMultiplexSwitch(
+ nil,
+ WithPersistentPeers(addrs),
+ )
+
+ // Set the peer set
+ sw.peers = ps
+
+ // Run the redial loop
+ ctx, cancelFn := context.WithTimeout(
+ context.Background(),
+ 5*time.Second,
+ )
+ defer cancelFn()
+
+ go sw.runRedialLoop(ctx)
+
+ select {
+ case <-ch:
+ case <-time.After(5 * time.Second):
}
- }
-}
-func TestSwitchAcceptRoutine(t *testing.T) {
- t.Parallel()
+ assert.Equal(t, len(peers), peersChecked)
+ })
+
+ t.Run("missing peers dialed", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ peers = mock.GeneratePeers(t, 10)
+ missingPeer = peers[0]
+ missingAddr = missingPeer.SocketAddr()
+
+ peersDialed []types.NetAddress
+
+ mockTransport = &mockTransport{
+ dialFn: func(
+ _ context.Context,
+ address types.NetAddress,
+ _ PeerBehavior,
+ ) (PeerConn, error) {
+ peersDialed = append(peersDialed, address)
+
+ if address.Equals(*missingPeer.SocketAddr()) {
+ return missingPeer, nil
+ }
+
+ return nil, errors.New("invalid dial")
+ },
+ }
+ ps = &mockSet{
+ hasFn: func(id types.ID) bool {
+ return id != missingPeer.ID()
+ },
+ }
+ )
+
+ // Make sure the peers are the
+ // switch persistent peers
+ addrs := make([]*types.NetAddress, 0, len(peers))
+
+ for _, p := range peers {
+ addrs = append(addrs, p.SocketAddr())
+ }
+
+ // Create the switch
+ sw := NewMultiplexSwitch(
+ mockTransport,
+ WithPersistentPeers(addrs),
+ )
+
+ // Set the peer set
+ sw.peers = ps
+
+ // Run the redial loop
+ ctx, cancelFn := context.WithTimeout(
+ context.Background(),
+ 5*time.Second,
+ )
+ defer cancelFn()
+
+ var wg sync.WaitGroup
+
+ wg.Add(2)
+
+ go func() {
+ defer wg.Done()
+
+ sw.runRedialLoop(ctx)
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ deadline := time.After(5 * time.Second)
- cfg.MaxNumInboundPeers = 5
-
- // make switch
- sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
- err := sw.Start()
- require.NoError(t, err)
- defer sw.Stop()
-
- remotePeers := make([]*remotePeer, 0)
- assert.Equal(t, 0, sw.Peers().Size())
-
- // 1. check we connect up to MaxNumInboundPeers
- for i := 0; i < cfg.MaxNumInboundPeers; i++ {
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- remotePeers = append(remotePeers, rp)
- rp.Start()
- c, err := rp.Dial(sw.NetAddress())
- require.NoError(t, err)
- // spawn a reading routine to prevent connection from closing
- go func(c net.Conn) {
for {
- one := make([]byte, 1)
- _, err := c.Read(one)
- if err != nil {
+ select {
+ case <-deadline:
+ return
+ default:
+ if !sw.dialQueue.Has(missingAddr) {
+ continue
+ }
+
+ cancelFn()
+
return
}
}
- }(c)
- }
- time.Sleep(100 * time.Millisecond)
- assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())
-
- // 2. check we close new connections if we already have MaxNumInboundPeers peers
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- conn, err := rp.Dial(sw.NetAddress())
- require.NoError(t, err)
- // check conn is closed
- one := make([]byte, 1)
- conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
- _, err = conn.Read(one)
- assert.Equal(t, io.EOF, err)
- assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())
- rp.Stop()
-
- // stop remote peers
- for _, rp := range remotePeers {
- rp.Stop()
- }
-}
+ }()
-type errorTransport struct {
- acceptErr error
-}
+ wg.Wait()
-func (et errorTransport) NetAddress() NetAddress {
- panic("not implemented")
+ require.True(t, sw.dialQueue.Has(missingAddr))
+ assert.Equal(t, missingAddr, sw.dialQueue.Peek().Address)
+ })
}
-func (et errorTransport) Accept(c peerConfig) (Peer, error) {
- return nil, et.acceptErr
-}
+func TestMultiplexSwitch_DialPeers(t *testing.T) {
+ t.Parallel()
-func (errorTransport) Dial(NetAddress, peerConfig) (Peer, error) {
- panic("not implemented")
-}
+ t.Run("self dial request", func(t *testing.T) {
+ t.Parallel()
-func (errorTransport) Cleanup(Peer) {
- panic("not implemented")
-}
+ var (
+ p = mock.GeneratePeers(t, 1)[0]
+ addr = types.NetAddress{
+ ID: "id",
+ IP: p.SocketAddr().IP,
+ Port: p.SocketAddr().Port,
+ }
-func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
- t.Parallel()
+ mockTransport = &mockTransport{
+ netAddressFn: func() types.NetAddress {
+ return addr
+ },
+ }
+ )
- sw := NewSwitch(cfg, errorTransport{FilterTimeoutError{}})
- assert.NotPanics(t, func() {
- err := sw.Start()
- assert.NoError(t, err)
- sw.Stop()
- })
+ // Make sure the "peer" has the same address
+ // as the transport (node)
+ p.NodeInfoFn = func() types.NodeInfo {
+ return types.NodeInfo{
+ PeerID: addr.ID,
+ }
+ }
- sw = NewSwitch(cfg, errorTransport{RejectedError{conn: nil, err: errors.New("filtered"), isFiltered: true}})
- assert.NotPanics(t, func() {
- err := sw.Start()
- assert.NoError(t, err)
- sw.Stop()
- })
+ sw := NewMultiplexSwitch(mockTransport)
+
+ // Dial the peers
+ sw.DialPeers(p.SocketAddr())
- sw = NewSwitch(cfg, errorTransport{TransportClosedError{}})
- assert.NotPanics(t, func() {
- err := sw.Start()
- assert.NoError(t, err)
- sw.Stop()
+ // Make sure the peer wasn't actually dialed
+ assert.False(t, sw.dialQueue.Has(p.SocketAddr()))
})
-}
-// mockReactor checks that InitPeer never called before RemovePeer. If that's
-// not true, InitCalledBeforeRemoveFinished will return true.
-type mockReactor struct {
- *BaseReactor
+ t.Run("outbound peer limit reached", func(t *testing.T) {
+ t.Parallel()
- // atomic
- removePeerInProgress uint32
- initCalledBeforeRemoveFinished uint32
-}
+ var (
+ maxOutbound = uint64(10)
+ peers = mock.GeneratePeers(t, 10)
-func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) {
- atomic.StoreUint32(&r.removePeerInProgress, 1)
- defer atomic.StoreUint32(&r.removePeerInProgress, 0)
- time.Sleep(100 * time.Millisecond)
-}
+ mockTransport = &mockTransport{
+ netAddressFn: func() types.NetAddress {
+ return types.NetAddress{
+ ID: "id",
+ IP: net.IP{},
+ }
+ },
+ }
-func (r *mockReactor) InitPeer(peer Peer) Peer {
- if atomic.LoadUint32(&r.removePeerInProgress) == 1 {
- atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1)
- }
+ ps = &mockSet{
+ numOutboundFn: func() uint64 {
+ return maxOutbound
+ },
+ }
+ )
- return peer
-}
+ sw := NewMultiplexSwitch(
+ mockTransport,
+ WithMaxOutboundPeers(maxOutbound),
+ )
-func (r *mockReactor) InitCalledBeforeRemoveFinished() bool {
- return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1
-}
+ // Set the peer set
+ sw.peers = ps
-// see stopAndRemovePeer
-func TestFlappySwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) {
- t.Parallel()
+ // Dial the peers
+ addrs := make([]*types.NetAddress, 0, len(peers))
- testutils.FilterStability(t, testutils.Flappy)
+ for _, p := range peers {
+ addrs = append(addrs, p.SocketAddr())
+ }
- // make reactor
- reactor := &mockReactor{}
- reactor.BaseReactor = NewBaseReactor("mockReactor", reactor)
+ sw.DialPeers(addrs...)
- // make switch
- sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch {
- sw.AddReactor("mock", reactor)
- return sw
+ // Make sure no peers were dialed
+ for _, p := range peers {
+ assert.False(t, sw.dialQueue.Has(p.SocketAddr()))
+ }
})
- err := sw.Start()
- require.NoError(t, err)
- defer sw.Stop()
-
- // add peer
- rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
- rp.Start()
- defer rp.Stop()
- _, err = rp.Dial(sw.NetAddress())
- require.NoError(t, err)
- // wait till the switch adds rp to the peer set
- time.Sleep(100 * time.Millisecond)
-
- // stop peer asynchronously
- go sw.StopPeerForError(sw.Peers().Get(rp.ID()), "test")
-
- // simulate peer reconnecting to us
- _, err = rp.Dial(sw.NetAddress())
- require.NoError(t, err)
- // wait till the switch adds rp to the peer set
- time.Sleep(100 * time.Millisecond)
-
- // make sure reactor.RemovePeer is finished before InitPeer is called
- assert.False(t, reactor.InitCalledBeforeRemoveFinished())
-}
-func BenchmarkSwitchBroadcast(b *testing.B) {
- s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch {
- // Make bar reactors of bar channels each
- sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
- {ID: byte(0x00), Priority: 10},
- {ID: byte(0x01), Priority: 10},
- }, false))
- sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
- {ID: byte(0x02), Priority: 10},
- {ID: byte(0x03), Priority: 10},
- }, false))
- return sw
- })
- defer s1.Stop()
- defer s2.Stop()
+ t.Run("peers dialed", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ maxOutbound = uint64(1000)
+ peers = mock.GeneratePeers(t, int(maxOutbound/2))
- // Allow time for goroutines to boot up
- time.Sleep(1 * time.Second)
+ mockTransport = &mockTransport{
+ netAddressFn: func() types.NetAddress {
+ return types.NetAddress{
+ ID: "id",
+ IP: net.IP{},
+ }
+ },
+ }
+ )
- b.ResetTimer()
+ sw := NewMultiplexSwitch(
+ mockTransport,
+ WithMaxOutboundPeers(maxOutbound),
+ )
- numSuccess, numFailure := 0, 0
+ // Dial the peers
+ addrs := make([]*types.NetAddress, 0, len(peers))
- // Send random message from foo channel to another
- for i := 0; i < b.N; i++ {
- chID := byte(i % 4)
- successChan := s1.Broadcast(chID, []byte("test data"))
- for s := range successChan {
- if s {
- numSuccess++
- } else {
- numFailure++
- }
+ for _, p := range peers {
+ addrs = append(addrs, p.SocketAddr())
}
- }
- b.Logf("success: %v, failure: %v", numSuccess, numFailure)
+ sw.DialPeers(addrs...)
+
+ // Make sure peers were dialed
+ for _, p := range peers {
+ assert.True(t, sw.dialQueue.Has(p.SocketAddr()))
+ }
+ })
}
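
The mockTransport, mockSet, and mock.GeneratePeers helpers these tests lean on are defined in other files of this change and are not shown here. They follow a function-field mock pattern: a struct carries one optional callback per interface method, and each method delegates to its callback when set, so a test stubs only the behavior it needs. The sketch below shows the general shape with two methods; it is not the PR's actual definition, and the real Transport interface has more methods than shown.

```go
package p2p

import "context"

// mockTransport illustrates the function-field mock pattern used by the tests
// above: each method delegates to an optional callback, so a test only stubs
// the behavior it cares about. The real mocks live in separate files.
type mockTransport struct {
	acceptFn func(context.Context, PeerBehavior) (PeerConn, error)
	removeFn func(PeerConn)
}

func (m *mockTransport) Accept(ctx context.Context, behavior PeerBehavior) (PeerConn, error) {
	if m.acceptFn != nil {
		return m.acceptFn(ctx, behavior)
	}

	return nil, nil
}

func (m *mockTransport) Remove(p PeerConn) {
	if m.removeFn != nil {
		m.removeFn(p)
	}
}
```
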
diff --git a/tm2/pkg/p2p/test_util.go b/tm2/pkg/p2p/test_util.go
deleted file mode 100644
index dd0d9cd6bc7..00000000000
--- a/tm2/pkg/p2p/test_util.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package p2p
-
-import (
- "fmt"
- "net"
- "time"
-
- "github.com/gnolang/gno/tm2/pkg/crypto"
- "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
- "github.com/gnolang/gno/tm2/pkg/errors"
- "github.com/gnolang/gno/tm2/pkg/log"
- "github.com/gnolang/gno/tm2/pkg/p2p/config"
- "github.com/gnolang/gno/tm2/pkg/p2p/conn"
- "github.com/gnolang/gno/tm2/pkg/random"
- "github.com/gnolang/gno/tm2/pkg/versionset"
-)
-
-const testCh = 0x01
-
-// ------------------------------------------------
-
-func CreateRoutableAddr() (addr string, netAddr *NetAddress) {
- for {
- id := ed25519.GenPrivKey().PubKey().Address().ID()
- var err error
- addr = fmt.Sprintf("%s@%v.%v.%v.%v:26656", id, random.RandInt()%256, random.RandInt()%256, random.RandInt()%256, random.RandInt()%256)
- netAddr, err = NewNetAddressFromString(addr)
- if err != nil {
- panic(err)
- }
- if netAddr.Routable() {
- break
- }
- }
- return
-}
-
-// ------------------------------------------------------------------
-// Connects switches via arbitrary net.Conn. Used for testing.
-
-const TEST_HOST = "localhost"
-
-// MakeConnectedSwitches returns n switches, connected according to the connect func.
-// If connect==Connect2Switches, the switches will be fully connected.
-// initSwitch defines how the i'th switch should be initialized (ie. with what reactors).
-// NOTE: panics if any switch fails to start.
-func MakeConnectedSwitches(cfg *config.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch {
- switches := make([]*Switch, n)
- for i := 0; i < n; i++ {
- switches[i] = MakeSwitch(cfg, i, TEST_HOST, "123.123.123", initSwitch)
- }
-
- if err := StartSwitches(switches); err != nil {
- panic(err)
- }
-
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- connect(switches, i, j)
- }
- }
-
- return switches
-}
-
-// Connect2Switches will connect switches i and j via net.Pipe().
-// Blocks until a connection is established.
-// NOTE: caller ensures i and j are within bounds.
-func Connect2Switches(switches []*Switch, i, j int) {
- switchI := switches[i]
- switchJ := switches[j]
-
- c1, c2 := conn.NetPipe()
-
- doneCh := make(chan struct{})
- go func() {
- err := switchI.addPeerWithConnection(c1)
- if err != nil {
- panic(err)
- }
- doneCh <- struct{}{}
- }()
- go func() {
- err := switchJ.addPeerWithConnection(c2)
- if err != nil {
- panic(err)
- }
- doneCh <- struct{}{}
- }()
- <-doneCh
- <-doneCh
-}
-
-func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
- pc, err := testInboundPeerConn(conn, sw.config, sw.nodeKey.PrivKey)
- if err != nil {
- if err := conn.Close(); err != nil {
- sw.Logger.Error("Error closing connection", "err", err)
- }
- return err
- }
-
- ni, err := handshake(conn, time.Second, sw.nodeInfo)
- if err != nil {
- if err := conn.Close(); err != nil {
- sw.Logger.Error("Error closing connection", "err", err)
- }
- return err
- }
-
- p := newPeer(
- pc,
- MConnConfig(sw.config),
- ni,
- sw.reactorsByCh,
- sw.chDescs,
- sw.StopPeerForError,
- )
-
- if err = sw.addPeer(p); err != nil {
- pc.CloseConn()
- return err
- }
-
- return nil
-}
-
-// StartSwitches calls sw.Start() for each given switch.
-// It returns the first encountered error.
-func StartSwitches(switches []*Switch) error {
- for _, s := range switches {
- err := s.Start() // start switch and reactors
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func MakeSwitch(
- cfg *config.P2PConfig,
- i int,
- network, version string,
- initSwitch func(int, *Switch) *Switch,
- opts ...SwitchOption,
-) *Switch {
- nodeKey := NodeKey{
- PrivKey: ed25519.GenPrivKey(),
- }
- nodeInfo := testNodeInfo(nodeKey.ID(), fmt.Sprintf("node%d", i))
-
- t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg))
-
- if err := t.Listen(*nodeInfo.NetAddress); err != nil {
- panic(err)
- }
-
- // TODO: let the config be passed in?
- sw := initSwitch(i, NewSwitch(cfg, t, opts...))
- sw.SetLogger(log.NewNoopLogger().With("switch", i))
- sw.SetNodeKey(&nodeKey)
-
- for ch := range sw.reactorsByCh {
- nodeInfo.Channels = append(nodeInfo.Channels, ch)
- }
-
- // TODO: We need to setup reactors ahead of time so the NodeInfo is properly
- // populated and we don't have to do those awkward overrides and setters.
- t.nodeInfo = nodeInfo
- sw.SetNodeInfo(nodeInfo)
-
- return sw
-}
-
-func testInboundPeerConn(
- conn net.Conn,
- config *config.P2PConfig,
- ourNodePrivKey crypto.PrivKey,
-) (peerConn, error) {
- return testPeerConn(conn, config, false, false, ourNodePrivKey, nil)
-}
-
-func testPeerConn(
- rawConn net.Conn,
- cfg *config.P2PConfig,
- outbound, persistent bool,
- ourNodePrivKey crypto.PrivKey,
- socketAddr *NetAddress,
-) (pc peerConn, err error) {
- conn := rawConn
-
- // Fuzz connection
- if cfg.TestFuzz {
- // so we have time to do peer handshakes and get set up
- conn = FuzzConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig)
- }
-
- // Encrypt connection
- conn, err = upgradeSecretConn(conn, cfg.HandshakeTimeout, ourNodePrivKey)
- if err != nil {
- return pc, errors.Wrap(err, "Error creating peer")
- }
-
- // Only the information we already have
- return newPeerConn(outbound, persistent, conn, socketAddr), nil
-}
-
-// ----------------------------------------------------------------
-// rand node info
-
-func testNodeInfo(id ID, name string) NodeInfo {
- return testNodeInfoWithNetwork(id, name, "testing")
-}
-
-func testVersionSet() versionset.VersionSet {
- return versionset.VersionSet{
- versionset.VersionInfo{
- Name: "p2p",
- Version: "v0.0.0", // dontcare
- },
- }
-}
-
-func testNodeInfoWithNetwork(id ID, name, network string) NodeInfo {
- return NodeInfo{
- VersionSet: testVersionSet(),
- NetAddress: NewNetAddressFromIPPort(id, net.ParseIP("127.0.0.1"), 0),
- Network: network,
- Software: "p2ptest",
- Version: "v1.2.3-rc.0-deadbeef",
- Channels: []byte{testCh},
- Moniker: name,
- Other: NodeInfoOther{
- TxIndex: "on",
- RPCAddress: fmt.Sprintf("127.0.0.1:%d", 0),
- },
- }
-}
diff --git a/tm2/pkg/p2p/transport.go b/tm2/pkg/p2p/transport.go
index 5bfae9e52b8..9edef9a15e5 100644
--- a/tm2/pkg/p2p/transport.go
+++ b/tm2/pkg/p2p/transport.go
@@ -3,144 +3,64 @@ package p2p
import (
"context"
"fmt"
+ "io"
+ "log/slog"
"net"
- "strconv"
+ "sync"
"time"
"github.com/gnolang/gno/tm2/pkg/amino"
"github.com/gnolang/gno/tm2/pkg/crypto"
"github.com/gnolang/gno/tm2/pkg/errors"
"github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ "golang.org/x/sync/errgroup"
)
-const (
- defaultDialTimeout = time.Second
- defaultFilterTimeout = 5 * time.Second
- defaultHandshakeTimeout = 3 * time.Second
-)
-
-// IPResolver is a behaviour subset of net.Resolver.
-type IPResolver interface {
- LookupIPAddr(context.Context, string) ([]net.IPAddr, error)
-}
-
-// accept is the container to carry the upgraded connection and NodeInfo from an
-// asynchronously running routine to the Accept method.
-type accept struct {
- netAddr *NetAddress
- conn net.Conn
- nodeInfo NodeInfo
- err error
-}
-
-// peerConfig is used to bundle data we need to fully setup a Peer with an
-// MConn, provided by the caller of Accept and Dial (currently the Switch). This
-// a temporary measure until reactor setup is less dynamic and we introduce the
-// concept of PeerBehaviour to communicate about significant Peer lifecycle
-// events.
-// TODO(xla): Refactor out with more static Reactor setup and PeerBehaviour.
-type peerConfig struct {
- chDescs []*conn.ChannelDescriptor
- onPeerError func(Peer, interface{})
- outbound bool
- // isPersistent allows you to set a function, which, given socket address
- // (for outbound peers) OR self-reported address (for inbound peers), tells
- // if the peer is persistent or not.
- isPersistent func(*NetAddress) bool
- reactorsByCh map[byte]Reactor
-}
-
-// Transport emits and connects to Peers. The implementation of Peer is left to
-// the transport. Each transport is also responsible to filter establishing
-// peers specific to its domain.
-type Transport interface {
- // Listening address.
- NetAddress() NetAddress
-
- // Accept returns a newly connected Peer.
- Accept(peerConfig) (Peer, error)
-
- // Dial connects to the Peer for the address.
- Dial(NetAddress, peerConfig) (Peer, error)
-
- // Cleanup any resources associated with Peer.
- Cleanup(Peer)
-}
-
-// TransportLifecycle bundles the methods for callers to control start and stop
-// behaviour.
-type TransportLifecycle interface {
- Close() error
- Listen(NetAddress) error
-}
-
-// ConnFilterFunc to be implemented by filter hooks after a new connection has
-// been established. The set of existing connections is passed along together
-// with all resolved IPs for the new connection.
-type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error
-
-// ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection
-// and refuses new ones if they come from a known ip.
-func ConnDuplicateIPFilter() ConnFilterFunc {
- return func(cs ConnSet, c net.Conn, ips []net.IP) error {
- for _, ip := range ips {
- if cs.HasIP(ip) {
- return RejectedError{
- conn: c,
- err: fmt.Errorf("IP<%v> already connected", ip),
- isDuplicate: true,
- }
- }
- }
+// defaultHandshakeTimeout is the timeout for the STS handshaking protocol
+const defaultHandshakeTimeout = 3 * time.Second
- return nil
- }
-}
+var (
+ errTransportClosed = errors.New("transport is closed")
+ errTransportInactive = errors.New("transport is inactive")
+ errDuplicateConnection = errors.New("duplicate peer connection")
+ errPeerIDNodeInfoMismatch = errors.New("connection ID does not match node info ID")
+ errPeerIDDialMismatch = errors.New("connection ID does not match dialed ID")
+ errIncompatibleNodeInfo = errors.New("incompatible node info")
+)
-// MultiplexTransportOption sets an optional parameter on the
-// MultiplexTransport.
-type MultiplexTransportOption func(*MultiplexTransport)
+type connUpgradeFn func(io.ReadWriteCloser, crypto.PrivKey) (*conn.SecretConnection, error)
-// MultiplexTransportConnFilters sets the filters for rejection new connections.
-func MultiplexTransportConnFilters(
- filters ...ConnFilterFunc,
-) MultiplexTransportOption {
- return func(mt *MultiplexTransport) { mt.connFilters = filters }
-}
+type secretConn interface {
+ net.Conn
-// MultiplexTransportFilterTimeout sets the timeout waited for filter calls to
-// return.
-func MultiplexTransportFilterTimeout(
- timeout time.Duration,
-) MultiplexTransportOption {
- return func(mt *MultiplexTransport) { mt.filterTimeout = timeout }
+ RemotePubKey() crypto.PubKey
}
-// MultiplexTransportResolver sets the Resolver used for ip lookups, defaults to
-// net.DefaultResolver.
-func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption {
- return func(mt *MultiplexTransport) { mt.resolver = resolver }
+// peerInfo is a wrapper for an unverified peer connection
+type peerInfo struct {
+ addr *types.NetAddress // the dial address of the peer
+ conn net.Conn // the connection associated with the peer
+ nodeInfo types.NodeInfo // the relevant peer node info
}
// MultiplexTransport accepts and dials tcp connections and upgrades them to
// multiplexed peers.
type MultiplexTransport struct {
- netAddr NetAddress
- listener net.Listener
+ ctx context.Context
+ cancelFn context.CancelFunc
- acceptc chan accept
- closec chan struct{}
+ logger *slog.Logger
- // Lookup table for duplicate ip and id checks.
- conns ConnSet
- connFilters []ConnFilterFunc
+ netAddr types.NetAddress // the node's P2P dial address, used for handshaking
+ nodeInfo types.NodeInfo // the node's P2P info, used for handshaking
+ nodeKey types.NodeKey // the node's private P2P key, used for handshaking
- dialTimeout time.Duration
- filterTimeout time.Duration
- handshakeTimeout time.Duration
- nodeInfo NodeInfo
- nodeKey NodeKey
- resolver IPResolver
+ listener net.Listener // listener for inbound peer connections
+ peerCh chan peerInfo // pipe for inbound peer connections
+ activeConns sync.Map // active peer connections (remote address -> nothing)
+
+ connUpgradeFn connUpgradeFn // Upgrades the connection to a secret connection
// TODO(xla): This config is still needed as we parameterize peerConn and
// peer currently. All relevant configuration should be refactored into options
@@ -148,439 +68,376 @@ type MultiplexTransport struct {
mConfig conn.MConnConfig
}
-// Test multiplexTransport for interface completeness.
-var (
- _ Transport = (*MultiplexTransport)(nil)
- _ TransportLifecycle = (*MultiplexTransport)(nil)
-)
-
// NewMultiplexTransport returns a tcp connected multiplexed peer.
func NewMultiplexTransport(
- nodeInfo NodeInfo,
- nodeKey NodeKey,
+ nodeInfo types.NodeInfo,
+ nodeKey types.NodeKey,
mConfig conn.MConnConfig,
+ logger *slog.Logger,
) *MultiplexTransport {
return &MultiplexTransport{
- acceptc: make(chan accept),
- closec: make(chan struct{}),
- dialTimeout: defaultDialTimeout,
- filterTimeout: defaultFilterTimeout,
- handshakeTimeout: defaultHandshakeTimeout,
- mConfig: mConfig,
- nodeInfo: nodeInfo,
- nodeKey: nodeKey,
- conns: NewConnSet(),
- resolver: net.DefaultResolver,
+ peerCh: make(chan peerInfo, 1),
+ mConfig: mConfig,
+ nodeInfo: nodeInfo,
+ nodeKey: nodeKey,
+ logger: logger,
+ connUpgradeFn: conn.MakeSecretConnection,
}
}
-// NetAddress implements Transport.
-func (mt *MultiplexTransport) NetAddress() NetAddress {
+// NetAddress returns the transport's listen address (for p2p connections)
+func (mt *MultiplexTransport) NetAddress() types.NetAddress {
return mt.netAddr
}
-// Accept implements Transport.
-func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) {
+// Accept waits for a verified inbound Peer to connect, and returns it [BLOCKING]
+func (mt *MultiplexTransport) Accept(ctx context.Context, behavior PeerBehavior) (PeerConn, error) {
+ // Sanity check, no need to wait
+ // on an inactive transport
+ if mt.listener == nil {
+ return nil, errTransportInactive
+ }
+
select {
- // This case should never have any side-effectful/blocking operations to
- // ensure that quality peers are ready to be used.
- case a := <-mt.acceptc:
- if a.err != nil {
- return nil, a.err
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case info, ok := <-mt.peerCh:
+ if !ok {
+ return nil, errTransportClosed
}
- cfg.outbound = false
-
- return mt.wrapPeer(a.conn, a.nodeInfo, cfg, a.netAddr), nil
- case <-mt.closec:
- return nil, TransportClosedError{}
+ return mt.newMultiplexPeer(info, behavior, false)
}
}
-// Dial implements Transport.
+// Dial creates an outbound Peer connection, and
+// verifies it (performs handshaking) [BLOCKING]
func (mt *MultiplexTransport) Dial(
- addr NetAddress,
- cfg peerConfig,
-) (Peer, error) {
- c, err := addr.DialTimeout(mt.dialTimeout)
+ ctx context.Context,
+ addr types.NetAddress,
+ behavior PeerBehavior,
+) (PeerConn, error) {
+ // Dial the peer, bounded by the caller's context
+ c, err := addr.DialContext(ctx)
if err != nil {
return nil, err
}
- // TODO(xla): Evaluate if we should apply filters if we explicitly dial.
- if err := mt.filterConn(c); err != nil {
- return nil, err
- }
-
- secretConn, nodeInfo, err := mt.upgrade(c, &addr)
+ // Process the connection with expected ID
+ info, err := mt.processConn(c, addr.ID)
if err != nil {
- return nil, err
- }
-
- cfg.outbound = true
+ // Close the net peer connection
+ _ = c.Close()
- p := mt.wrapPeer(secretConn, nodeInfo, cfg, &addr)
+ return nil, fmt.Errorf("unable to process connection, %w", err)
+ }
- return p, nil
+ return mt.newMultiplexPeer(info, behavior, true)
}
-// Close implements TransportLifecycle.
+// Close stops the multiplex transport
func (mt *MultiplexTransport) Close() error {
- close(mt.closec)
-
- if mt.listener != nil {
- return mt.listener.Close()
+ if mt.listener == nil {
+ return nil
}
- return nil
+ mt.cancelFn()
+
+ return mt.listener.Close()
}
-// Listen implements TransportLifecycle.
-func (mt *MultiplexTransport) Listen(addr NetAddress) error {
+// Listen starts an active process of listening for incoming connections [NON-BLOCKING]
+func (mt *MultiplexTransport) Listen(addr types.NetAddress) error {
+ // Reserve a port, and start listening
ln, err := net.Listen("tcp", addr.DialString())
if err != nil {
- return err
+ return fmt.Errorf("unable to listen on address, %w", err)
}
if addr.Port == 0 {
// net.Listen on port 0 means the kernel will auto-allocate a port
// - find out which one has been given to us.
- _, p, err := net.SplitHostPort(ln.Addr().String())
- if err != nil {
+ tcpAddr, ok := ln.Addr().(*net.TCPAddr)
+ if !ok {
- return fmt.Errorf("error finding port (after listening on port 0): %w", err)
+ return fmt.Errorf("error finding port (after listening on port 0): unexpected listener address type %T", ln.Addr())
}
- pInt, _ := strconv.Atoi(p)
- addr.Port = uint16(pInt)
+
+ addr.Port = uint16(tcpAddr.Port)
}
+ // Set up the context
+ mt.ctx, mt.cancelFn = context.WithCancel(context.Background())
+
mt.netAddr = addr
mt.listener = ln
- go mt.acceptPeers()
+ // Run the routine for accepting
+ // incoming peer connections
+ go mt.runAcceptLoop()
return nil
}
-func (mt *MultiplexTransport) acceptPeers() {
+// runAcceptLoop runs the loop where incoming peers are:
+//
+// 1. accepted by the transport
+// 2. filtered
+// 3. upgraded (handshaked + verified)
+func (mt *MultiplexTransport) runAcceptLoop() {
+ var wg sync.WaitGroup
+
+ defer func() {
+ wg.Wait() // Wait for all process routines
+
+ close(mt.peerCh)
+ }()
+
for {
- c, err := mt.listener.Accept()
- if err != nil {
- // If Close() has been called, silently exit.
- select {
- case _, ok := <-mt.closec:
- if !ok {
- return
- }
- default:
- // Transport is not closed
- }
+ select {
+ case <-mt.ctx.Done():
+ mt.logger.Debug("transport accept context closed")
- mt.acceptc <- accept{err: err}
return
- }
+ default:
+ // Accept an incoming peer connection
+ c, err := mt.listener.Accept()
+ if err != nil {
+ mt.logger.Error(
+ "unable to accept p2p connection",
+ "err", err,
+ )
- // Connection upgrade and filtering should be asynchronous to avoid
- // Head-of-line blocking[0].
- // Reference: https://github.com/tendermint/classic/issues/2047
- //
- // [0] https://en.wikipedia.org/wiki/Head-of-line_blocking
- go func(c net.Conn) {
- defer func() {
- if r := recover(); r != nil {
- err := RejectedError{
- conn: c,
- err: errors.New("recovered from panic: %v", r),
- isAuthFailure: true,
- }
- select {
- case mt.acceptc <- accept{err: err}:
- case <-mt.closec:
- // Give up if the transport was closed.
- _ = c.Close()
- return
- }
- }
- }()
-
- var (
- nodeInfo NodeInfo
- secretConn *conn.SecretConnection
- netAddr *NetAddress
- )
-
- err := mt.filterConn(c)
- if err == nil {
- secretConn, nodeInfo, err = mt.upgrade(c, nil)
- if err == nil {
- addr := c.RemoteAddr()
- id := secretConn.RemotePubKey().Address().ID()
- netAddr = NewNetAddress(id, addr)
- }
+ continue
}
- select {
- case mt.acceptc <- accept{netAddr, secretConn, nodeInfo, err}:
- // Make the upgraded peer available.
- case <-mt.closec:
- // Give up if the transport was closed.
- _ = c.Close()
- return
- }
- }(c)
- }
-}
+ // Process the new connection asynchronously
+ wg.Add(1)
-// Cleanup removes the given address from the connections set and
-// closes the connection.
-func (mt *MultiplexTransport) Cleanup(p Peer) {
- mt.conns.RemoveAddr(p.RemoteAddr())
- _ = p.CloseConn()
-}
+ go func(c net.Conn) {
+ defer wg.Done()
-func (mt *MultiplexTransport) cleanup(c net.Conn) error {
- mt.conns.Remove(c)
+ info, err := mt.processConn(c, "")
+ if err != nil {
+ mt.logger.Error(
+ "unable to process p2p connection",
+ "err", err,
+ )
- return c.Close()
-}
+ // Close the connection
+ _ = c.Close()
-func (mt *MultiplexTransport) filterConn(c net.Conn) (err error) {
- defer func() {
- if err != nil {
- _ = c.Close()
- }
- }()
+ return
+ }
- // Reject if connection is already present.
- if mt.conns.Has(c) {
- return RejectedError{conn: c, isDuplicate: true}
+ select {
+ case mt.peerCh <- info:
+ case <-mt.ctx.Done():
+ // Give up if the transport was closed.
+ _ = c.Close()
+ }
+ }(c)
+ }
}
+}
- // Resolve ips for incoming conn.
- ips, err := resolveIPs(mt.resolver, c)
- if err != nil {
- return err
+// processConn handles the raw connection by upgrading it and verifying it
+func (mt *MultiplexTransport) processConn(c net.Conn, expectedID types.ID) (peerInfo, error) {
+ dialAddr := c.RemoteAddr().String()
+
+ // Check if the connection is a duplicate one
+ if _, exists := mt.activeConns.LoadOrStore(dialAddr, struct{}{}); exists {
+ return peerInfo{}, errDuplicateConnection
}
- errc := make(chan error, len(mt.connFilters))
+ // Handshake with the peer, through STS
+ secretConn, nodeInfo, err := mt.upgradeAndVerifyConn(c)
+ if err != nil {
+ mt.activeConns.Delete(dialAddr)
- for _, f := range mt.connFilters {
- go func(f ConnFilterFunc, c net.Conn, ips []net.IP, errc chan<- error) {
- errc <- f(mt.conns, c, ips)
- }(f, c, ips, errc)
+ return peerInfo{}, fmt.Errorf("unable to upgrade connection, %w", err)
}
- for i := 0; i < cap(errc); i++ {
- select {
- case err := <-errc:
- if err != nil {
- return RejectedError{conn: c, err: err, isFiltered: true}
- }
- case <-time.After(mt.filterTimeout):
- return FilterTimeoutError{}
- }
+ // Grab the connection ID.
+ // At this point, the connection and information shared
+ // with the peer are considered valid, since full handshaking
+ // and verification took place
+ id := secretConn.RemotePubKey().Address().ID()
+
+ // The dial ID needs to be verified because, for outbound peers
+ // (peers the node dials), there is an expected peer ID set when
+ // initializing the outbound connection, which can differ from the exchanged one.
+ // For inbound peers, the ID is whatever the peer exchanges during the
+ // handshaking process, and is verified separately
+ if !expectedID.IsZero() && id.String() != expectedID.String() {
+ mt.activeConns.Delete(dialAddr)
+
+ return peerInfo{}, fmt.Errorf(
+ "%w (expected %q got %q)",
+ errPeerIDDialMismatch,
+ expectedID,
+ id,
+ )
}
- mt.conns.Set(c, ips)
+ netAddr, _ := types.NewNetAddress(id, c.RemoteAddr())
- return nil
+ return peerInfo{
+ addr: netAddr,
+ conn: secretConn,
+ nodeInfo: nodeInfo,
+ }, nil
}
-func (mt *MultiplexTransport) upgrade(
- c net.Conn,
- dialedAddr *NetAddress,
-) (secretConn *conn.SecretConnection, nodeInfo NodeInfo, err error) {
- defer func() {
- if err != nil {
- _ = mt.cleanup(c)
- }
- }()
+// Remove removes the peer resources from the transport
+func (mt *MultiplexTransport) Remove(p PeerConn) {
+ mt.activeConns.Delete(p.RemoteAddr().String())
+}
- secretConn, err = upgradeSecretConn(c, mt.handshakeTimeout, mt.nodeKey.PrivKey)
+// upgradeAndVerifyConn upgrades the connection (performs the handshaking process)
+// and verifies that the connecting peer is valid
+func (mt *MultiplexTransport) upgradeAndVerifyConn(c net.Conn) (secretConn, types.NodeInfo, error) {
+ // Upgrade to a secret connection.
+ // A secret connection is a connection that has passed
+ // an initial handshaking process, as defined by the STS
+ // protocol, and is considered to be secure and authentic
+ sc, err := mt.upgradeToSecretConn(
+ c,
+ defaultHandshakeTimeout,
+ mt.nodeKey.PrivKey,
+ )
if err != nil {
- return nil, NodeInfo{}, RejectedError{
- conn: c,
- err: fmt.Errorf("secret conn failed: %w", err),
- isAuthFailure: true,
- }
+ return nil, types.NodeInfo{}, fmt.Errorf("unable to upgrade p2p connection, %w", err)
}
- // For outgoing conns, ensure connection key matches dialed key.
- connID := secretConn.RemotePubKey().Address().ID()
- if dialedAddr != nil {
- if dialedID := dialedAddr.ID; connID.String() != dialedID.String() {
- return nil, NodeInfo{}, RejectedError{
- conn: c,
- id: connID,
- err: fmt.Errorf(
- "conn.ID (%v) dialed ID (%v) mismatch",
- connID,
- dialedID,
- ),
- isAuthFailure: true,
- }
- }
- }
-
- nodeInfo, err = handshake(secretConn, mt.handshakeTimeout, mt.nodeInfo)
+ // Exchange node information
+ nodeInfo, err := exchangeNodeInfo(sc, defaultHandshakeTimeout, mt.nodeInfo)
if err != nil {
- return nil, NodeInfo{}, RejectedError{
- conn: c,
- err: fmt.Errorf("handshake failed: %w", err),
- isAuthFailure: true,
- }
+ return nil, types.NodeInfo{}, fmt.Errorf("unable to exchange node information, %w", err)
}
- if err := nodeInfo.Validate(); err != nil {
- return nil, NodeInfo{}, RejectedError{
- conn: c,
- err: err,
- isNodeInfoInvalid: true,
- }
- }
+ // Ensure the connection ID matches the node's reported ID
+ connID := sc.RemotePubKey().Address().ID()
- // Ensure connection key matches self reported key.
if connID != nodeInfo.ID() {
- return nil, NodeInfo{}, RejectedError{
- conn: c,
- id: connID,
- err: fmt.Errorf(
- "conn.ID (%v) NodeInfo.ID (%v) mismatch",
- connID,
- nodeInfo.ID(),
- ),
- isAuthFailure: true,
- }
- }
-
- // Reject self.
- if mt.nodeInfo.ID() == nodeInfo.ID() {
- return nil, NodeInfo{}, RejectedError{
- addr: *NewNetAddress(nodeInfo.ID(), c.RemoteAddr()),
- conn: c,
- id: nodeInfo.ID(),
- isSelf: true,
- }
+ return nil, types.NodeInfo{}, fmt.Errorf(
+ "%w (expected %q got %q)",
+ errPeerIDNodeInfoMismatch,
+ connID.String(),
+ nodeInfo.ID().String(),
+ )
}
- if err := mt.nodeInfo.CompatibleWith(nodeInfo); err != nil {
- return nil, NodeInfo{}, RejectedError{
- conn: c,
- err: err,
- id: nodeInfo.ID(),
- isIncompatible: true,
- }
+ // Check compatibility with the node
+ if err = mt.nodeInfo.CompatibleWith(nodeInfo); err != nil {
+ return nil, types.NodeInfo{}, fmt.Errorf("%w, %w", errIncompatibleNodeInfo, err)
}
- return secretConn, nodeInfo, nil
+ return sc, nodeInfo, nil
}
-func (mt *MultiplexTransport) wrapPeer(
- c net.Conn,
- ni NodeInfo,
- cfg peerConfig,
- socketAddr *NetAddress,
-) Peer {
- persistent := false
- if cfg.isPersistent != nil {
- if cfg.outbound {
- persistent = cfg.isPersistent(socketAddr)
- } else {
- selfReportedAddr := ni.NetAddress
- persistent = cfg.isPersistent(selfReportedAddr)
- }
+// newMultiplexPeer creates a new multiplex Peer, using
+// the provided Peer behavior and info
+func (mt *MultiplexTransport) newMultiplexPeer(
+ info peerInfo,
+ behavior PeerBehavior,
+ isOutbound bool,
+) (PeerConn, error) {
+ // Extract the host
+ host, _, err := net.SplitHostPort(info.conn.RemoteAddr().String())
+ if err != nil {
+ return nil, fmt.Errorf("unable to extract peer host, %w", err)
}
- peerConn := newPeerConn(
- cfg.outbound,
- persistent,
- c,
- socketAddr,
- )
+ // Look up the IPs
+ ips, err := net.LookupIP(host)
+ if err != nil {
+ return nil, fmt.Errorf("unable to lookup peer IPs, %w", err)
+ }
- p := newPeer(
- peerConn,
- mt.mConfig,
- ni,
- cfg.reactorsByCh,
- cfg.chDescs,
- cfg.onPeerError,
- )
+ // Wrap the info related to the connection
+ peerConn := &ConnInfo{
+ Outbound: isOutbound,
+ Persistent: behavior.IsPersistentPeer(info.addr.ID),
+ Private: behavior.IsPrivatePeer(info.nodeInfo.ID()),
+ Conn: info.conn,
+ RemoteIP: ips[0], // use the first resolved IP
+ SocketAddr: info.addr,
+ }
+
+ // Create the info related to the multiplex connection
+ mConfig := &ConnConfig{
+ MConfig: mt.mConfig,
+ ReactorsByCh: behavior.Reactors(),
+ ChDescs: behavior.ReactorChDescriptors(),
+ OnPeerError: behavior.HandlePeerError,
+ }
- return p
+ return newPeer(peerConn, info.nodeInfo, mConfig), nil
}
-func handshake(
- c net.Conn,
+// exchangeNodeInfo performs a data swap, in which node
+// info is exchanged asynchronously between the current node and the peer
+func exchangeNodeInfo(
+ c secretConn,
timeout time.Duration,
- nodeInfo NodeInfo,
-) (NodeInfo, error) {
+ nodeInfo types.NodeInfo,
+) (types.NodeInfo, error) {
if err := c.SetDeadline(time.Now().Add(timeout)); err != nil {
- return NodeInfo{}, err
+ return types.NodeInfo{}, err
}
var (
- errc = make(chan error, 2)
-
- peerNodeInfo NodeInfo
+ peerNodeInfo types.NodeInfo
ourNodeInfo = nodeInfo
)
- go func(errc chan<- error, c net.Conn) {
+ g, _ := errgroup.WithContext(context.Background())
+
+ g.Go(func() error {
_, err := amino.MarshalSizedWriter(c, ourNodeInfo)
- errc <- err
- }(errc, c)
- go func(errc chan<- error, c net.Conn) {
+
+ return err
+ })
+
+ g.Go(func() error {
_, err := amino.UnmarshalSizedReader(
c,
&peerNodeInfo,
- int64(MaxNodeInfoSize()),
+ types.MaxNodeInfoSize,
)
- errc <- err
- }(errc, c)
- for i := 0; i < cap(errc); i++ {
- err := <-errc
- if err != nil {
- return NodeInfo{}, err
- }
+ return err
+ })
+
+ if err := g.Wait(); err != nil {
+ return types.NodeInfo{}, err
+ }
+
+ // Validate the received node information
+ if err := peerNodeInfo.Validate(); err != nil {
+ return types.NodeInfo{}, fmt.Errorf("unable to validate node info, %w", err)
}
return peerNodeInfo, c.SetDeadline(time.Time{})
}
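As an aside (illustration only, not part of this changeset): the errgroup pattern used above runs the amino write and read concurrently, so both sides of the connection can send their own node info while reading the peer's without deadlocking. A stripped-down sketch of the same pattern over an arbitrary ReadWriter, with assumed names:

package example

import (
	"context"
	"io"

	"golang.org/x/sync/errgroup"
)

// exchange writes out and reads len(in) bytes concurrently,
// returning the first error from either side
func exchange(rw io.ReadWriter, out, in []byte) error {
	g, _ := errgroup.WithContext(context.Background())

	// Send our payload
	g.Go(func() error {
		_, err := rw.Write(out)

		return err
	})

	// Receive the peer's payload in parallel
	g.Go(func() error {
		_, err := io.ReadFull(rw, in)

		return err
	})

	return g.Wait()
}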
-func upgradeSecretConn(
+// upgradeToSecretConn takes an active TCP connection,
+// and upgrades it to a verified, handshaked connection through
+// the STS protocol
+func (mt *MultiplexTransport) upgradeToSecretConn(
c net.Conn,
timeout time.Duration,
privKey crypto.PrivKey,
-) (*conn.SecretConnection, error) {
+) (secretConn, error) {
if err := c.SetDeadline(time.Now().Add(timeout)); err != nil {
return nil, err
}
- sc, err := conn.MakeSecretConnection(c, privKey)
+ // Handshake (STS)
+ sc, err := mt.connUpgradeFn(c, privKey)
if err != nil {
return nil, err
}
return sc, sc.SetDeadline(time.Time{})
}
-
-func resolveIPs(resolver IPResolver, c net.Conn) ([]net.IP, error) {
- host, _, err := net.SplitHostPort(c.RemoteAddr().String())
- if err != nil {
- return nil, err
- }
-
- addrs, err := resolver.LookupIPAddr(context.Background(), host)
- if err != nil {
- return nil, err
- }
-
- ips := []net.IP{}
-
- for _, addr := range addrs {
- ips = append(ips, addr.IP)
- }
-
- return ips, nil
-}
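For illustration, a hedged sketch of how a caller might drive the reworked transport API above (Listen, then Accept with a context and a PeerBehavior). The runTransport / handlePeer names and the listen address parameter are assumptions for the example only:

package example

import (
	"context"
	"fmt"

	"github.com/gnolang/gno/tm2/pkg/log"
	"github.com/gnolang/gno/tm2/pkg/p2p"
	"github.com/gnolang/gno/tm2/pkg/p2p/conn"
	"github.com/gnolang/gno/tm2/pkg/p2p/types"
)

// runTransport listens on the given host:port and hands verified peers off
// to handlePeer, until the context is canceled or Accept fails
func runTransport(
	ctx context.Context,
	ni types.NodeInfo,
	nk types.NodeKey,
	listenAddr string, // e.g. "127.0.0.1:26656" (assumed value)
	behavior p2p.PeerBehavior,
	handlePeer func(p2p.PeerConn),
) error {
	transport := p2p.NewMultiplexTransport(ni, nk, conn.DefaultMConnConfig(), log.NewNoopLogger())

	// Build the dial address from the node's ID and the listen address
	addr, err := types.NewNetAddressFromString(types.NetAddressString(nk.ID(), listenAddr))
	if err != nil {
		return fmt.Errorf("unable to parse listen address, %w", err)
	}

	// Reserve the port and start the accept loop
	if err := transport.Listen(*addr); err != nil {
		return fmt.Errorf("unable to start transport, %w", err)
	}
	defer func() { _ = transport.Close() }()

	for {
		// Accept blocks until a handshaked and verified peer is available
		p, err := transport.Accept(ctx, behavior)
		if err != nil {
			return fmt.Errorf("unable to accept peer, %w", err)
		}

		go handlePeer(p)
	}
}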
diff --git a/tm2/pkg/p2p/transport_test.go b/tm2/pkg/p2p/transport_test.go
index 63b1c26e666..3eb3264ec2b 100644
--- a/tm2/pkg/p2p/transport_test.go
+++ b/tm2/pkg/p2p/transport_test.go
@@ -1,650 +1,519 @@
package p2p
import (
+ "context"
"fmt"
- "math/rand"
"net"
- "reflect"
"testing"
"time"
- "github.com/gnolang/gno/tm2/pkg/amino"
- "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
+ "github.com/gnolang/gno/tm2/pkg/log"
"github.com/gnolang/gno/tm2/pkg/p2p/conn"
- "github.com/gnolang/gno/tm2/pkg/testutils"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ "github.com/gnolang/gno/tm2/pkg/versionset"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
-var defaultNodeName = "host_peer"
-
-func emptyNodeInfo() NodeInfo {
- return NodeInfo{}
-}
-
-// newMultiplexTransport returns a tcp connected multiplexed peer
-// using the default MConnConfig. It's a convenience function used
-// for testing.
-func newMultiplexTransport(
- nodeInfo NodeInfo,
- nodeKey NodeKey,
-) *MultiplexTransport {
- return NewMultiplexTransport(
- nodeInfo, nodeKey, conn.DefaultMConnConfig(),
- )
-}
-
-func TestTransportMultiplexConnFilter(t *testing.T) {
- t.Parallel()
-
- mt := newMultiplexTransport(
- emptyNodeInfo(),
- NodeKey{
- PrivKey: ed25519.GenPrivKey(),
- },
- )
- id := mt.nodeKey.ID()
-
- MultiplexTransportConnFilters(
- func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil },
- func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil },
- func(_ ConnSet, _ net.Conn, _ []net.IP) error {
- return fmt.Errorf("rejected")
- },
- )(mt)
-
- addr, err := NewNetAddressFromString(NetAddressString(id, "127.0.0.1:0"))
- if err != nil {
- t.Fatal(err)
- }
-
- if err := mt.Listen(*addr); err != nil {
- t.Fatal(err)
- }
+// generateNetAddr generates dummy net addresses
+func generateNetAddr(t *testing.T, count int) []*types.NetAddress {
+ t.Helper()
- errc := make(chan error)
+ addrs := make([]*types.NetAddress, 0, count)
- go func() {
- addr := NewNetAddress(id, mt.listener.Addr())
+ for i := 0; i < count; i++ {
+ key := types.GenerateNodeKey()
- _, err := addr.Dial()
- if err != nil {
- errc <- err
- return
- }
+ // Grab a random port
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
- close(errc)
- }()
+ addr, err := types.NewNetAddress(key.ID(), ln.Addr())
+ require.NoError(t, err)
- if err := <-errc; err != nil {
- t.Errorf("connection failed: %v", err)
+ addrs = append(addrs, addr)
}
- _, err = mt.Accept(peerConfig{})
- if err, ok := err.(RejectedError); ok {
- if !err.IsFiltered() {
- t.Errorf("expected peer to be filtered")
- }
- } else {
- t.Errorf("expected RejectedError")
- }
+ return addrs
}
-func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
+func TestMultiplexTransport_NetAddress(t *testing.T) {
t.Parallel()
- mt := newMultiplexTransport(
- emptyNodeInfo(),
- NodeKey{
- PrivKey: ed25519.GenPrivKey(),
- },
- )
- id := mt.nodeKey.ID()
-
- MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt)
- MultiplexTransportConnFilters(
- func(_ ConnSet, _ net.Conn, _ []net.IP) error {
- time.Sleep(100 * time.Millisecond)
- return nil
- },
- )(mt)
-
- addr, err := NewNetAddressFromString(NetAddressString(id, "127.0.0.1:0"))
- if err != nil {
- t.Fatal(err)
- }
-
- if err := mt.Listen(*addr); err != nil {
- t.Fatal(err)
- }
-
- errc := make(chan error)
+ t.Run("transport not active", func(t *testing.T) {
+ t.Parallel()
- go func() {
- addr := NewNetAddress(id, mt.listener.Addr())
-
- _, err := addr.Dial()
- if err != nil {
- errc <- err
- return
- }
-
- close(errc)
- }()
-
- if err := <-errc; err != nil {
- t.Errorf("connection failed: %v", err)
- }
-
- _, err = mt.Accept(peerConfig{})
- if _, ok := err.(FilterTimeoutError); !ok {
- t.Errorf("expected FilterTimeoutError")
- }
-}
-
-func TestTransportMultiplexAcceptMultiple(t *testing.T) {
- t.Parallel()
+ var (
+ ni = types.NodeInfo{}
+ nk = types.NodeKey{}
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ )
- mt := testSetupMultiplexTransport(t)
- laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
+ transport := NewMultiplexTransport(ni, nk, mCfg, logger)
+ addr := transport.NetAddress()
- var (
- seed = rand.New(rand.NewSource(time.Now().UnixNano()))
- nDialers = seed.Intn(64) + 64
- errc = make(chan error, nDialers)
- )
+ assert.Error(t, addr.Validate())
+ })
- // Setup dialers.
- for i := 0; i < nDialers; i++ {
- go testDialer(*laddr, errc)
- }
+ t.Run("active transport on random port", func(t *testing.T) {
+ t.Parallel()
- // Catch connection errors.
- for i := 0; i < nDialers; i++ {
- if err := <-errc; err != nil {
- t.Fatal(err)
- }
- }
+ var (
+ ni = types.NodeInfo{}
+ nk = types.NodeKey{}
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ addr = generateNetAddr(t, 1)[0]
+ )
- ps := []Peer{}
+ addr.Port = 0 // random port
- // Accept all peers.
- for i := 0; i < cap(errc); i++ {
- p, err := mt.Accept(peerConfig{})
- if err != nil {
- t.Fatal(err)
- }
+ transport := NewMultiplexTransport(ni, nk, mCfg, logger)
- if err := p.Start(); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, transport.Listen(*addr))
+ defer func() {
+ require.NoError(t, transport.Close())
+ }()
- ps = append(ps, p)
- }
+ netAddr := transport.NetAddress()
+ assert.False(t, netAddr.Equals(*addr))
+ assert.NoError(t, netAddr.Validate())
+ })
- if have, want := len(ps), cap(errc); have != want {
- t.Errorf("have %v, want %v", have, want)
- }
+ t.Run("active transport on specific port", func(t *testing.T) {
+ t.Parallel()
- // Stop all peers.
- for _, p := range ps {
- if err := p.Stop(); err != nil {
- t.Fatal(err)
- }
- }
+ var (
+ ni = types.NodeInfo{}
+ nk = types.NodeKey{}
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ addr = generateNetAddr(t, 1)[0]
+ )
- if err := mt.Close(); err != nil {
- t.Errorf("close errored: %v", err)
- }
-}
+ addr.Port = 4123 // specific port
-func testDialer(dialAddr NetAddress, errc chan error) {
- var (
- pv = ed25519.GenPrivKey()
- dialer = newMultiplexTransport(
- testNodeInfo(pv.PubKey().Address().ID(), defaultNodeName),
- NodeKey{
- PrivKey: pv,
- },
- )
- )
+ transport := NewMultiplexTransport(ni, nk, mCfg, logger)
- _, err := dialer.Dial(dialAddr, peerConfig{})
- if err != nil {
- errc <- err
- return
- }
+ require.NoError(t, transport.Listen(*addr))
+ defer func() {
+ require.NoError(t, transport.Close())
+ }()
- // Signal that the connection was established.
- errc <- nil
+ netAddr := transport.NetAddress()
+ assert.True(t, netAddr.Equals(*addr))
+ assert.NoError(t, netAddr.Validate())
+ })
}
-func TestFlappyTransportMultiplexAcceptNonBlocking(t *testing.T) {
+func TestMultiplexTransport_Accept(t *testing.T) {
t.Parallel()
- testutils.FilterStability(t, testutils.Flappy)
+ t.Run("inactive transport", func(t *testing.T) {
+ t.Parallel()
- mt := testSetupMultiplexTransport(t)
-
- var (
- fastNodePV = ed25519.GenPrivKey()
- fastNodeInfo = testNodeInfo(fastNodePV.PubKey().Address().ID(), "fastnode")
- errc = make(chan error)
- fastc = make(chan struct{})
- slowc = make(chan struct{})
- )
-
- // Simulate slow Peer.
- go func() {
- addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
-
- c, err := addr.Dial()
- if err != nil {
- errc <- err
- return
- }
-
- close(slowc)
+ var (
+ ni = types.NodeInfo{}
+ nk = types.NodeKey{}
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ )
- select {
- case <-fastc:
- // Fast peer connected.
- case <-time.After(100 * time.Millisecond):
- // We error if the fast peer didn't succeed.
- errc <- fmt.Errorf("Fast peer timed out")
- }
+ transport := NewMultiplexTransport(ni, nk, mCfg, logger)
- sc, err := upgradeSecretConn(c, 100*time.Millisecond, ed25519.GenPrivKey())
- if err != nil {
- errc <- err
- return
- }
+ p, err := transport.Accept(context.Background(), nil)
- _, err = handshake(sc, 100*time.Millisecond,
- testNodeInfo(
- ed25519.GenPrivKey().PubKey().Address().ID(),
- "slow_peer",
- ))
- if err != nil {
- errc <- err
- return
- }
- }()
+ assert.Nil(t, p)
+ assert.ErrorIs(
+ t,
+ err,
+ errTransportInactive,
+ )
+ })
- // Simulate fast Peer.
- go func() {
- <-slowc
+ t.Run("transport closed", func(t *testing.T) {
+ t.Parallel()
- dialer := newMultiplexTransport(
- fastNodeInfo,
- NodeKey{
- PrivKey: fastNodePV,
- },
+ var (
+ ni = types.NodeInfo{}
+ nk = types.NodeKey{}
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ addr = generateNetAddr(t, 1)[0]
)
- addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
- _, err := dialer.Dial(*addr, peerConfig{})
- if err != nil {
- errc <- err
- return
- }
+ addr.Port = 0
- close(errc)
- close(fastc)
- }()
+ transport := NewMultiplexTransport(ni, nk, mCfg, logger)
- if err := <-errc; err != nil {
- t.Errorf("connection failed: %v", err)
- }
+ // Start the transport
+ require.NoError(t, transport.Listen(*addr))
- p, err := mt.Accept(peerConfig{})
- if err != nil {
- t.Fatal(err)
- }
+ // Stop the transport
+ require.NoError(t, transport.Close())
- if have, want := p.NodeInfo(), fastNodeInfo; !reflect.DeepEqual(have, want) {
- t.Errorf("have %v, want %v", have, want)
- }
-}
-
-func TestTransportMultiplexValidateNodeInfo(t *testing.T) {
- t.Parallel()
+ p, err := transport.Accept(context.Background(), nil)
- mt := testSetupMultiplexTransport(t)
+ assert.Nil(t, p)
+ assert.ErrorIs(
+ t,
+ err,
+ errTransportClosed,
+ )
+ })
- errc := make(chan error)
+ t.Run("context canceled", func(t *testing.T) {
+ t.Parallel()
- go func() {
var (
- pv = ed25519.GenPrivKey()
- dialer = newMultiplexTransport(
- testNodeInfo(pv.PubKey().Address().ID(), ""), // Should not be empty
- NodeKey{
- PrivKey: pv,
- },
- )
+ ni = types.NodeInfo{}
+ nk = types.NodeKey{}
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ addr = generateNetAddr(t, 1)[0]
)
- addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
+ addr.Port = 0
- _, err := dialer.Dial(*addr, peerConfig{})
- if err != nil {
- errc <- err
- return
- }
+ transport := NewMultiplexTransport(ni, nk, mCfg, logger)
- close(errc)
- }()
+ // Start the transport
+ require.NoError(t, transport.Listen(*addr))
- if err := <-errc; err != nil {
- t.Errorf("connection failed: %v", err)
- }
+ ctx, cancelFn := context.WithCancel(context.Background())
+ cancelFn()
- _, err := mt.Accept(peerConfig{})
- if err, ok := err.(RejectedError); ok {
- if !err.IsNodeInfoInvalid() {
- t.Errorf("expected NodeInfo to be invalid")
- }
- } else {
- t.Errorf("expected RejectedError")
- }
-}
+ p, err := transport.Accept(ctx, nil)
-func TestTransportMultiplexRejectMismatchID(t *testing.T) {
- t.Parallel()
+ assert.Nil(t, p)
+ assert.ErrorIs(
+ t,
+ err,
+ context.Canceled,
+ )
+ })
- mt := testSetupMultiplexTransport(t)
+ t.Run("peer ID mismatch", func(t *testing.T) {
+ t.Parallel()
- errc := make(chan error)
+ var (
+ network = "dev"
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ keys = []*types.NodeKey{
+ types.GenerateNodeKey(),
+ types.GenerateNodeKey(),
+ }
- go func() {
- dialer := newMultiplexTransport(
- testNodeInfo(
- ed25519.GenPrivKey().PubKey().Address().ID(), "dialer",
- ),
- NodeKey{
- PrivKey: ed25519.GenPrivKey(),
- },
+ peerBehavior = &reactorPeerBehavior{
+ chDescs: make([]*conn.ChannelDescriptor, 0),
+ reactorsByCh: make(map[byte]Reactor),
+ handlePeerErrFn: func(_ PeerConn, err error) {
+ require.NoError(t, err)
+ },
+ isPersistentPeerFn: func(_ types.ID) bool {
+ return false
+ },
+ isPrivatePeerFn: func(_ types.ID) bool {
+ return false
+ },
+ }
)
- addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
- _, err := dialer.Dial(*addr, peerConfig{})
- if err != nil {
- errc <- err
- return
- }
+ peers := make([]*MultiplexTransport, 0, len(keys))
- close(errc)
- }()
+ for index, key := range keys {
+ addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+ require.NoError(t, err)
- if err := <-errc; err != nil {
- t.Errorf("connection failed: %v", err)
- }
+ id := key.ID()
- _, err := mt.Accept(peerConfig{})
- if err, ok := err.(RejectedError); ok {
- if !err.IsAuthFailure() {
- t.Errorf("expected auth failure")
- }
- } else {
- t.Errorf("expected RejectedError")
- }
-}
+ if index%1 == 0 {
+ // Hijack the key value
+ id = types.GenerateNodeKey().ID()
+ }
-func TestTransportMultiplexDialRejectWrongID(t *testing.T) {
- t.Parallel()
+ na, err := types.NewNetAddress(id, addr)
+ require.NoError(t, err)
- mt := testSetupMultiplexTransport(t)
+ ni := types.NodeInfo{
+ Network: network, // common network
+ PeerID: id,
+ Version: "v1.0.0-rc.0",
+ Moniker: fmt.Sprintf("node-%d", index),
+ VersionSet: make(versionset.VersionSet, 0), // compatible version set
+ Channels: []byte{42}, // common channel
+ }
- var (
- pv = ed25519.GenPrivKey()
- dialer = newMultiplexTransport(
- testNodeInfo(pv.PubKey().Address().ID(), ""), // Should not be empty
- NodeKey{
- PrivKey: pv,
- },
- )
- )
+ // Create a fresh transport
+ tr := NewMultiplexTransport(ni, *key, mCfg, logger)
- wrongID := ed25519.GenPrivKey().PubKey().Address().ID()
- addr := NewNetAddress(wrongID, mt.listener.Addr())
+ // Start the transport
+ require.NoError(t, tr.Listen(*na))
- _, err := dialer.Dial(*addr, peerConfig{})
- if err != nil {
- t.Logf("connection failed: %v", err)
- if err, ok := err.(RejectedError); ok {
- if !err.IsAuthFailure() {
- t.Errorf("expected auth failure")
- }
- } else {
- t.Errorf("expected RejectedError")
+ t.Cleanup(func() {
+ assert.NoError(t, tr.Close())
+ })
+
+ peers = append(
+ peers,
+ tr,
+ )
}
- }
-}
-func TestTransportMultiplexRejectIncompatible(t *testing.T) {
- t.Parallel()
+ // Make peer 1 --dial--> peer 2, and handshake.
+ // This "upgrade" should fail because the peer shared a different
+ // peer ID than what they actually used for the secret connection
+ ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancelFn()
- mt := testSetupMultiplexTransport(t)
+ p, err := peers[0].Dial(ctx, peers[1].netAddr, peerBehavior)
+ assert.ErrorIs(t, err, errPeerIDNodeInfoMismatch)
+ require.Nil(t, p)
+ })
- errc := make(chan error)
+ t.Run("incompatible peers", func(t *testing.T) {
+ t.Parallel()
- go func() {
var (
- pv = ed25519.GenPrivKey()
- dialer = newMultiplexTransport(
- testNodeInfoWithNetwork(pv.PubKey().Address().ID(), "dialer", "incompatible-network"),
- NodeKey{
- PrivKey: pv,
+ network = "dev"
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ keys = []*types.NodeKey{
+ types.GenerateNodeKey(),
+ types.GenerateNodeKey(),
+ }
+
+ peerBehavior = &reactorPeerBehavior{
+ chDescs: make([]*conn.ChannelDescriptor, 0),
+ reactorsByCh: make(map[byte]Reactor),
+ handlePeerErrFn: func(_ PeerConn, err error) {
+ require.NoError(t, err)
},
- )
+ isPersistentPeerFn: func(_ types.ID) bool {
+ return false
+ },
+ isPrivatePeerFn: func(_ types.ID) bool {
+ return false
+ },
+ }
)
- addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
- _, err := dialer.Dial(*addr, peerConfig{})
- if err != nil {
- errc <- err
- return
- }
+ peers := make([]*MultiplexTransport, 0, len(keys))
- close(errc)
- }()
+ for index, key := range keys {
+ addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+ require.NoError(t, err)
- _, err := mt.Accept(peerConfig{})
- if err, ok := err.(RejectedError); ok {
- if !err.IsIncompatible() {
- t.Errorf("expected to reject incompatible")
- }
- } else {
- t.Errorf("expected RejectedError")
- }
-}
+ id := key.ID()
-func TestTransportMultiplexRejectSelf(t *testing.T) {
- t.Parallel()
+ na, err := types.NewNetAddress(id, addr)
+ require.NoError(t, err)
- mt := testSetupMultiplexTransport(t)
+ chainID := network
- errc := make(chan error)
+ if index%2 == 0 {
+ chainID = "totally-random-network"
+ }
- go func() {
- addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
+ ni := types.NodeInfo{
+ Network: chainID,
+ PeerID: id,
+ Version: "v1.0.0-rc.0",
+ Moniker: fmt.Sprintf("node-%d", index),
+ VersionSet: make(versionset.VersionSet, 0), // compatible version set
+ Channels: []byte{42}, // common channel
+ }
- _, err := mt.Dial(*addr, peerConfig{})
- if err != nil {
- errc <- err
- return
- }
+ // Create a fresh transport
+ tr := NewMultiplexTransport(ni, *key, mCfg, logger)
- close(errc)
- }()
+ // Start the transport
+ require.NoError(t, tr.Listen(*na))
- if err := <-errc; err != nil {
- if err, ok := err.(RejectedError); ok {
- if !err.IsSelf() {
- t.Errorf("expected to reject self, got: %v", err)
- }
- } else {
- t.Errorf("expected RejectedError")
- }
- } else {
- t.Errorf("expected connection failure")
- }
+ t.Cleanup(func() {
+ assert.NoError(t, tr.Close())
+ })
- _, err := mt.Accept(peerConfig{})
- if err, ok := err.(RejectedError); ok {
- if !err.IsSelf() {
- t.Errorf("expected to reject self, got: %v", err)
+ peers = append(
+ peers,
+ tr,
+ )
}
- } else {
- t.Errorf("expected RejectedError")
- }
-}
-func TestTransportConnDuplicateIPFilter(t *testing.T) {
- t.Parallel()
+ // Make peer 1 --dial--> peer 2, and handshake.
+ // This "upgrade" should fail because the peer shared a different
+ // peer ID than what they actually used for the secret connection
+ ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancelFn()
- filter := ConnDuplicateIPFilter()
+ p, err := peers[0].Dial(ctx, peers[1].netAddr, peerBehavior)
+ assert.ErrorIs(t, err, errIncompatibleNodeInfo)
+ require.Nil(t, p)
+ })
- if err := filter(nil, &testTransportConn{}, nil); err != nil {
- t.Fatal(err)
- }
+ t.Run("dialed peer ID mismatch", func(t *testing.T) {
+ t.Parallel()
- var (
- c = &testTransportConn{}
- cs = NewConnSet()
- )
+ var (
+ network = "dev"
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ keys = []*types.NodeKey{
+ types.GenerateNodeKey(),
+ types.GenerateNodeKey(),
+ }
- cs.Set(c, []net.IP{
- {10, 0, 10, 1},
- {10, 0, 10, 2},
- {10, 0, 10, 3},
- })
+ peerBehavior = &reactorPeerBehavior{
+ chDescs: make([]*conn.ChannelDescriptor, 0),
+ reactorsByCh: make(map[byte]Reactor),
+ handlePeerErrFn: func(_ PeerConn, err error) {
+ require.NoError(t, err)
+ },
+ isPersistentPeerFn: func(_ types.ID) bool {
+ return false
+ },
+ isPrivatePeerFn: func(_ types.ID) bool {
+ return false
+ },
+ }
+ )
- if err := filter(cs, c, []net.IP{
- {10, 0, 10, 2},
- }); err == nil {
- t.Errorf("expected Peer to be rejected as duplicate")
- }
-}
+ peers := make([]*MultiplexTransport, 0, len(keys))
-func TestTransportHandshake(t *testing.T) {
- t.Parallel()
+ for index, key := range keys {
+ addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+ require.NoError(t, err)
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatal(err)
- }
+ na, err := types.NewNetAddress(key.ID(), addr)
+ require.NoError(t, err)
- var (
- peerPV = ed25519.GenPrivKey()
- peerNodeInfo = testNodeInfo(peerPV.PubKey().Address().ID(), defaultNodeName)
- )
+ ni := types.NodeInfo{
+ Network: network, // common network
+ PeerID: key.ID(),
+ Version: "v1.0.0-rc.0",
+ Moniker: fmt.Sprintf("node-%d", index),
+ VersionSet: make(versionset.VersionSet, 0), // compatible version set
+ Channels: []byte{42}, // common channel
+ }
- go func() {
- c, err := net.Dial(ln.Addr().Network(), ln.Addr().String())
- if err != nil {
- t.Error(err)
- return
- }
+ // Create a fresh transport
+ tr := NewMultiplexTransport(ni, *key, mCfg, logger)
- go func(c net.Conn) {
- _, err := amino.MarshalSizedWriter(c, peerNodeInfo)
- if err != nil {
- t.Error(err)
- }
- }(c)
- go func(c net.Conn) {
- var ni NodeInfo
-
- _, err := amino.UnmarshalSizedReader(
- c,
- &ni,
- int64(MaxNodeInfoSize()),
+ // Start the transport
+ require.NoError(t, tr.Listen(*na))
+
+ t.Cleanup(func() {
+ assert.NoError(t, tr.Close())
+ })
+
+ peers = append(
+ peers,
+ tr,
)
- if err != nil {
- t.Error(err)
- }
- }(c)
- }()
+ }
- c, err := ln.Accept()
- if err != nil {
- t.Fatal(err)
- }
+ // Make peer 1 --dial--> peer 2, and handshake
+ ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancelFn()
- ni, err := handshake(c, 100*time.Millisecond, emptyNodeInfo())
- if err != nil {
- t.Fatal(err)
- }
+ p, err := peers[0].Dial(
+ ctx,
+ types.NetAddress{
+ ID: types.GenerateNodeKey().ID(), // mismatched ID
+ IP: peers[1].netAddr.IP,
+ Port: peers[1].netAddr.Port,
+ },
+ peerBehavior,
+ )
+ assert.ErrorIs(t, err, errPeerIDDialMismatch)
+ assert.Nil(t, p)
+ })
- if have, want := ni, peerNodeInfo; !reflect.DeepEqual(have, want) {
- t.Errorf("have %v, want %v", have, want)
- }
-}
+ t.Run("valid peer accepted", func(t *testing.T) {
+ t.Parallel()
-// create listener
-func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport {
- t.Helper()
+ var (
+ network = "dev"
+ mCfg = conn.DefaultMConnConfig()
+ logger = log.NewNoopLogger()
+ keys = []*types.NodeKey{
+ types.GenerateNodeKey(),
+ types.GenerateNodeKey(),
+ }
- var (
- pv = ed25519.GenPrivKey()
- id = pv.PubKey().Address().ID()
- mt = newMultiplexTransport(
- testNodeInfo(
- id, "transport",
- ),
- NodeKey{
- PrivKey: pv,
- },
+ peerBehavior = &reactorPeerBehavior{
+ chDescs: make([]*conn.ChannelDescriptor, 0),
+ reactorsByCh: make(map[byte]Reactor),
+ handlePeerErrFn: func(_ PeerConn, err error) {
+ require.NoError(t, err)
+ },
+ isPersistentPeerFn: func(_ types.ID) bool {
+ return false
+ },
+ isPrivatePeerFn: func(_ types.ID) bool {
+ return false
+ },
+ }
)
- )
- addr, err := NewNetAddressFromString(NetAddressString(id, "127.0.0.1:0"))
- if err != nil {
- t.Fatal(err)
- }
+ peers := make([]*MultiplexTransport, 0, len(keys))
- if err := mt.Listen(*addr); err != nil {
- t.Fatal(err)
- }
+ for index, key := range keys {
+ addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+ require.NoError(t, err)
- return mt
-}
+ na, err := types.NewNetAddress(key.ID(), addr)
+ require.NoError(t, err)
-type testTransportAddr struct{}
+ ni := types.NodeInfo{
+ Network: network, // common network
+ PeerID: key.ID(),
+ Version: "v1.0.0-rc.0",
+ Moniker: fmt.Sprintf("node-%d", index),
+ VersionSet: make(versionset.VersionSet, 0), // compatible version set
+ Channels: []byte{42}, // common channel
+ }
-func (a *testTransportAddr) Network() string { return "tcp" }
-func (a *testTransportAddr) String() string { return "test.local:1234" }
+ // Create a fresh transport
+ tr := NewMultiplexTransport(ni, *key, mCfg, logger)
-type testTransportConn struct{}
+ // Start the transport
+ require.NoError(t, tr.Listen(*na))
-func (c *testTransportConn) Close() error {
- return fmt.Errorf("Close() not implemented")
-}
+ t.Cleanup(func() {
+ assert.NoError(t, tr.Close())
+ })
-func (c *testTransportConn) LocalAddr() net.Addr {
- return &testTransportAddr{}
-}
+ peers = append(
+ peers,
+ tr,
+ )
+ }
-func (c *testTransportConn) RemoteAddr() net.Addr {
- return &testTransportAddr{}
-}
+ // Make peer 1 --dial--> peer 2, and handshake
+ ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancelFn()
-func (c *testTransportConn) Read(_ []byte) (int, error) {
- return -1, fmt.Errorf("Read() not implemented")
-}
+ p, err := peers[0].Dial(ctx, peers[1].netAddr, peerBehavior)
+ require.NoError(t, err)
+ require.NotNil(t, p)
-func (c *testTransportConn) SetDeadline(_ time.Time) error {
- return fmt.Errorf("SetDeadline() not implemented")
-}
+ // Make sure the new peer info is valid
+ assert.Equal(t, peers[1].netAddr.ID, p.ID())
-func (c *testTransportConn) SetReadDeadline(_ time.Time) error {
- return fmt.Errorf("SetReadDeadline() not implemented")
-}
+ assert.Equal(t, peers[1].nodeInfo.Channels, p.NodeInfo().Channels)
+ assert.Equal(t, peers[1].nodeInfo.Moniker, p.NodeInfo().Moniker)
+ assert.Equal(t, peers[1].nodeInfo.Network, p.NodeInfo().Network)
-func (c *testTransportConn) SetWriteDeadline(_ time.Time) error {
- return fmt.Errorf("SetWriteDeadline() not implemented")
-}
+ // Attempt to dial again, expect the dial to fail
+ // because the connection is already active
+ dialedPeer, err := peers[0].Dial(ctx, peers[1].netAddr, peerBehavior)
+ require.ErrorIs(t, err, errDuplicateConnection)
+ assert.Nil(t, dialedPeer)
-func (c *testTransportConn) Write(_ []byte) (int, error) {
- return -1, fmt.Errorf("Write() not implemented")
+ // Remove the peer
+ peers[0].Remove(p)
+ })
}
diff --git a/tm2/pkg/p2p/types.go b/tm2/pkg/p2p/types.go
index 150325f52bb..d206a5af662 100644
--- a/tm2/pkg/p2p/types.go
+++ b/tm2/pkg/p2p/types.go
@@ -1,10 +1,115 @@
package p2p
import (
+ "context"
+ "net"
+
"github.com/gnolang/gno/tm2/pkg/p2p/conn"
+ "github.com/gnolang/gno/tm2/pkg/p2p/events"
+ "github.com/gnolang/gno/tm2/pkg/p2p/types"
+ "github.com/gnolang/gno/tm2/pkg/service"
)
type (
ChannelDescriptor = conn.ChannelDescriptor
ConnectionStatus = conn.ConnectionStatus
)
+
+// PeerConn is a wrapper for a connected peer
+type PeerConn interface {
+ service.Service
+
+ FlushStop()
+
+ ID() types.ID // peer's cryptographic ID
+ RemoteIP() net.IP // remote IP of the connection
+ RemoteAddr() net.Addr // remote address of the connection
+
+ IsOutbound() bool // did we dial the peer
+ IsPersistent() bool // do we redial this peer when we disconnect
+ IsPrivate() bool // do we share the peer
+
+ CloseConn() error // close original connection
+
+ NodeInfo() types.NodeInfo // peer's info
+ Status() ConnectionStatus
+ SocketAddr() *types.NetAddress // actual address of the socket
+
+ Send(byte, []byte) bool
+ TrySend(byte, []byte) bool
+
+ Set(string, any)
+ Get(string) any
+}
+
+// PeerSet manages the active set of connected peers
+type PeerSet interface {
+ Add(peer PeerConn)
+ Remove(key types.ID) bool
+ Has(key types.ID) bool
+ Get(key types.ID) PeerConn
+ List() []PeerConn
+
+ NumInbound() uint64 // returns the number of connected inbound nodes
+ NumOutbound() uint64 // returns the number of connected outbound nodes
+}
+
+// Transport handles peer dialing and connection acceptance. Additionally,
+// it is responsible for any custom connection mechanisms (like handshaking).
+// Peers returned by the transport are considered to be verified and sound
+type Transport interface {
+ // NetAddress returns the Transport's dial address
+ NetAddress() types.NetAddress
+
+ // Accept returns a newly connected inbound peer
+ Accept(context.Context, PeerBehavior) (PeerConn, error)
+
+ // Dial dials a peer, and returns it
+ Dial(context.Context, types.NetAddress, PeerBehavior) (PeerConn, error)
+
+ // Remove drops any resources associated
+ // with the PeerConn in the transport
+ Remove(PeerConn)
+}
+
+// Switch is the abstraction in the p2p module that handles
+// and manages peer connections through a Transport
+type Switch interface {
+ // Broadcast publishes data on the given channel, to all peers
+ Broadcast(chID byte, data []byte)
+
+ // Peers returns the latest peer set
+ Peers() PeerSet
+
+ // Subscribe subscribes to active switch events
+ Subscribe(filterFn events.EventFilter) (<-chan events.Event, func())
+
+ // StopPeerForError stops the peer with the given reason
+ StopPeerForError(peer PeerConn, err error)
+
+ // DialPeers marks the given peers as ready for async dialing
+ DialPeers(peerAddrs ...*types.NetAddress)
+}
+
+// PeerBehavior wraps the Reactor and MultiplexSwitch information a Transport would need when
+// dialing or accepting new Peer connections.
+// It is worth noting that the only reason this information is required in the first place
+// is that Peers expose an API through which different TM modules can interact with them.
+// In the future™, modules should not directly "Send" anything to Peers, but instead communicate through
+// other mediums, such as the P2P module
+type PeerBehavior interface {
+ // ReactorChDescriptors returns the Reactor channel descriptors
+ ReactorChDescriptors() []*conn.ChannelDescriptor
+
+ // Reactors returns the node's active p2p Reactors (modules)
+ Reactors() map[byte]Reactor
+
+ // HandlePeerError propagates a peer connection error for further processing
+ HandlePeerError(PeerConn, error)
+
+ // IsPersistentPeer returns a flag indicating if the given peer is persistent
+ IsPersistentPeer(types.ID) bool
+
+ // IsPrivatePeer returns a flag indicating if the given peer is private
+ IsPrivatePeer(types.ID) bool
+}
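As a rough sketch (assumed, not part of this change), a PeerBehavior implementation backed by a node's reactor set could look like the following; the struct and field names are illustrative only:

package example

import (
	"log/slog"

	"github.com/gnolang/gno/tm2/pkg/p2p"
	"github.com/gnolang/gno/tm2/pkg/p2p/conn"
	"github.com/gnolang/gno/tm2/pkg/p2p/types"
)

// switchBehavior adapts a node's reactor set to the PeerBehavior interface
type switchBehavior struct {
	logger     *slog.Logger
	chDescs    []*conn.ChannelDescriptor
	reactors   map[byte]p2p.Reactor
	persistent map[types.ID]struct{}
	private    map[types.ID]struct{}
}

func (b *switchBehavior) ReactorChDescriptors() []*conn.ChannelDescriptor {
	return b.chDescs
}

func (b *switchBehavior) Reactors() map[byte]p2p.Reactor {
	return b.reactors
}

func (b *switchBehavior) HandlePeerError(p p2p.PeerConn, err error) {
	// Surface the error; the switch decides whether to drop or redial the peer
	b.logger.Error("peer error", "peer", p.ID(), "err", err)
}

func (b *switchBehavior) IsPersistentPeer(id types.ID) bool {
	_, ok := b.persistent[id]

	return ok
}

func (b *switchBehavior) IsPrivatePeer(id types.ID) bool {
	_, ok := b.private[id]

	return ok
}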
diff --git a/tm2/pkg/p2p/types/key.go b/tm2/pkg/p2p/types/key.go
new file mode 100644
index 00000000000..bc45de709d8
--- /dev/null
+++ b/tm2/pkg/p2p/types/key.go
@@ -0,0 +1,113 @@
+package types
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/gnolang/gno/tm2/pkg/amino"
+ "github.com/gnolang/gno/tm2/pkg/crypto"
+ "github.com/gnolang/gno/tm2/pkg/crypto/ed25519"
+ osm "github.com/gnolang/gno/tm2/pkg/os"
+)
+
+// ID represents the cryptographically unique Peer ID
+type ID = crypto.ID
+
+// NewIDFromStrings returns an array of IDs built using
+// the provided strings
+func NewIDFromStrings(idStrs []string) ([]ID, []error) {
+ var (
+ ids = make([]ID, 0, len(idStrs))
+ errs = make([]error, 0, len(idStrs))
+ )
+
+ for _, idStr := range idStrs {
+ id := ID(idStr)
+ if err := id.Validate(); err != nil {
+ errs = append(errs, err)
+
+ continue
+ }
+
+ ids = append(ids, id)
+ }
+
+ return ids, errs
+}
+
+// NodeKey is the persistent peer key.
+// It contains the node's private key for authentication.
+// NOTE: keep in sync with gno.land/cmd/gnoland/secrets.go
+type NodeKey struct {
+ crypto.PrivKey `json:"priv_key"` // our priv key
+}
+
+// ID returns the bech32 address representation
+// of the node's public p2p key
+func (k NodeKey) ID() ID {
+ return k.PubKey().Address().ID()
+}
+
+// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath.
+// If the file does not exist, it generates and saves a new NodeKey.
+func LoadOrGenNodeKey(path string) (*NodeKey, error) {
+ // Check if the key exists
+ if osm.FileExists(path) {
+ // Load the node key
+ return LoadNodeKey(path)
+ }
+
+ // Key is not present on path,
+ // generate a fresh one
+ nodeKey := GenerateNodeKey()
+ if err := saveNodeKey(path, nodeKey); err != nil {
+ return nil, fmt.Errorf("unable to save node key, %w", err)
+ }
+
+ return nodeKey, nil
+}
+
+// LoadNodeKey loads the node key from the given path
+func LoadNodeKey(path string) (*NodeKey, error) {
+ // Load the key
+ jsonBytes, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read key, %w", err)
+ }
+
+ var nodeKey NodeKey
+
+ // Parse the key
+ if err = amino.UnmarshalJSON(jsonBytes, &nodeKey); err != nil {
+ return nil, fmt.Errorf("unable to JSON unmarshal node key, %w", err)
+ }
+
+ return &nodeKey, nil
+}
+
+// GenerateNodeKey generates a random
+// node P2P key, based on ed25519
+func GenerateNodeKey() *NodeKey {
+ privKey := ed25519.GenPrivKey()
+
+ return &NodeKey{
+ PrivKey: privKey,
+ }
+}
+
+// saveNodeKey saves the node key
+func saveNodeKey(path string, nodeKey *NodeKey) error {
+ // Get Amino JSON
+ marshalledData, err := amino.MarshalJSONIndent(nodeKey, "", "\t")
+ if err != nil {
+ return fmt.Errorf("unable to marshal node key into JSON, %w", err)
+ }
+
+ // Save the data to disk
+ if err := os.WriteFile(path, marshalledData, 0o644); err != nil {
+ return fmt.Errorf("unable to save node key to path, %w", err)
+ }
+
+ return nil
+}
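A brief usage sketch (assumed, for illustration only): loading or generating the persistent node key and deriving the peer ID from it; the loadIdentity name and the file path are example values:

package example

import (
	"fmt"
	"path/filepath"

	"github.com/gnolang/gno/tm2/pkg/p2p/types"
)

// loadIdentity reuses the key at <home>/node_key.json,
// or generates and persists a fresh one
func loadIdentity(home string) (*types.NodeKey, error) {
	nodeKey, err := types.LoadOrGenNodeKey(filepath.Join(home, "node_key.json"))
	if err != nil {
		return nil, fmt.Errorf("unable to load node key, %w", err)
	}

	// The bech32 address of the node's public key doubles as its peer ID
	fmt.Printf("node ID: %s\n", nodeKey.ID())

	return nodeKey, nil
}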
diff --git a/tm2/pkg/p2p/types/key_test.go b/tm2/pkg/p2p/types/key_test.go
new file mode 100644
index 00000000000..5dc153b08c0
--- /dev/null
+++ b/tm2/pkg/p2p/types/key_test.go
@@ -0,0 +1,158 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// generateKeys generates random node p2p keys
+func generateKeys(t *testing.T, count int) []*NodeKey {
+ t.Helper()
+
+ keys := make([]*NodeKey, count)
+
+ for i := 0; i < count; i++ {
+ keys[i] = GenerateNodeKey()
+ }
+
+ return keys
+}
+
+func TestNodeKey_Generate(t *testing.T) {
+ t.Parallel()
+
+ keys := generateKeys(t, 10)
+
+ for _, key := range keys {
+ require.NotNil(t, key)
+ assert.NotNil(t, key.PrivKey)
+
+ // Make sure all keys are unique
+ for _, keyInner := range keys {
+ if key.ID() == keyInner.ID() {
+ continue
+ }
+
+ assert.False(t, key.Equals(keyInner))
+ }
+ }
+}
+
+func TestNodeKey_Load(t *testing.T) {
+ t.Parallel()
+
+ t.Run("non-existing key", func(t *testing.T) {
+ t.Parallel()
+
+ key, err := LoadNodeKey("definitely valid path")
+
+ require.Nil(t, key)
+ assert.ErrorIs(t, err, os.ErrNotExist)
+ })
+
+ t.Run("invalid key format", func(t *testing.T) {
+ t.Parallel()
+
+ // Generate a random path
+ path := fmt.Sprintf("%s/key.json", t.TempDir())
+
+ type random struct {
+ field string
+ }
+
+ data, err := json.Marshal(&random{
+ field: "random data",
+ })
+ require.NoError(t, err)
+
+ // Save the invalid data format
+ require.NoError(t, os.WriteFile(path, data, 0o644))
+
+ // Load the key, that's invalid
+ key, err := LoadNodeKey(path)
+
+ require.NoError(t, err)
+ assert.Nil(t, key.PrivKey)
+ })
+
+ t.Run("valid key loaded", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ path = fmt.Sprintf("%s/key.json", t.TempDir())
+ key = GenerateNodeKey()
+ )
+
+ // Save the key
+ require.NoError(t, saveNodeKey(path, key))
+
+ // Load the key, that's valid
+ loadedKey, err := LoadNodeKey(path)
+ require.NoError(t, err)
+
+ assert.True(t, key.PrivKey.Equals(loadedKey.PrivKey))
+ assert.Equal(t, key.ID(), loadedKey.ID())
+ })
+}
+
+func TestNodeKey_ID(t *testing.T) {
+ t.Parallel()
+
+ keys := generateKeys(t, 10)
+
+ for _, key := range keys {
+ // Make sure the ID is valid
+ id := key.ID()
+ require.NotNil(t, id)
+
+ assert.NoError(t, id.Validate())
+ }
+}
+
+func TestNodeKey_LoadOrGenNodeKey(t *testing.T) {
+ t.Parallel()
+
+ t.Run("existing key loaded", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ path = fmt.Sprintf("%s/key.json", t.TempDir())
+ key = GenerateNodeKey()
+ )
+
+ // Save the key
+ require.NoError(t, saveNodeKey(path, key))
+
+ loadedKey, err := LoadOrGenNodeKey(path)
+ require.NoError(t, err)
+
+ // Make sure the key was not generated
+ assert.True(t, key.PrivKey.Equals(loadedKey.PrivKey))
+ })
+
+ t.Run("fresh key generated", func(t *testing.T) {
+ t.Parallel()
+
+ path := fmt.Sprintf("%s/key.json", t.TempDir())
+
+ // Make sure there is no key at the path
+ _, err := os.Stat(path)
+ require.ErrorIs(t, err, os.ErrNotExist)
+
+ // Generate the fresh key
+ key, err := LoadOrGenNodeKey(path)
+ require.NoError(t, err)
+
+ // Load the saved key
+ loadedKey, err := LoadOrGenNodeKey(path)
+ require.NoError(t, err)
+
+ // Make sure the keys are the same
+ assert.True(t, key.PrivKey.Equals(loadedKey.PrivKey))
+ })
+}
diff --git a/tm2/pkg/p2p/netaddress.go b/tm2/pkg/p2p/types/netaddress.go
similarity index 52%
rename from tm2/pkg/p2p/netaddress.go
rename to tm2/pkg/p2p/types/netaddress.go
index 77f89b2a4b3..a43f90454ea 100644
--- a/tm2/pkg/p2p/netaddress.go
+++ b/tm2/pkg/p2p/types/netaddress.go
@@ -2,143 +2,156 @@
// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
// https://github.com/conformal/btcd/blob/master/LICENSE
-package p2p
+package types
import (
- "flag"
+ "context"
"fmt"
"net"
"strconv"
"strings"
- "time"
"github.com/gnolang/gno/tm2/pkg/crypto"
"github.com/gnolang/gno/tm2/pkg/errors"
)
-type ID = crypto.ID
+const (
+ nilNetAddress = ""
+ badNetAddress = ""
+)
+
+var (
+ ErrInvalidTCPAddress = errors.New("invalid TCP address")
+ ErrUnsetIPAddress = errors.New("unset IP address")
+ ErrInvalidIP = errors.New("invalid IP address")
+ ErrUnspecifiedIP = errors.New("unspecified IP address")
+ ErrInvalidNetAddress = errors.New("invalid net address")
+ ErrEmptyHost = errors.New("empty host address")
+)
// NetAddress defines information about a peer on the network
-// including its Address, IP address, and port.
-// NOTE: NetAddress is not meant to be mutated due to memoization.
-// @amino2: immutable XXX
+// including its ID, IP address, and port
type NetAddress struct {
- ID ID `json:"id"` // authenticated identifier (TODO)
- IP net.IP `json:"ip"` // part of "addr"
- Port uint16 `json:"port"` // part of "addr"
-
- // TODO:
- // Name string `json:"name"` // optional DNS name
-
- // memoize .String()
- str string
+ ID ID `json:"id"` // unique peer identifier (public key address)
+ IP net.IP `json:"ip"` // the IP part of the dial address
+ Port uint16 `json:"port"` // the port part of the dial address
}
// NetAddressString returns id@addr. It strips the leading
// protocol from protocolHostPort if it exists.
func NetAddressString(id ID, protocolHostPort string) string {
- addr := removeProtocolIfDefined(protocolHostPort)
- return fmt.Sprintf("%s@%s", id, addr)
+ return fmt.Sprintf(
+ "%s@%s",
+ id,
+ removeProtocolIfDefined(protocolHostPort),
+ )
}
// NewNetAddress returns a new NetAddress using the provided TCP
-// address. When testing, other net.Addr (except TCP) will result in
-// using 0.0.0.0:0. When normal run, other net.Addr (except TCP) will
-// panic. Panics if ID is invalid.
-// TODO: socks proxies?
-func NewNetAddress(id ID, addr net.Addr) *NetAddress {
+// address
+func NewNetAddress(id ID, addr net.Addr) (*NetAddress, error) {
+ // Make sure the address is valid
tcpAddr, ok := addr.(*net.TCPAddr)
if !ok {
- if flag.Lookup("test.v") == nil { // normal run
- panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
- } else { // in testing
- netAddr := NewNetAddressFromIPPort("", net.IP("0.0.0.0"), 0)
- netAddr.ID = id
- return netAddr
- }
+ return nil, ErrInvalidTCPAddress
}
+ // Validate the ID
if err := id.Validate(); err != nil {
- panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr))
+ return nil, fmt.Errorf("unable to verify ID, %w", err)
}
- ip := tcpAddr.IP
- port := uint16(tcpAddr.Port)
- na := NewNetAddressFromIPPort("", ip, port)
+ na := NewNetAddressFromIPPort(
+ tcpAddr.IP,
+ uint16(tcpAddr.Port),
+ )
+
+ // Set the ID
na.ID = id
- return na
+
+ return na, nil
}
// NewNetAddressFromString returns a new NetAddress using the provided address in
// the form of "ID@IP:Port".
// Also resolves the host if host is not an IP.
-// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup)
func NewNetAddressFromString(idaddr string) (*NetAddress, error) {
- idaddr = removeProtocolIfDefined(idaddr)
- spl := strings.Split(idaddr, "@")
+ var (
+ prunedAddr = removeProtocolIfDefined(idaddr)
+ spl = strings.Split(prunedAddr, "@")
+ )
+
if len(spl) != 2 {
- return nil, NetAddressNoIDError{idaddr}
+ return nil, ErrInvalidNetAddress
}
- // get ID
- id := crypto.ID(spl[0])
+ var (
+ id = crypto.ID(spl[0])
+ addr = spl[1]
+ )
+
+ // Validate the ID
if err := id.Validate(); err != nil {
- return nil, NetAddressInvalidError{idaddr, err}
+ return nil, fmt.Errorf("unable to verify address ID, %w", err)
}
- addr := spl[1]
- // get host and port
+ // Extract the host and port
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
- return nil, NetAddressInvalidError{addr, err}
+ return nil, fmt.Errorf("unable to split host and port, %w", err)
}
- if len(host) == 0 {
- return nil, NetAddressInvalidError{
- addr,
- errors.New("host is empty"),
- }
+
+ if host == "" {
+ return nil, ErrEmptyHost
}
ip := net.ParseIP(host)
if ip == nil {
ips, err := net.LookupIP(host)
if err != nil {
- return nil, NetAddressLookupError{host, err}
+ return nil, fmt.Errorf("unable to look up IP, %w", err)
}
+
ip = ips[0]
}
port, err := strconv.ParseUint(portStr, 10, 16)
if err != nil {
- return nil, NetAddressInvalidError{portStr, err}
+ return nil, fmt.Errorf("unable to parse port %s, %w", portStr, err)
}
- na := NewNetAddressFromIPPort("", ip, uint16(port))
+ na := NewNetAddressFromIPPort(ip, uint16(port))
na.ID = id
+
return na, nil
}
// NewNetAddressFromStrings returns an array of NetAddresses built using
// the provided strings.
func NewNetAddressFromStrings(idaddrs []string) ([]*NetAddress, []error) {
- netAddrs := make([]*NetAddress, 0)
- errs := make([]error, 0)
+ var (
+ netAddrs = make([]*NetAddress, 0, len(idaddrs))
+ errs = make([]error, 0, len(idaddrs))
+ )
+
for _, addr := range idaddrs {
netAddr, err := NewNetAddressFromString(addr)
if err != nil {
errs = append(errs, err)
- } else {
- netAddrs = append(netAddrs, netAddr)
+
+ continue
}
+
+ netAddrs = append(netAddrs, netAddr)
}
+
return netAddrs, errs
}
// NewNetAddressFromIPPort returns a new NetAddress using the provided IP
// and port number.
-func NewNetAddressFromIPPort(id ID, ip net.IP, port uint16) *NetAddress {
+func NewNetAddressFromIPPort(ip net.IP, port uint16) *NetAddress {
return &NetAddress{
- ID: id,
IP: ip,
Port: port,
}
@@ -146,88 +159,78 @@ func NewNetAddressFromIPPort(id ID, ip net.IP, port uint16) *NetAddress {
// Equals reports whether na and other are the same addresses,
// including their ID, IP, and Port.
-func (na *NetAddress) Equals(other interface{}) bool {
- if o, ok := other.(*NetAddress); ok {
- return na.String() == o.String()
- }
- return false
+func (na *NetAddress) Equals(other NetAddress) bool {
+ return na.String() == other.String()
}
// Same returns true if na has the same non-empty ID or DialString as other.
-func (na *NetAddress) Same(other interface{}) bool {
- if o, ok := other.(*NetAddress); ok {
- if na.DialString() == o.DialString() {
- return true
- }
- if na.ID != "" && na.ID == o.ID {
- return true
- }
- }
- return false
+func (na *NetAddress) Same(other NetAddress) bool {
+ var (
+ dialsSame = na.DialString() == other.DialString()
+ IDsSame = na.ID != "" && na.ID == other.ID
+ )
+
+ return dialsSame || IDsSame
}
// String representation: <ID>@<IP>:<PORT>
func (na *NetAddress) String() string {
if na == nil {
- return ""
- }
- if na.str != "" {
- return na.str
+ return nilNetAddress
}
+
str, err := na.MarshalAmino()
if err != nil {
- return ""
+ return badNetAddress
}
+
return str
}
+// MarshalAmino stringifies a NetAddress.
// Needed because (a) IP doesn't encode, and (b) the intent of this type is to
// serialize to a string anyways.
func (na NetAddress) MarshalAmino() (string, error) {
- if na.str == "" {
- addrStr := na.DialString()
- if na.ID != "" {
- addrStr = NetAddressString(na.ID, addrStr)
- }
- na.str = addrStr
+ addrStr := na.DialString()
+
+ if na.ID != "" {
+ return NetAddressString(na.ID, addrStr), nil
}
- return na.str, nil
+
+ return addrStr, nil
}
-func (na *NetAddress) UnmarshalAmino(str string) (err error) {
- na2, err := NewNetAddressFromString(str)
+func (na *NetAddress) UnmarshalAmino(raw string) (err error) {
+ netAddress, err := NewNetAddressFromString(raw)
if err != nil {
return err
}
- *na = *na2
+
+ *na = *netAddress
+
return nil
}
func (na *NetAddress) DialString() string {
if na == nil {
- return ""
+ return nilNetAddress
}
+
return net.JoinHostPort(
na.IP.String(),
strconv.FormatUint(uint64(na.Port), 10),
)
}
-// Dial calls net.Dial on the address.
-func (na *NetAddress) Dial() (net.Conn, error) {
- conn, err := net.Dial("tcp", na.DialString())
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
+// DialContext dials the given NetAddress with a context
+func (na *NetAddress) DialContext(ctx context.Context) (net.Conn, error) {
+ var d net.Dialer
-// DialTimeout calls net.DialTimeout on the address.
-func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
- conn, err := net.DialTimeout("tcp", na.DialString(), timeout)
+ conn, err := d.DialContext(ctx, "tcp", na.DialString())
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("unable to dial address, %w", err)
}
+
return conn, nil
}
@@ -236,47 +239,45 @@ func (na *NetAddress) Routable() bool {
if err := na.Validate(); err != nil {
return false
}
+
// TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
- return !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
- na.RFC4193() || na.RFC4843() || na.Local())
+ return !(na.RFC1918() ||
+ na.RFC3927() ||
+ na.RFC4862() ||
+ na.RFC4193() ||
+ na.RFC4843() ||
+ na.Local())
}
-func (na *NetAddress) ValidateLocal() error {
+// Validate validates the NetAddress params
+func (na *NetAddress) Validate() error {
+ // Validate the ID
if err := na.ID.Validate(); err != nil {
- return err
+ return fmt.Errorf("unable to validate ID, %w", err)
}
+
+ // Make sure the IP is set
if na.IP == nil {
- return errors.New("no IP")
- }
- if len(na.IP) != 4 && len(na.IP) != 16 {
- return fmt.Errorf("invalid IP bytes: %v", len(na.IP))
- }
- if na.RFC3849() || na.IP.Equal(net.IPv4bcast) {
- return errors.New("invalid IP", na.IP.IsUnspecified())
+ return ErrUnsetIPAddress
}
- return nil
-}
-func (na *NetAddress) Validate() error {
- if err := na.ID.Validate(); err != nil {
- return err
- }
- if na.IP == nil {
- return errors.New("no IP")
+ // Make sure the IP is valid
+ ipLen := len(na.IP)
+ if ipLen != 4 && ipLen != 16 {
+ return ErrInvalidIP
}
- if len(na.IP) != 4 && len(na.IP) != 16 {
- return fmt.Errorf("invalid IP bytes: %v", len(na.IP))
+
+ // Check if the IP is unspecified
+ if na.IP.IsUnspecified() {
+ return ErrUnspecifiedIP
}
- if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) {
- return errors.New("invalid IP", na.IP.IsUnspecified())
+
+ // Check if the IP conforms to standards, or is a broadcast
+ if na.RFC3849() || na.IP.Equal(net.IPv4bcast) {
+ return ErrInvalidIP
}
- return nil
-}
-// HasID returns true if the address has an ID.
-// NOTE: It does not check whether the ID is valid or not.
-func (na *NetAddress) HasID() bool {
- return !na.ID.IsZero()
+ return nil
}
// Local returns true if it is a local address.
@@ -284,56 +285,6 @@ func (na *NetAddress) Local() bool {
return na.IP.IsLoopback() || zero4.Contains(na.IP)
}
-// ReachabilityTo checks whenever o can be reached from na.
-func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
- const (
- Unreachable = 0
- Default = iota
- Teredo
- Ipv6_weak
- Ipv4
- Ipv6_strong
- )
- switch {
- case !na.Routable():
- return Unreachable
- case na.RFC4380():
- switch {
- case !o.Routable():
- return Default
- case o.RFC4380():
- return Teredo
- case o.IP.To4() != nil:
- return Ipv4
- default: // ipv6
- return Ipv6_weak
- }
- case na.IP.To4() != nil:
- if o.Routable() && o.IP.To4() != nil {
- return Ipv4
- }
- return Default
- default: /* ipv6 */
- var tunnelled bool
- // Is our v6 is tunnelled?
- if o.RFC3964() || o.RFC6052() || o.RFC6145() {
- tunnelled = true
- }
- switch {
- case !o.Routable():
- return Default
- case o.RFC4380():
- return Teredo
- case o.IP.To4() != nil:
- return Ipv4
- case tunnelled:
- // only prioritise ipv6 if we aren't tunnelling it.
- return Ipv6_weak
- }
- return Ipv6_strong
- }
-}
-
// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
// RFC3849: IPv6 Documentation address (2001:0DB8::/32)
// RFC3927: IPv4 Autoconfig (169.254.0.0/16)
@@ -376,9 +327,12 @@ func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) }
func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) }
func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }
+// removeProtocolIfDefined removes the protocol part of the given address
func removeProtocolIfDefined(addr string) string {
- if strings.Contains(addr, "://") {
- return strings.Split(addr, "://")[1]
+ if !strings.Contains(addr, "://") {
+ // No protocol part
+ return addr
}
- return addr
+
+ return strings.Split(addr, "://")[1]
}
diff --git a/tm2/pkg/p2p/types/netaddress_test.go b/tm2/pkg/p2p/types/netaddress_test.go
new file mode 100644
index 00000000000..1f8f0229b99
--- /dev/null
+++ b/tm2/pkg/p2p/types/netaddress_test.go
@@ -0,0 +1,323 @@
+package types
+
+import (
+ "fmt"
+ "net"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func BenchmarkNetAddress_String(b *testing.B) {
+ key := GenerateNodeKey()
+
+ na, err := NewNetAddressFromString(NetAddressString(key.ID(), "127.0.0.1:0"))
+ require.NoError(b, err)
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _ = na.String()
+ }
+}
+
+func TestNewNetAddress(t *testing.T) {
+ t.Parallel()
+
+ t.Run("invalid TCP address", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ key = GenerateNodeKey()
+ address = "127.0.0.1:8080"
+ )
+
+ udpAddr, err := net.ResolveUDPAddr("udp", address)
+ require.NoError(t, err)
+
+ _, err = NewNetAddress(key.ID(), udpAddr)
+ require.Error(t, err)
+ })
+
+ t.Run("invalid ID", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ id = "" // zero ID
+ address = "127.0.0.1:8080"
+ )
+
+ tcpAddr, err := net.ResolveTCPAddr("tcp", address)
+ require.NoError(t, err)
+
+ _, err = NewNetAddress(ID(id), tcpAddr)
+ require.Error(t, err)
+ })
+
+ t.Run("valid net address", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ key = GenerateNodeKey()
+ address = "127.0.0.1:8080"
+ )
+
+ tcpAddr, err := net.ResolveTCPAddr("tcp", address)
+ require.NoError(t, err)
+
+ addr, err := NewNetAddress(key.ID(), tcpAddr)
+ require.NoError(t, err)
+
+ assert.Equal(t, fmt.Sprintf("%s@%s", key.ID(), address), addr.String())
+ })
+}
+
+func TestNewNetAddressFromString(t *testing.T) {
+ t.Parallel()
+
+ t.Run("valid net address", func(t *testing.T) {
+ t.Parallel()
+
+ testTable := []struct {
+ name string
+ addr string
+ expected string
+ }{
+ {"no protocol", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080"},
+ {"tcp input", "tcp://g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080"},
+ {"udp input", "udp://g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080"},
+ {"no protocol", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080"},
+ {"tcp input", "tcp://g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080"},
+ {"udp input", "udp://g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080"},
+ {"correct nodeId w/tcp", "tcp://g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080", "g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080"},
+ }
+
+ for _, testCase := range testTable {
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ addr, err := NewNetAddressFromString(testCase.addr)
+ require.NoError(t, err)
+
+ assert.Equal(t, testCase.expected, addr.String())
+ })
+ }
+ })
+
+ t.Run("invalid net address", func(t *testing.T) {
+ t.Parallel()
+
+ testTable := []struct {
+ name string
+ addr string
+ }{
+ {"no node id and no protocol", "127.0.0.1:8080"},
+ {"no node id w/ tcp input", "tcp://127.0.0.1:8080"},
+ {"no node id w/ udp input", "udp://127.0.0.1:8080"},
+
+ {"malformed tcp input", "tcp//g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080"},
+ {"malformed udp input", "udp//g1m6kmam774klwlh4dhmhaatd7al02m0h0jwnyc6@127.0.0.1:8080"},
+
+ {"invalid host", "notahost"},
+ {"invalid port", "127.0.0.1:notapath"},
+ {"invalid host w/ port", "notahost:8080"},
+ {"just a port", "8082"},
+ {"non-existent port", "127.0.0:8080000"},
+
+ {"too short nodeId", "deadbeef@127.0.0.1:8080"},
+ {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080"},
+ {"not bech32 nodeId", "xxxm6kmam774klwlh4dhmhaatd7al02m0h0hdap9l@127.0.0.1:8080"},
+
+ {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080"},
+ {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080"},
+ {"not bech32 nodeId w/tcp", "tcp://xxxxm6kmam774klwlh4dhmhaatd7al02m0h0hdap9l@127.0.0.1:8080"},
+
+ {"no node id", "tcp://@127.0.0.1:8080"},
+ {"no node id or IP", "tcp://@"},
+ {"tcp no host, w/ port", "tcp://:26656"},
+ {"empty", ""},
+ {"node id delimiter 1", "@"},
+ {"node id delimiter 2", " @"},
+ {"node id delimiter 3", " @ "},
+ }
+
+ for _, testCase := range testTable {
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ addr, err := NewNetAddressFromString(testCase.addr)
+
+ assert.Nil(t, addr)
+ assert.Error(t, err)
+ })
+ }
+ })
+}
+
+func TestNewNetAddressFromStrings(t *testing.T) {
+ t.Parallel()
+
+ t.Run("invalid addresses", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ keys = generateKeys(t, 10)
+ strs = make([]string, 0, len(keys))
+ )
+
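+ // Make every other address invalid (missing host), so the conversion
+ // produces a mix of valid addresses and errors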
+ for index, key := range keys {
+ if index%2 != 0 {
+ strs = append(
+ strs,
+ fmt.Sprintf("%s@:8080", key.ID()), // missing host
+ )
+
+ continue
+ }
+
+ strs = append(
+ strs,
+ fmt.Sprintf("%s@127.0.0.1:8080", key.ID()),
+ )
+ }
+
+ // Convert the strings
+ addrs, errs := NewNetAddressFromStrings(strs)
+
+ assert.Len(t, errs, len(keys)/2)
+ assert.Equal(t, len(keys)/2, len(addrs))
+
+ for index, addr := range addrs {
+ assert.Contains(t, addr.String(), keys[index*2].ID())
+ }
+ })
+
+ t.Run("valid addresses", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ keys = generateKeys(t, 10)
+ strs = make([]string, 0, len(keys))
+ )
+
+ for _, key := range keys {
+ strs = append(
+ strs,
+ fmt.Sprintf("%s@127.0.0.1:8080", key.ID()),
+ )
+ }
+
+ // Convert the strings
+ addrs, errs := NewNetAddressFromStrings(strs)
+
+ assert.Len(t, errs, 0)
+ assert.Equal(t, len(keys), len(addrs))
+
+ for index, addr := range addrs {
+ assert.Contains(t, addr.String(), keys[index].ID())
+ }
+ })
+}
+
+func TestNewNetAddressFromIPPort(t *testing.T) {
+ t.Parallel()
+
+ var (
+ host = "127.0.0.1"
+ port = uint16(8080)
+ )
+
+ addr := NewNetAddressFromIPPort(net.ParseIP(host), port)
+
+ assert.Equal(
+ t,
+ fmt.Sprintf("%s:%d", host, port),
+ addr.String(),
+ )
+}
+
+func TestNetAddress_Local(t *testing.T) {
+ t.Parallel()
+
+ testTable := []struct {
+ name string
+ addr string
+ isLocal bool
+ }{
+ {
+ "local loopback",
+ "127.0.0.1:8080",
+ true,
+ },
+ {
+ "local loopback, zero",
+ "0.0.0.0:8080",
+ true,
+ },
+ {
+ "non-local address",
+ "200.100.200.100:8080",
+ false,
+ },
+ }
+
+ for _, testCase := range testTable {
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ key := GenerateNodeKey()
+
+ addr, err := NewNetAddressFromString(
+ fmt.Sprintf(
+ "%s@%s",
+ key.ID(),
+ testCase.addr,
+ ),
+ )
+ require.NoError(t, err)
+
+ assert.Equal(t, testCase.isLocal, addr.Local())
+ })
+ }
+}
+
+func TestNetAddress_Routable(t *testing.T) {
+ t.Parallel()
+
+ testTable := []struct {
+ name string
+ addr string
+ isRoutable bool
+ }{
+ {
+ "local loopback",
+ "127.0.0.1:8080",
+ false,
+ },
+ {
+ "routable address",
+ "gno.land:80",
+ true,
+ },
+ }
+
+ for _, testCase := range testTable {
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ key := GenerateNodeKey()
+
+ addr, err := NewNetAddressFromString(
+ fmt.Sprintf(
+ "%s@%s",
+ key.ID(),
+ testCase.addr,
+ ),
+ )
+ require.NoError(t, err)
+
+ assert.Equal(t, testCase.isRoutable, addr.Routable())
+ })
+ }
+}
diff --git a/tm2/pkg/p2p/types/node_info.go b/tm2/pkg/p2p/types/node_info.go
new file mode 100644
index 00000000000..8452cb43cb8
--- /dev/null
+++ b/tm2/pkg/p2p/types/node_info.go
@@ -0,0 +1,141 @@
+package types
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/gnolang/gno/tm2/pkg/strings"
+ "github.com/gnolang/gno/tm2/pkg/versionset"
+)
+
+const (
+ MaxNodeInfoSize = int64(10240) // 10KB
+ maxNumChannels = 16 // plenty of room for upgrades, for now
+)
+
+var (
+ ErrInvalidPeerID = errors.New("invalid peer ID")
+ ErrInvalidVersion = errors.New("invalid node version")
+ ErrInvalidMoniker = errors.New("invalid node moniker")
+ ErrInvalidRPCAddress = errors.New("invalid node RPC address")
+ ErrExcessiveChannels = errors.New("excessive node channels")
+ ErrDuplicateChannels = errors.New("duplicate node channels")
+ ErrIncompatibleNetworks = errors.New("incompatible networks")
+ ErrNoCommonChannels = errors.New("no common channels")
+)
+
+// NodeInfo is the basic node information exchanged
+// between two peers during the Tendermint P2P handshake.
+type NodeInfo struct {
+ // Set of protocol versions
+ VersionSet versionset.VersionSet `json:"version_set"`
+
+ // Unique peer identifier
+ PeerID ID `json:"id"`
+
+ // Fields used to check compatibility
+ // between two peers
+ Network string `json:"network"` // network/chain ID
+ Software string `json:"software"` // name of immediate software
+ Version string `json:"version"` // software major.minor.revision
+ Channels []byte `json:"channels"` // channels this node knows about
+
+ // ASCIIText fields
+ Moniker string `json:"moniker"` // arbitrary moniker
+ Other NodeInfoOther `json:"other"` // other application specific data
+}
+
+// NodeInfoOther is the misc. application specific data
+type NodeInfoOther struct {
+ TxIndex string `json:"tx_index"`
+ RPCAddress string `json:"rpc_address"`
+}
+
+// Validate checks that the self-reported NodeInfo is safe.
+// It returns an error if the peer ID is invalid,
+// if there are too many channels or any duplicate channels,
+// or if the version, moniker or RPC address
+// are not printable ASCII
+func (info NodeInfo) Validate() error {
+ // Validate the ID
+ if err := info.PeerID.Validate(); err != nil {
+ return fmt.Errorf("%w, %w", ErrInvalidPeerID, err)
+ }
+
+ // Validate Version
+ if len(info.Version) > 0 &&
+ (!strings.IsASCIIText(info.Version) ||
+ strings.ASCIITrim(info.Version) == "") {
+ return ErrInvalidVersion
+ }
+
+ // Validate Channels - ensure max and check for duplicates.
+ if len(info.Channels) > maxNumChannels {
+ return ErrExcessiveChannels
+ }
+
+ channelMap := make(map[byte]struct{}, len(info.Channels))
+ for _, ch := range info.Channels {
+ if _, ok := channelMap[ch]; ok {
+ return ErrDuplicateChannels
+ }
+
+ // Mark the channel as present
+ channelMap[ch] = struct{}{}
+ }
+
+ // Validate Moniker.
+ if !strings.IsASCIIText(info.Moniker) || strings.ASCIITrim(info.Moniker) == "" {
+ return ErrInvalidMoniker
+ }
+
+ // XXX: Should we be more strict about address formats?
+ rpcAddr := info.Other.RPCAddress
+ if len(rpcAddr) > 0 && (!strings.IsASCIIText(rpcAddr) || strings.ASCIITrim(rpcAddr) == "") {
+ return ErrInvalidRPCAddress
+ }
+
+ return nil
+}
+
+// ID returns the local node ID
+func (info NodeInfo) ID() ID {
+ return info.PeerID
+}
+
+// CompatibleWith checks if two NodeInfo are compatible with each other.
+// CONTRACT: two nodes are compatible if the Block version and networks match,
+// and they have at least one channel in common
+func (info NodeInfo) CompatibleWith(other NodeInfo) error {
+ // Validate the protocol versions
+ if _, err := info.VersionSet.CompatibleWith(other.VersionSet); err != nil {
+ return fmt.Errorf("incompatible version sets, %w", err)
+ }
+
+ // Make sure nodes are on the same network
+ if info.Network != other.Network {
+ return ErrIncompatibleNetworks
+ }
+
+ // Make sure there is at least 1 channel in common
+ commonFound := false
+ for _, ch1 := range info.Channels {
+ for _, ch2 := range other.Channels {
+ if ch1 == ch2 {
+ commonFound = true
+
+ break
+ }
+ }
+
+ if commonFound {
+ break
+ }
+ }
+
+ if !commonFound {
+ return ErrNoCommonChannels
+ }
+
+ return nil
+}
diff --git a/tm2/pkg/p2p/types/node_info_test.go b/tm2/pkg/p2p/types/node_info_test.go
new file mode 100644
index 00000000000..d03d77e608f
--- /dev/null
+++ b/tm2/pkg/p2p/types/node_info_test.go
@@ -0,0 +1,321 @@
+package types
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/gnolang/gno/tm2/pkg/versionset"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNodeInfo_Validate(t *testing.T) {
+ t.Parallel()
+
+ t.Run("invalid peer ID", func(t *testing.T) {
+ t.Parallel()
+
+ info := &NodeInfo{
+ PeerID: "", // zero
+ }
+
+ assert.ErrorIs(t, info.Validate(), ErrInvalidPeerID)
+ })
+
+ t.Run("invalid version", func(t *testing.T) {
+ t.Parallel()
+
+ testTable := []struct {
+ name string
+ version string
+ }{
+ {
+ "non-ascii version",
+ "¢§µ",
+ },
+ {
+ "empty tab version",
+ fmt.Sprintf("\t"),
+ },
+ {
+ "empty space version",
+ fmt.Sprintf(" "),
+ },
+ }
+
+ for _, testCase := range testTable {
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ info := &NodeInfo{
+ PeerID: GenerateNodeKey().ID(),
+ Version: testCase.version,
+ }
+
+ assert.ErrorIs(t, info.Validate(), ErrInvalidVersion)
+ })
+ }
+ })
+
+ t.Run("invalid moniker", func(t *testing.T) {
+ t.Parallel()
+
+ testTable := []struct {
+ name string
+ moniker string
+ }{
+ {
+ "empty moniker",
+ "",
+ },
+ {
+ "non-ascii moniker",
+ "¢§µ",
+ },
+ {
+ "empty tab moniker",
+ fmt.Sprintf("\t"),
+ },
+ {
+ "empty space moniker",
+ fmt.Sprintf(" "),
+ },
+ }
+
+ for _, testCase := range testTable {
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ info := &NodeInfo{
+ PeerID: GenerateNodeKey().ID(),
+ Moniker: testCase.moniker,
+ }
+
+ assert.ErrorIs(t, info.Validate(), ErrInvalidMoniker)
+ })
+ }
+ })
+
+ t.Run("invalid RPC Address", func(t *testing.T) {
+ t.Parallel()
+
+ testTable := []struct {
+ name string
+ rpcAddress string
+ }{
+ {
+ "non-ascii moniker",
+ "¢§µ",
+ },
+ {
+ "empty tab RPC address",
+ fmt.Sprintf("\t"),
+ },
+ {
+ "empty space RPC address",
+ fmt.Sprintf(" "),
+ },
+ }
+
+ for _, testCase := range testTable {
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ info := &NodeInfo{
+ PeerID: GenerateNodeKey().ID(),
+ Moniker: "valid moniker",
+ Other: NodeInfoOther{
+ RPCAddress: testCase.rpcAddress,
+ },
+ }
+
+ assert.ErrorIs(t, info.Validate(), ErrInvalidRPCAddress)
+ })
+ }
+ })
+
+ t.Run("invalid channels", func(t *testing.T) {
+ t.Parallel()
+
+ testTable := []struct {
+ name string
+ channels []byte
+ expectedErr error
+ }{
+ {
+ "too many channels",
+ make([]byte, maxNumChannels+1),
+ ErrExcessiveChannels,
+ },
+ {
+ "duplicate channels",
+ []byte{
+ byte(10),
+ byte(20),
+ byte(10),
+ },
+ ErrDuplicateChannels,
+ },
+ }
+
+ for _, testCase := range testTable {
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ info := &NodeInfo{
+ PeerID: GenerateNodeKey().ID(),
+ Moniker: "valid moniker",
+ Channels: testCase.channels,
+ }
+
+ assert.ErrorIs(t, info.Validate(), testCase.expectedErr)
+ })
+ }
+ })
+
+ t.Run("valid node info", func(t *testing.T) {
+ t.Parallel()
+
+ info := &NodeInfo{
+ PeerID: GenerateNodeKey().ID(),
+ Moniker: "valid moniker",
+ Channels: []byte{10, 20, 30},
+ Other: NodeInfoOther{
+ RPCAddress: "0.0.0.0:26657",
+ },
+ }
+
+ assert.NoError(t, info.Validate())
+ })
+}
+
+func TestNodeInfo_CompatibleWith(t *testing.T) {
+ t.Parallel()
+
+ t.Run("incompatible version sets", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ name = "Block"
+
+ infoOne = &NodeInfo{
+ VersionSet: []versionset.VersionInfo{
+ {
+ Name: name,
+ Version: "badversion",
+ },
+ },
+ }
+
+ infoTwo = &NodeInfo{
+ VersionSet: []versionset.VersionInfo{
+ {
+ Name: name,
+ Version: "v0.0.0",
+ },
+ },
+ }
+ )
+
+ assert.Error(t, infoTwo.CompatibleWith(*infoOne))
+ })
+
+ t.Run("incompatible networks", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ name = "Block"
+ version = "v0.0.0"
+
+ infoOne = &NodeInfo{
+ Network: "+wrong",
+ VersionSet: []versionset.VersionInfo{
+ {
+ Name: name,
+ Version: version,
+ },
+ },
+ }
+
+ infoTwo = &NodeInfo{
+ Network: "gno",
+ VersionSet: []versionset.VersionInfo{
+ {
+ Name: name,
+ Version: version,
+ },
+ },
+ }
+ )
+
+ assert.ErrorIs(t, infoTwo.CompatibleWith(*infoOne), ErrIncompatibleNetworks)
+ })
+
+ t.Run("no common channels", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ name = "Block"
+ version = "v0.0.0"
+ network = "gno"
+
+ infoOne = &NodeInfo{
+ Network: network,
+ VersionSet: []versionset.VersionInfo{
+ {
+ Name: name,
+ Version: version,
+ },
+ },
+ Channels: []byte{10},
+ }
+
+ infoTwo = &NodeInfo{
+ Network: network,
+ VersionSet: []versionset.VersionInfo{
+ {
+ Name: name,
+ Version: version,
+ },
+ },
+ Channels: []byte{20},
+ }
+ )
+
+ assert.ErrorIs(t, infoTwo.CompatibleWith(*infoOne), ErrNoCommonChannels)
+ })
+
+ t.Run("fully compatible node infos", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ name = "Block"
+ version = "v0.0.0"
+ network = "gno"
+ channels = []byte{10, 20, 30}
+
+ infoOne = &NodeInfo{
+ Network: network,
+ VersionSet: []versionset.VersionInfo{
+ {
+ Name: name,
+ Version: version,
+ },
+ },
+ Channels: channels,
+ }
+
+ infoTwo = &NodeInfo{
+ Network: network,
+ VersionSet: []versionset.VersionInfo{
+ {
+ Name: name,
+ Version: version,
+ },
+ },
+ Channels: channels[1:],
+ }
+ )
+
+ assert.NoError(t, infoTwo.CompatibleWith(*infoOne))
+ })
+}
diff --git a/tm2/pkg/p2p/upnp/probe.go b/tm2/pkg/p2p/upnp/probe.go
deleted file mode 100644
index 29480e7cecc..00000000000
--- a/tm2/pkg/p2p/upnp/probe.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package upnp
-
-import (
- "fmt"
- "log/slog"
- "net"
- "time"
-)
-
-type UPNPCapabilities struct {
- PortMapping bool
- Hairpin bool
-}
-
-func makeUPNPListener(intPort int, extPort int, logger *slog.Logger) (NAT, net.Listener, net.IP, error) {
- nat, err := Discover()
- if err != nil {
- return nil, nil, nil, fmt.Errorf("NAT upnp could not be discovered: %w", err)
- }
- logger.Info(fmt.Sprintf("ourIP: %v", nat.(*upnpNAT).ourIP))
-
- ext, err := nat.GetExternalAddress()
- if err != nil {
- return nat, nil, nil, fmt.Errorf("external address error: %w", err)
- }
- logger.Info(fmt.Sprintf("External address: %v", ext))
-
- port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0)
- if err != nil {
- return nat, nil, ext, fmt.Errorf("port mapping error: %w", err)
- }
- logger.Info(fmt.Sprintf("Port mapping mapped: %v", port))
-
- // also run the listener, open for all remote addresses.
- listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort))
- if err != nil {
- return nat, nil, ext, fmt.Errorf("error establishing listener: %w", err)
- }
- return nat, listener, ext, nil
-}
-
-func testHairpin(listener net.Listener, extAddr string, logger *slog.Logger) (supportsHairpin bool) {
- // Listener
- go func() {
- inConn, err := listener.Accept()
- if err != nil {
- logger.Info(fmt.Sprintf("Listener.Accept() error: %v", err))
- return
- }
- logger.Info(fmt.Sprintf("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr()))
- buf := make([]byte, 1024)
- n, err := inConn.Read(buf)
- if err != nil {
- logger.Info(fmt.Sprintf("Incoming connection read error: %v", err))
- return
- }
- logger.Info(fmt.Sprintf("Incoming connection read %v bytes: %X", n, buf))
- if string(buf) == "test data" {
- supportsHairpin = true
- return
- }
- }()
-
- // Establish outgoing
- outConn, err := net.Dial("tcp", extAddr)
- if err != nil {
- logger.Info(fmt.Sprintf("Outgoing connection dial error: %v", err))
- return
- }
-
- n, err := outConn.Write([]byte("test data"))
- if err != nil {
- logger.Info(fmt.Sprintf("Outgoing connection write error: %v", err))
- return
- }
- logger.Info(fmt.Sprintf("Outgoing connection wrote %v bytes", n))
-
- // Wait for data receipt
- time.Sleep(1 * time.Second)
- return supportsHairpin
-}
-
-func Probe(logger *slog.Logger) (caps UPNPCapabilities, err error) {
- logger.Info("Probing for UPnP!")
-
- intPort, extPort := 8001, 8001
-
- nat, listener, ext, err := makeUPNPListener(intPort, extPort, logger)
- if err != nil {
- return
- }
- caps.PortMapping = true
-
- // Deferred cleanup
- defer func() {
- if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil {
- logger.Error(fmt.Sprintf("Port mapping delete error: %v", err))
- }
- if err := listener.Close(); err != nil {
- logger.Error(fmt.Sprintf("Listener closing error: %v", err))
- }
- }()
-
- supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), logger)
- if supportsHairpin {
- caps.Hairpin = true
- }
-
- return
-}
diff --git a/tm2/pkg/p2p/upnp/upnp.go b/tm2/pkg/p2p/upnp/upnp.go
deleted file mode 100644
index cd47ac35553..00000000000
--- a/tm2/pkg/p2p/upnp/upnp.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Taken from taipei-torrent.
-// Just enough UPnP to be able to forward ports
-// For more information, see: http://www.upnp-hacks.org/upnp.html
-package upnp
-
-// TODO: use syscalls to get actual ourIP, see issue #712
-
-import (
- "bytes"
- "encoding/xml"
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "strconv"
- "strings"
- "time"
-)
-
-type upnpNAT struct {
- serviceURL string
- ourIP string
- urnDomain string
-}
-
-// protocol is either "udp" or "tcp"
-type NAT interface {
- GetExternalAddress() (addr net.IP, err error)
- AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error)
- DeletePortMapping(protocol string, externalPort, internalPort int) (err error)
-}
-
-func Discover() (nat NAT, err error) {
- ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900")
- if err != nil {
- return
- }
- conn, err := net.ListenPacket("udp4", ":0")
- if err != nil {
- return
- }
- socket := conn.(*net.UDPConn)
- defer socket.Close() //nolint: errcheck
-
- if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil {
- return nil, err
- }
-
- st := "InternetGatewayDevice:1"
-
- buf := bytes.NewBufferString(
- "M-SEARCH * HTTP/1.1\r\n" +
- "HOST: 239.255.255.250:1900\r\n" +
- "ST: ssdp:all\r\n" +
- "MAN: \"ssdp:discover\"\r\n" +
- "MX: 2\r\n\r\n")
- message := buf.Bytes()
- answerBytes := make([]byte, 1024)
- for i := 0; i < 3; i++ {
- _, err = socket.WriteToUDP(message, ssdp)
- if err != nil {
- return
- }
- var n int
- _, _, err = socket.ReadFromUDP(answerBytes)
- if err != nil {
- return
- }
- for {
- n, _, err = socket.ReadFromUDP(answerBytes)
- if err != nil {
- break
- }
- answer := string(answerBytes[0:n])
- if !strings.Contains(answer, st) {
- continue
- }
- // HTTP header field names are case-insensitive.
- // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
- locString := "\r\nlocation:"
- answer = strings.ToLower(answer)
- locIndex := strings.Index(answer, locString)
- if locIndex < 0 {
- continue
- }
- loc := answer[locIndex+len(locString):]
- endIndex := strings.Index(loc, "\r\n")
- if endIndex < 0 {
- continue
- }
- locURL := strings.TrimSpace(loc[0:endIndex])
- var serviceURL, urnDomain string
- serviceURL, urnDomain, err = getServiceURL(locURL)
- if err != nil {
- return
- }
- var ourIP net.IP
- ourIP, err = localIPv4()
- if err != nil {
- return
- }
- nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain}
- return
- }
- }
- err = errors.New("UPnP port discovery failed")
- return nat, err
-}
-
-type Envelope struct {
- XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"`
- Soap *SoapBody
-}
-
-type SoapBody struct {
- XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"`
- ExternalIP *ExternalIPAddressResponse
-}
-
-type ExternalIPAddressResponse struct {
- XMLName xml.Name `xml:"GetExternalIPAddressResponse"`
- IPAddress string `xml:"NewExternalIPAddress"`
-}
-
-type ExternalIPAddress struct {
- XMLName xml.Name `xml:"NewExternalIPAddress"`
- IP string
-}
-
-type UPNPService struct {
- ServiceType string `xml:"serviceType"`
- ControlURL string `xml:"controlURL"`
-}
-
-type DeviceList struct {
- Device []Device `xml:"device"`
-}
-
-type ServiceList struct {
- Service []UPNPService `xml:"service"`
-}
-
-type Device struct {
- XMLName xml.Name `xml:"device"`
- DeviceType string `xml:"deviceType"`
- DeviceList DeviceList `xml:"deviceList"`
- ServiceList ServiceList `xml:"serviceList"`
-}
-
-type Root struct {
- Device Device
-}
-
-func getChildDevice(d *Device, deviceType string) *Device {
- dl := d.DeviceList.Device
- for i := 0; i < len(dl); i++ {
- if strings.Contains(dl[i].DeviceType, deviceType) {
- return &dl[i]
- }
- }
- return nil
-}
-
-func getChildService(d *Device, serviceType string) *UPNPService {
- sl := d.ServiceList.Service
- for i := 0; i < len(sl); i++ {
- if strings.Contains(sl[i].ServiceType, serviceType) {
- return &sl[i]
- }
- }
- return nil
-}
-
-func localIPv4() (net.IP, error) {
- tt, err := net.Interfaces()
- if err != nil {
- return nil, err
- }
- for _, t := range tt {
- aa, err := t.Addrs()
- if err != nil {
- return nil, err
- }
- for _, a := range aa {
- ipnet, ok := a.(*net.IPNet)
- if !ok {
- continue
- }
- v4 := ipnet.IP.To4()
- if v4 == nil || v4[0] == 127 { // loopback address
- continue
- }
- return v4, nil
- }
- }
- return nil, errors.New("cannot find local IP address")
-}
-
-func getServiceURL(rootURL string) (url, urnDomain string, err error) {
- r, err := http.Get(rootURL) //nolint: gosec
- if err != nil {
- return
- }
- defer r.Body.Close() //nolint: errcheck
-
- if r.StatusCode >= 400 {
- err = errors.New(fmt.Sprint(r.StatusCode))
- return
- }
- var root Root
- err = xml.NewDecoder(r.Body).Decode(&root)
- if err != nil {
- return
- }
- a := &root.Device
- if !strings.Contains(a.DeviceType, "InternetGatewayDevice:1") {
- err = errors.New("no InternetGatewayDevice")
- return
- }
- b := getChildDevice(a, "WANDevice:1")
- if b == nil {
- err = errors.New("no WANDevice")
- return
- }
- c := getChildDevice(b, "WANConnectionDevice:1")
- if c == nil {
- err = errors.New("no WANConnectionDevice")
- return
- }
- d := getChildService(c, "WANIPConnection:1")
- if d == nil {
- // Some routers don't follow the UPnP spec, and put WanIPConnection under WanDevice,
- // instead of under WanConnectionDevice
- d = getChildService(b, "WANIPConnection:1")
-
- if d == nil {
- err = errors.New("no WANIPConnection")
- return
- }
- }
- // Extract the domain name, which isn't always 'schemas-upnp-org'
- urnDomain = strings.Split(d.ServiceType, ":")[1]
- url = combineURL(rootURL, d.ControlURL)
- return url, urnDomain, err
-}
-
-func combineURL(rootURL, subURL string) string {
- protocolEnd := "://"
- protoEndIndex := strings.Index(rootURL, protocolEnd)
- a := rootURL[protoEndIndex+len(protocolEnd):]
- rootIndex := strings.Index(a, "/")
- return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL
-}
-
-func soapRequest(url, function, message, domain string) (r *http.Response, err error) {
- fullMessage := "" +
- "\r\n" +
- "" + message + " "
-
- req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage))
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"")
- req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3")
- // req.Header.Set("Transfer-Encoding", "chunked")
- req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"")
- req.Header.Set("Connection", "Close")
- req.Header.Set("Cache-Control", "no-cache")
- req.Header.Set("Pragma", "no-cache")
-
- // log.Stderr("soapRequest ", req)
-
- r, err = http.DefaultClient.Do(req)
- if err != nil {
- return nil, err
- }
- /*if r.Body != nil {
- defer r.Body.Close()
- }*/
-
- if r.StatusCode >= 400 {
- // log.Stderr(function, r.StatusCode)
- err = errors.New("Error " + strconv.Itoa(r.StatusCode) + " for " + function)
- r = nil
- return
- }
- return r, err
-}
-
-type statusInfo struct {
- externalIPAddress string
-}
-
-func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
- message := "\r\n" +
- " "
-
- var response *http.Response
- response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain)
- if response != nil {
- defer response.Body.Close() //nolint: errcheck
- }
- if err != nil {
- return
- }
- var envelope Envelope
- data, err := io.ReadAll(response.Body)
- if err != nil {
- return
- }
- reader := bytes.NewReader(data)
- err = xml.NewDecoder(reader).Decode(&envelope)
- if err != nil {
- return
- }
-
- info = statusInfo{envelope.Soap.ExternalIP.IPAddress}
-
- if err != nil {
- return
- }
-
- return info, err
-}
-
-// GetExternalAddress returns an external IP. If GetExternalIPAddress action
-// fails or IP returned is invalid, GetExternalAddress returns an error.
-func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) {
- info, err := n.getExternalIPAddress()
- if err != nil {
- return
- }
- addr = net.ParseIP(info.externalIPAddress)
- if addr == nil {
- err = fmt.Errorf("failed to parse IP: %v", info.externalIPAddress)
- }
- return
-}
-
-func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) {
- // A single concatenation would break ARM compilation.
- message := "\r\n" +
- "" + strconv.Itoa(externalPort)
- message += " " + protocol + " "
- message += "" + strconv.Itoa(internalPort) + " " +
- "" + n.ourIP + " " +
- "1 "
- message += description +
- " " + strconv.Itoa(timeout) +
- " "
-
- var response *http.Response
- response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain)
- if response != nil {
- defer response.Body.Close() //nolint: errcheck
- }
- if err != nil {
- return
- }
-
- // TODO: check response to see if the port was forwarded
- // log.Println(message, response)
- // JAE:
- // body, err := io.ReadAll(response.Body)
- // fmt.Println(string(body), err)
- mappedExternalPort = externalPort
- _ = response
- return
-}
-
-func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) {
- message := "\r\n" +
- "" + strconv.Itoa(externalPort) +
- " " + protocol + " " +
- " "
-
- var response *http.Response
- response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain)
- if response != nil {
- defer response.Body.Close() //nolint: errcheck
- }
- if err != nil {
- return
- }
-
- // TODO: check response to see if the port was deleted
- // log.Println(message, response)
- _ = response
- return
-}
diff --git a/tm2/pkg/sdk/auth/ante.go b/tm2/pkg/sdk/auth/ante.go
index 997478fe4b5..f05a8eff0a7 100644
--- a/tm2/pkg/sdk/auth/ante.go
+++ b/tm2/pkg/sdk/auth/ante.go
@@ -387,9 +387,10 @@ func EnsureSufficientMempoolFees(ctx sdk.Context, fee std.Fee) sdk.Result {
if prod1.Cmp(prod2) >= 0 {
return sdk.Result{}
} else {
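+ // Compute the minimum fee required for the requested gas
+ // under the node's minimum gas price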
+ requiredFee := new(big.Int).Quo(prod2, gpg)
return abciResult(std.ErrInsufficientFee(
fmt.Sprintf(
- "insufficient fees; got: {Gas-Wanted: %d, Gas-Fee %s}, fee required: %+v as minimum gas price set by the node", feeGasPrice.Gas, feeGasPrice.Price, gp,
+ "insufficient fees; got: {Gas-Wanted: %d, Gas-Fee %s}, fee required: %d with %+v as minimum gas price set by the node", feeGasPrice.Gas, feeGasPrice.Price, fee, gp,
),
))
}
@@ -418,16 +419,20 @@ func SetGasMeter(simulate bool, ctx sdk.Context, gasLimit int64) sdk.Context {
// GetSignBytes returns a slice of bytes to sign over for a given transaction
// and an account.
func GetSignBytes(chainID string, tx std.Tx, acc std.Account, genesis bool) ([]byte, error) {
- var accNum uint64
+ var (
+ accNum uint64
+ accSequence uint64
+ )
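+
+ // At genesis, both the account number and the sequence are left at zero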
if !genesis {
accNum = acc.GetAccountNumber()
+ accSequence = acc.GetSequence()
}
return std.GetSignaturePayload(
std.SignDoc{
ChainID: chainID,
AccountNumber: accNum,
- Sequence: acc.GetSequence(),
+ Sequence: accSequence,
Fee: tx.Fee,
Msgs: tx.Msgs,
Memo: tx.Memo,
diff --git a/tm2/pkg/sdk/auth/ante_test.go b/tm2/pkg/sdk/auth/ante_test.go
index 78018b415eb..7c6ace51e4e 100644
--- a/tm2/pkg/sdk/auth/ante_test.go
+++ b/tm2/pkg/sdk/auth/ante_test.go
@@ -209,8 +209,8 @@ func TestAnteHandlerAccountNumbersAtBlockHeightZero(t *testing.T) {
tx = tu.NewTestTx(t, ctx.ChainID(), msgs, privs, []uint64{1}, seqs, fee)
checkInvalidTx(t, anteHandler, ctx, tx, false, std.UnauthorizedError{})
- // from correct account number
- seqs = []uint64{1}
+ // At genesis, both the account number and sequence are zero
+ seqs = []uint64{0}
tx = tu.NewTestTx(t, ctx.ChainID(), msgs, privs, []uint64{0}, seqs, fee)
checkValidTx(t, anteHandler, ctx, tx, false)
@@ -223,7 +223,7 @@ func TestAnteHandlerAccountNumbersAtBlockHeightZero(t *testing.T) {
checkInvalidTx(t, anteHandler, ctx, tx, false, std.UnauthorizedError{})
// correct account numbers
- privs, accnums, seqs = []crypto.PrivKey{priv1, priv2}, []uint64{0, 0}, []uint64{2, 0}
+ privs, accnums, seqs = []crypto.PrivKey{priv1, priv2}, []uint64{0, 0}, []uint64{0, 0}
tx = tu.NewTestTx(t, ctx.ChainID(), msgs, privs, accnums, seqs, fee)
checkValidTx(t, anteHandler, ctx, tx, false)
}
diff --git a/tm2/pkg/sdk/config/config.go b/tm2/pkg/sdk/config/config.go
new file mode 100644
index 00000000000..6e5ededf9a4
--- /dev/null
+++ b/tm2/pkg/sdk/config/config.go
@@ -0,0 +1,35 @@
+package config
+
+import (
+ "github.com/gnolang/gno/tm2/pkg/errors"
+ "github.com/gnolang/gno/tm2/pkg/std"
+)
+
+// -----------------------------------------------------------------------------
+// Application Config
+
+// AppConfig defines the configuration options for the Application
+type AppConfig struct {
+ // Lowest gas prices accepted by a validator in the form of "100tokenA/3gas;10tokenB/5gas" separated by semicolons
+ MinGasPrices string `json:"min_gas_prices" toml:"min_gas_prices" comment:"Lowest gas prices accepted by a validator"`
+}
+
+// DefaultAppConfig returns a default configuration for the application
+func DefaultAppConfig() *AppConfig {
+ return &AppConfig{
+ MinGasPrices: "",
+ }
+}
+
+// ValidateBasic performs basic validation, checking format and param bounds, etc., and
+// returns an error if any check fails.
+func (cfg *AppConfig) ValidateBasic() error {
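+ // An empty minimum gas price means there is nothing to validate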
+ if cfg.MinGasPrices == "" {
+ return nil
+ }
+ if _, err := std.ParseGasPrices(cfg.MinGasPrices); err != nil {
+ return errors.Wrap(err, "invalid min gas prices")
+ }
+
+ return nil
+}
diff --git a/tm2/pkg/sdk/config/config_test.go b/tm2/pkg/sdk/config/config_test.go
new file mode 100644
index 00000000000..dd0c391849b
--- /dev/null
+++ b/tm2/pkg/sdk/config/config_test.go
@@ -0,0 +1,36 @@
+package config
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateAppConfig(t *testing.T) {
+ c := DefaultAppConfig()
+ c.MinGasPrices = "" // empty
+ assert.NoError(t, c.ValidateBasic())
+
+ testCases := []struct {
+ testName string
+ minGasPrices string
+ expectErr bool
+ }{
+ {"invalid min gas prices invalid gas", "10token/1", true},
+ {"invalid min gas prices invalid gas denom", "9token/0gs", true},
+ {"invalid min gas prices zero gas", "10token/0gas", true},
+ {"invalid min gas prices no gas", "10token/gas", true},
+ {"invalid min gas prices negtive gas", "10token/-1gas", true},
+ {"invalid min gas prices invalid denom", "10$token/2gas", true},
+ {"invalid min gas prices invalid second denom", "10token/2gas;10/3gas", true},
+ {"valid min gas prices", "10foo/3gas;5bar/3gas", false},
+ }
+
+ cfg := DefaultAppConfig()
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.testName, func(t *testing.T) {
+ cfg.MinGasPrices = tc.minGasPrices
+ assert.Equal(t, tc.expectErr, cfg.ValidateBasic() != nil)
+ })
+ }
+}
diff --git a/tm2/pkg/std/gasprice.go b/tm2/pkg/std/gasprice.go
index fd082a93371..82d236c1d04 100644
--- a/tm2/pkg/std/gasprice.go
+++ b/tm2/pkg/std/gasprice.go
@@ -29,9 +29,11 @@ func ParseGasPrice(gasprice string) (GasPrice, error) {
if gas.Denom != "gas" {
return GasPrice{}, errors.New("invalid gas price: %s (invalid gas denom)", gasprice)
}
- if gas.Amount == 0 {
- return GasPrice{}, errors.New("invalid gas price: %s (gas can not be zero)", gasprice)
+
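+ // The gas amount must be strictly positive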
+ if gas.Amount <= 0 {
+ return GasPrice{}, errors.New("invalid gas price: %s (invalid gas amount)", gasprice)
}
+
return GasPrice{
Gas: gas.Amount,
Price: price,
diff --git a/tm2/pkg/telemetry/metrics/metrics.go b/tm2/pkg/telemetry/metrics/metrics.go
index e3ae932612f..5eeb664ec8f 100644
--- a/tm2/pkg/telemetry/metrics/metrics.go
+++ b/tm2/pkg/telemetry/metrics/metrics.go
@@ -16,12 +16,10 @@ import (
)
const (
- broadcastTxTimerKey = "broadcast_tx_hist"
- buildBlockTimerKey = "build_block_hist"
+ buildBlockTimerKey = "build_block_hist"
inboundPeersKey = "inbound_peers_gauge"
outboundPeersKey = "outbound_peers_gauge"
- dialingPeersKey = "dialing_peers_gauge"
numMempoolTxsKey = "num_mempool_txs_hist"
numCachedTxsKey = "num_cached_txs_hist"
@@ -42,11 +40,6 @@ const (
)
var (
- // Misc //
-
- // BroadcastTxTimer measures the transaction broadcast duration
- BroadcastTxTimer metric.Int64Histogram
-
// Networking //
// InboundPeers measures the active number of inbound peers
@@ -55,9 +48,6 @@ var (
// OutboundPeers measures the active number of outbound peers
OutboundPeers metric.Int64Gauge
- // DialingPeers measures the active number of peers in the dialing state
- DialingPeers metric.Int64Gauge
-
// Mempool //
// NumMempoolTxs measures the number of transaction inside the mempool
@@ -156,14 +146,6 @@ func Init(config config.Config) error {
otel.SetMeterProvider(provider)
meter := provider.Meter(config.MeterName)
- if BroadcastTxTimer, err = meter.Int64Histogram(
- broadcastTxTimerKey,
- metric.WithDescription("broadcast tx duration"),
- metric.WithUnit("ms"),
- ); err != nil {
- return fmt.Errorf("unable to create histogram, %w", err)
- }
-
if BuildBlockTimer, err = meter.Int64Histogram(
buildBlockTimerKey,
metric.WithDescription("block build duration"),
@@ -188,18 +170,10 @@ func Init(config config.Config) error {
); err != nil {
return fmt.Errorf("unable to create histogram, %w", err)
}
+
// Initialize OutboundPeers Gauge
OutboundPeers.Record(ctx, 0)
- if DialingPeers, err = meter.Int64Gauge(
- dialingPeersKey,
- metric.WithDescription("dialing peer count"),
- ); err != nil {
- return fmt.Errorf("unable to create histogram, %w", err)
- }
- // Initialize DialingPeers Gauge
- DialingPeers.Record(ctx, 0)
-
// Mempool //
if NumMempoolTxs, err = meter.Int64Histogram(
numMempoolTxsKey,