diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index f91581fa69..e8afde6877 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -346,7 +346,7 @@ jobs: with: working-directory: ${{matrix.package}}/ # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v1.56.1 + version: v1.56.2 # see: https://github.com/golangci/golangci-lint/issues/2654 args: --timeout=60m env: diff --git a/.github/workflows/goreleaser-actions.yml b/.github/workflows/goreleaser-actions.yml index 8ccf98e553..e248f41555 100644 --- a/.github/workflows/goreleaser-actions.yml +++ b/.github/workflows/goreleaser-actions.yml @@ -20,7 +20,6 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 # needed if using new-from-rev (see: https://golangci-lint.run/usage/configuration/#issues-configuration) - submodules: true - name: Cache Docker images. @@ -169,11 +168,6 @@ jobs: gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} passphrase: ${{ secrets.GPG_PASSPHRASE }} - - name: Set up Go - uses: actions/setup-go@v4 - with: - go-version: 1.20.x - - name: Login to GitHub Container Registry uses: docker/login-action@v1 diff --git a/contrib/promexporter/config/config.go b/contrib/promexporter/config/config.go index c185783bf4..3325088346 100644 --- a/contrib/promexporter/config/config.go +++ b/contrib/promexporter/config/config.go @@ -90,7 +90,7 @@ func DecodeConfig(filePath string) (_ Config, err error) { Address: "0x230a1ac45690b9ae1176389434610b9526d2f21b", ChainIDs: types.ToInts(types.ETH, types.OPTIMISM, types.CRONOS, types.BSC, types.POLYGON, types.FANTOM, types.BOBA, types.METIS, types.MOONBEAM, types.MOONRIVER, types.DOGECHAIN, types.CANTO, types.KLAYTN, - types.BASE, types.ARBITRUM, types.AVALANCHE, types.DFK, types.AURORA, types.HARMONY), + types.BASE, types.ARBITRUM, types.AVALANCHE, types.DFK, types.AURORA, types.HARMONY, types.BLAST), Name: "validators", }, { @@ -130,6 +130,7 @@ func 
DecodeConfig(filePath string) (_ Config, err error) { types.CANTO.Int(): "0xDde5BEC4815E1CeCf336fb973Ca578e8D83606E0", types.DOGECHAIN.Int(): "0x9508BF380c1e6f751D97604732eF1Bae6673f299", types.BASE.Int(): "0xf07d1C752fAb503E47FEF309bf14fbDD3E867089", + types.BLAST.Int(): "0x55769bAF6ec39B3bf4aAE948eB890eA33307Ef3C", } cfg.VpriceCheckTokens = []string{"nUSD", "nETH"} diff --git a/contrib/promexporter/internal/types/chainID.go b/contrib/promexporter/internal/types/chainID.go index 6c362c146f..15d7304d2a 100644 --- a/contrib/promexporter/internal/types/chainID.go +++ b/contrib/promexporter/internal/types/chainID.go @@ -24,6 +24,7 @@ const ( CRONOS ChainID = 25 KOVAN ChainID = 42 BSC ChainID = 56 + BLAST ChainID = 81457 POLYGON ChainID = 137 FANTOM ChainID = 250 BOBA ChainID = 288 diff --git a/contrib/promexporter/internal/types/chainid_string.go b/contrib/promexporter/internal/types/chainid_string.go index e06514d9c4..a20ac28dce 100644 --- a/contrib/promexporter/internal/types/chainid_string.go +++ b/contrib/promexporter/internal/types/chainid_string.go @@ -16,6 +16,7 @@ func _() { _ = x[CRONOS-25] _ = x[KOVAN-42] _ = x[BSC-56] + _ = x[BLAST-81457] _ = x[POLYGON-137] _ = x[FANTOM-250] _ = x[BOBA-288] @@ -33,7 +34,7 @@ func _() { _ = x[HARMONY-1666600000] } -const _ChainID_name = "ETHROPSTENRINKEBYGOERLIOPTIMISMCRONOSKOVANBSCPOLYGONFANTOMBOBAMETISMOONBEAMMOONRIVERDOGECHAINCANTOKLAYTNBASEARBITRUMAVALANCHEDFKAURORAHARMONY" +const _ChainID_name = "ETHROPSTENRINKEBYGOERLIOPTIMISMCRONOSKOVANBSCPOLYGONFANTOMBOBAMETISMOONBEAMMOONRIVERDOGECHAINCANTOKLAYTNBASEARBITRUMAVALANCHEDFKBLASTAURORAHARMONY" var _ChainID_map = map[ChainID]string{ 1: _ChainID_name[0:3], @@ -57,8 +58,9 @@ var _ChainID_map = map[ChainID]string{ 42161: _ChainID_name[108:116], 43114: _ChainID_name[116:125], 53935: _ChainID_name[125:128], - 1313161554: _ChainID_name[128:134], - 1666600000: _ChainID_name[134:141], + 81457: _ChainID_name[128:133], + 1313161554: _ChainID_name[133:139], + 1666600000: 
_ChainID_name[139:146], } func (i ChainID) String() string { diff --git a/contrib/screener-api/config/config.go b/contrib/screener-api/config/config.go index aa55d7840e..564d75d74e 100644 --- a/contrib/screener-api/config/config.go +++ b/contrib/screener-api/config/config.go @@ -18,6 +18,19 @@ type Config struct { Port int `yaml:"port"` // Database is the database configuration Database DatabaseConfig `yaml:"database"` + // VolumeThresholds is the volume thresholds for each risk type + VolumeThresholds []VolumeThreshold `yaml:"volumeThresholds"` + // TODO: This HAS to be re-structured somehow + // Whitelist is a list of addresses to whitelist + Whitelist []string `yaml:"whitelist"` +} + +// VolumeThreshold defines thresholds for different risk categories and types. +type VolumeThreshold struct { + Category string `yaml:"category"` + TypeOfRisk string `yaml:"typeOfRisk"` + Incoming float64 `yaml:"incoming"` + Outgoing float64 `yaml:"outgoing"` } // GetCacheTime gets how long to use the cache for a given ruleset. diff --git a/contrib/screener-api/screener/internal/risk.go b/contrib/screener-api/screener/internal/risk.go index 47bc0333b7..4c5b3c14ee 100644 --- a/contrib/screener-api/screener/internal/risk.go +++ b/contrib/screener-api/screener/internal/risk.go @@ -3,9 +3,11 @@ package internal import ( "fmt" - "github.com/synapsecns/sanguine/contrib/screener-api/trmlabs" "strconv" "strings" + + "github.com/synapsecns/sanguine/contrib/screener-api/config" + "github.com/synapsecns/sanguine/contrib/screener-api/trmlabs" ) // rulesetManager manages the rulesets. @@ -51,7 +53,7 @@ func (rm *rulesetManager) GetRuleset(callerType string) RuleSet { // RuleSet interface defines methods to work with risk rules. 
type RuleSet interface { HasRisk(riskType string) bool - HasAddressIndicators(riskIndicators ...trmlabs.AddressRiskIndicator) (bool, error) + HasAddressIndicators(thresholds []config.VolumeThreshold, riskIndicators ...trmlabs.AddressRiskIndicator) (bool, error) } // CallerRuler implements the RuleSet interface for a specific caller type. @@ -72,7 +74,12 @@ func (cr *CallerRuler) HasRisk(riskType string) bool { } // HasAddressIndicators returns a list of addressRiskIndicator. -func (cr *CallerRuler) HasAddressIndicators(riskIndicators ...trmlabs.AddressRiskIndicator) (bool, error) { +// +//nolint:cyclop +func (cr *CallerRuler) HasAddressIndicators(thresholds []config.VolumeThreshold, riskIndicators ...trmlabs.AddressRiskIndicator) (bool, error) { + // Initialize a variable to track if any indicator is blocked + anyIndicatorBlocked := false + for _, ri := range riskIndicators { incoming, err := strconv.ParseFloat(ri.IncomingVolumeUsd, 32) if err != nil { @@ -84,14 +91,24 @@ func (cr *CallerRuler) HasAddressIndicators(riskIndicators ...trmlabs.AddressRis return false, fmt.Errorf("could not parse outgoing volume: %w", err) } - riskParam := MakeParam(ri.Category, ri.RiskType) - isBlocked, found := cr.riskRules[riskParam] - if isBlocked && found && (incoming > 0 || outgoing > 0) { - return true, nil + // Check against thresholds + for _, threshold := range thresholds { + if strings.EqualFold(ri.Category, threshold.Category) && strings.EqualFold(ri.RiskType, threshold.TypeOfRisk) { + // If either incoming or outgoing volume exceeds the threshold, the indicator is blocked + if (threshold.Incoming > 0 && incoming > threshold.Incoming) || (threshold.Outgoing > 0 && outgoing > threshold.Outgoing) { + anyIndicatorBlocked = true + break // No need to check other thresholds, this indicator is blocked + } + } + } + + if anyIndicatorBlocked { + break // No need to check further indicators, at least one indicator is blocked } } - return false, nil + // Return true if any indicator 
is blocked, otherwise false + return anyIndicatorBlocked, nil } // MakeParam creates a risk param from the given category and risk type in a standardized format. diff --git a/contrib/screener-api/screener/internal/risk_test.go b/contrib/screener-api/screener/internal/risk_test.go index f03d106b6b..986b2b9b5e 100644 --- a/contrib/screener-api/screener/internal/risk_test.go +++ b/contrib/screener-api/screener/internal/risk_test.go @@ -1,11 +1,13 @@ package internal_test import ( + "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/synapsecns/sanguine/contrib/screener-api/config" "github.com/synapsecns/sanguine/contrib/screener-api/screener/internal" "github.com/synapsecns/sanguine/contrib/screener-api/trmlabs" - "testing" ) func TestNewRulesetManager(t *testing.T) { @@ -50,13 +52,23 @@ func TestHasAddressIndicators(t *testing.T) { "category1_risktype1": true, } + // Define thresholds for testing + thresholds := []config.VolumeThreshold{ + { + Category: "Category1", + TypeOfRisk: "RiskType1", + Incoming: 1500, // Set thresholds to allow the test cases to pass or fail as expected + Outgoing: 800, + }, + } + cr := internal.NewRuleset(riskRules) // Test case where the indicator meets risk rules indicators := []trmlabs.AddressRiskIndicator{ - {IncomingVolumeUsd: "1000", OutgoingVolumeUsd: "500", Category: "Category1", RiskType: "RiskType1"}, + {IncomingVolumeUsd: "1501", OutgoingVolumeUsd: "500", Category: "Category1", RiskType: "RiskType1"}, } - result, err := cr.HasAddressIndicators(indicators...) + result, err := cr.HasAddressIndicators(thresholds, indicators...) require.NoError(t, err) assert.True(t, result) @@ -64,7 +76,7 @@ func TestHasAddressIndicators(t *testing.T) { indicators = []trmlabs.AddressRiskIndicator{ {IncomingVolumeUsd: "100", OutgoingVolumeUsd: "50", Category: "Category2", RiskType: "RiskType2"}, } - result, err = cr.HasAddressIndicators(indicators...) 
+ result, err = cr.HasAddressIndicators(thresholds, indicators...) require.NoError(t, err) assert.False(t, result) @@ -72,13 +84,13 @@ func TestHasAddressIndicators(t *testing.T) { indicators = []trmlabs.AddressRiskIndicator{ {IncomingVolumeUsd: "invalid", OutgoingVolumeUsd: "500", Category: "Category1", RiskType: "RiskType1"}, } - _, err = cr.HasAddressIndicators(indicators...) + _, err = cr.HasAddressIndicators(thresholds, indicators...) require.Error(t, err) // Test case with invalid outgoing volume indicators = []trmlabs.AddressRiskIndicator{ {IncomingVolumeUsd: "1000", OutgoingVolumeUsd: "invalid", Category: "Category1", RiskType: "RiskType1"}, } - _, err = cr.HasAddressIndicators(indicators...) + _, err = cr.HasAddressIndicators(thresholds, indicators...) require.Error(t, err) } diff --git a/contrib/screener-api/screener/screener.go b/contrib/screener-api/screener/screener.go index e7dbb6b074..4d7ea3a3a4 100644 --- a/contrib/screener-api/screener/screener.go +++ b/contrib/screener-api/screener/screener.go @@ -6,6 +6,11 @@ import ( "encoding/json" "errors" "fmt" + "net/http" + "strings" + "sync" + "time" + "github.com/gin-gonic/gin" "github.com/ipfs/go-log" "github.com/synapsecns/sanguine/contrib/screener-api/config" @@ -21,10 +26,6 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slices" - "net/http" - "strings" - "sync" - "time" ) // Screener is the interface for the screener. 
@@ -34,6 +35,7 @@ type Screener interface { type screenerImpl struct { rulesManager internal.RulesetManager + thresholds []config.VolumeThreshold db db.RuleDB router *gin.Engine metrics metrics.Handler @@ -41,6 +43,7 @@ type screenerImpl struct { client trmlabs.Client blacklist []string blacklistMux sync.RWMutex + whitelist []string } var logger = log.Logger("screener") @@ -56,6 +59,11 @@ func NewScreener(ctx context.Context, cfg config.Config, metricHandler metrics.H if err != nil { return nil, fmt.Errorf("could not create trm client: %w", err) } + screener.thresholds = cfg.VolumeThresholds + + for _, item := range cfg.Whitelist { + screener.whitelist = append(screener.whitelist, strings.ToLower(item)) + } screener.rulesManager, err = setupScreener(cfg.Rulesets) if err != nil { @@ -152,6 +160,11 @@ func (s *screenerImpl) screenAddress(c *gin.Context) { } s.blacklistMux.RUnlock() + if slices.Contains(s.whitelist, address) { + c.JSON(http.StatusOK, gin.H{"risk": false}) + return + } + ctx, span := s.metrics.Tracer().Start(c.Request.Context(), "screenAddress", trace.WithAttributes(attribute.String("address", address))) defer func() { metrics.EndSpanWithErr(span, err) @@ -171,7 +184,7 @@ func (s *screenerImpl) screenAddress(c *gin.Context) { } var hasIndicator bool - if hasIndicator, err = currentRules.HasAddressIndicators(indicators...); err != nil { + if hasIndicator, err = currentRules.HasAddressIndicators(s.thresholds, indicators...); err != nil { c.JSON(http.StatusOK, gin.H{"risk": true}) return } diff --git a/contrib/screener-api/screener/setup.go b/contrib/screener-api/screener/setup.go index 1918645ba0..fb398c9d4c 100644 --- a/contrib/screener-api/screener/setup.go +++ b/contrib/screener-api/screener/setup.go @@ -3,15 +3,16 @@ package screener import ( "encoding/csv" "fmt" + "os" + "strings" + "github.com/gocarina/gocsv" "github.com/synapsecns/sanguine/contrib/screener-api/config" 
"github.com/synapsecns/sanguine/contrib/screener-api/screener/internal" - "os" - "strings" ) -func setupScreener(rulesets map[string]config.RulesetConfig) (internal.RulesetManager, error) { - mgr := internal.NewRulesetManager(map[string]map[string]bool{}) +func setupScreener(rulesets map[string]config.RulesetConfig) (mgr internal.RulesetManager, err error) { + mgr = internal.NewRulesetManager(map[string]map[string]bool{}) for csvName, cfg := range rulesets { csvPath := cfg.Filename parsedCsv, err := parseCsv(csvPath) diff --git a/contrib/screener-api/screener/suite_test.go b/contrib/screener-api/screener/suite_test.go index 24231084ff..b5edbc9d5a 100644 --- a/contrib/screener-api/screener/suite_test.go +++ b/contrib/screener-api/screener/suite_test.go @@ -4,6 +4,10 @@ import ( "context" "errors" "fmt" + "strconv" + "testing" + "time" + "github.com/Flaque/filet" "github.com/gocarina/gocsv" "github.com/phayes/freeport" @@ -19,9 +23,6 @@ import ( "github.com/synapsecns/sanguine/core/metrics" "github.com/synapsecns/sanguine/core/metrics/localmetrics" "github.com/synapsecns/sanguine/core/testsuite" - "strconv" - "testing" - "time" ) type ScreenerSuite struct { diff --git a/core/README.md b/core/README.md index 6ee1a517ee..23f5cef041 100644 --- a/core/README.md +++ b/core/README.md @@ -5,7 +5,6 @@ Core contains common libraries used across the synapse Go repositories. - ## Directory Structure
@@ -20,7 +19,7 @@ root
 ├── merkle: Provides a go based merkle tree implementation.
 ├── metrics: Provides a set of utilities for working with metrics/otel tracing.
 ├── mocktesting: Provides a mocked tester for use with `testing.TB`
-├── observer: Provides an interface for adding/removing listeners.
+├── observer (deprecated): Provides an interface for adding/removing listeners.
 ├── processlog: Provides a way to interact with detatched processes as streams.
 ├── retry: Retries a function until it succeeds or the timeout is reached. This comes with a set of backoff strategies/options.
 ├── server: Provides a context-safe server that can be used to start/stop a server.
diff --git a/docker/goreleaser/Dockerfile b/docker/goreleaser/Dockerfile
index 8365755351..9a92b3e53f 100644
--- a/docker/goreleaser/Dockerfile
+++ b/docker/goreleaser/Dockerfile
@@ -1,7 +1,7 @@
 FROM --platform=linux/amd64 debian:11
 
-ARG VERSION_ARG=1.20.0-pro
-ARG SHA_ARG=d2d76cf4b212f67cb9995c8539167a1c6d771859aad20ed242bfab640b6d396f
+ARG VERSION_ARG=1.24.0-pro
+ARG SHA_ARG=01237f7151d2c46c307f21de183eb863ce47a4b5244507487ec663640b077d7d
 ARG FILE_ARG=goreleaser-pro_Linux_x86_64.tar.gz
 ARG DOWNLOAD_ARG=https://github.com/goreleaser/goreleaser-pro/releases/download/v${VERSION_ARG}/${FILE_ARG}
 
@@ -12,8 +12,8 @@ ENV GORELEASER_DOWNLOAD_FILE=$FILE_ARG
 ENV GORELEASER_DOWNLOAD_URL=$DOWNLOAD_ARG
 
 # Golang
-ENV GOLANG_VERSION=1.21.3
-ENV GOLANG_SHA=1241381b2843fae5a9707eec1f8fb2ef94d827990582c7c7c32f5bdfbfd420c8
+ENV GOLANG_VERSION=1.22.0
+ENV GOLANG_SHA=f6c8a87aa03b92c4b0bf3d558e28ea03006eb29db78917daec5cfb6ec1046265
 ENV GOLANG_DOWNLOAD_FILE=go${GOLANG_VERSION}.linux-amd64.tar.gz
 ENV GOLANG_DOWNLOAD_URL=https://dl.google.com/go/${GOLANG_DOWNLOAD_FILE}
 
diff --git a/ethergo/README.md b/ethergo/README.md
index 4a474621a3..02cfa8a70b 100644
--- a/ethergo/README.md
+++ b/ethergo/README.md
@@ -29,15 +29,16 @@ root
 │   ├── geth: Contains an embedded geth backend. This is useful for testing against a local geth instance without forking capabilities. This does not require docker and runs fully embedded in the go application, as such it is faster than the docker-based backends, but less versatile. Used when an rpc address is needed for a localnet.
 │   ├── preset: Contains a number of preset backends for testing.
 │   ├── simulated: The fastest backend, this does not expose an rpc endpoint and uses geth's [simulated backend](https://goethereumbook.org/en/client-simulated/)
-├── chain: Contains a client for interacting with the chain. This will be removed in a future version. Please use [client](./client) going forward.
+├── chain: Contains a client for interacting with the chain. This will be removed in a future version. Please use [client](./client) going forward.
 │   ├── chainwatcher: Watches the chain for events, blocks and logs
 │   ├── client: Contains eth clients w/ rate limiting, workarounds for bugs in some chains, etc.
 │   ├── gas: Contains a deterministic gas estimator
-│   ├── watcher: Client interface for chain watcher.
+│   ├── watcher: Client interface for chain watcher.
 ├── contracts: Contains interfaces for using contracts with the deployer + manager
 ├── client: Contains an open tracing compatible ethclient with batching.
 ├── example: Contains a full featured example of how to use deployer + manager
 ├── forker: Allows the use of fork tests in live chains without docker using an anvil binary.
+├── listener: Drop-in contract listener
 ├── manager: Manages contract deployments.
 ├── mocks: Contains mocks for testing various data types (transactions, addresses, logs, etc)
 ├── parser: Parse hardhat deployments
diff --git a/ethergo/example/README.md b/ethergo/example/README.md
index 824cdaa050..6ad31612ee 100644
--- a/ethergo/example/README.md
+++ b/ethergo/example/README.md
@@ -195,5 +195,77 @@
       }
       ```
 
+   4. (Optional): Create a typecast getter:
+      To avoid naked casts of the contract handle, we can optionally create a typecast getter.
+      To do this, we're going to create a thin wrapper around deploymanager.
+
+      ```go
+      package example
+      import (
+      "context"
+      "github.com/synapsecns/sanguine/ethergo/backends"
+      "github.com/synapsecns/sanguine/ethergo/contracts"
+      "github.com/synapsecns/sanguine/ethergo/manager"
+      "testing"
+      )
+
+      // DeployManager wraps DeployerManager and allows typed contract handles to be returned.
+      type DeployManager struct {
+        *manager.DeployerManager
+      }
+
+      // NewDeployManager creates a new DeployManager.
+      func NewDeployManager(t *testing.T) *DeployManager {
+        t.Helper()
+
+        parentManager := manager.NewDeployerManager(t, NewCounterDeployer)
+        return &DeployManager{parentManager}
+      }
+      ```
+
+      Now we can create a handle to get the contract for us:
+
+      ```go
+      package example
+      // see above for imports
+
+      import (
+        "context"
+        "github.com/synapsecns/sanguine/ethergo/backends"
+        "github.com/synapsecns/sanguine/ethergo/contracts"
+        "github.com/synapsecns/sanguine/ethergo/example/counter"
+        "github.com/synapsecns/sanguine/ethergo/manager"
+        "testing"
+      )
+
+      // GetCounter gets the pre-created counter.
+      func (d *DeployManager) GetCounter(ctx context.Context, backend backends.SimulatedTestBackend) (contract contracts.DeployedContract, handle *counter.CounterRef) {
+        d.T().Helper()
+
+        return manager.GetContract[*counter.CounterRef](ctx, d.T(), d, backend, CounterType)
+      }
+      ```
+
+   5. (Optional) Make sure our dependencies are correct: We can also create a test to assert our dependencies are correctly listed in each deployer. That looks like this:
+
+         ```go
+         package example_test
+
+         import (
+            "context"
+            "github.com/synapsecns/sanguine/ethergo/backends"
+            "github.com/synapsecns/sanguine/ethergo/contracts"
+            "github.com/synapsecns/sanguine/ethergo/example"
+            "github.com/synapsecns/sanguine/ethergo/manager"
+            "testing"
+          )
+
+
+           func TestDependenciesCorrect(t *testing.T) {
+              manager.AssertDependenciesCorrect(context.Background(), t, func() manager.IDeployManager {
+                 return example.NewDeployManager(t)
+           })
+           }
+         ```
 
 That's it! You should be done. As you can see, there's a lot more that can be done here. Passing in a list of all your deployers every time doesn't make sense. You'll want to create a standard testutil and extend it. We also haven't covered that any backend here is interchangable: you can use simulated, ganache, or embedded geth. This tutorial should've covered the basics though
diff --git a/ethergo/example/counter/counter.abigen.go b/ethergo/example/counter/counter.abigen.go
index 8dfa0b19ec..9b4eac4e66 100644
--- a/ethergo/example/counter/counter.abigen.go
+++ b/ethergo/example/counter/counter.abigen.go
@@ -31,15 +31,16 @@ var (
 
 // CounterMetaData contains all meta data concerning the Counter contract.
 var CounterMetaData = &bind.MetaData{
-	ABI: "[{\"inputs\":[],\"name\":\"decrementCounter\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCount\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getVitalikCount\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"incrementCounter\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"vitalikIncrement\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
+	ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"count\",\"type\":\"int256\"}],\"name\":\"Decremented\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"count\",\"type\":\"int256\"}],\"name\":\"Incremented\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"user\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"count\",\"type\":\"int256\"}],\"name\":\"IncrementedByUser\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"decrementCounter\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"deployBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCount\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getVitalikCount\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"incrementCounter\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"vitalikIncrement\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
 	Sigs: map[string]string{
 		"f5c5ad83": "decrementCounter()",
+		"a3ec191a": "deployBlock()",
 		"a87d942c": "getCount()",
 		"9f6f1ec1": "getVitalikCount()",
 		"5b34b966": "incrementCounter()",
 		"6c573535": "vitalikIncrement()",
 	},
-	Bin: "0x608060405260008055600060015534801561001957600080fd5b506102b0806100296000396000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c80639f6f1ec1116100505780639f6f1ec11461007e578063a87d942c14610094578063f5c5ad831461009c57600080fd5b80635b34b9661461006c5780636c57353514610076575b600080fd5b6100746100a4565b005b6100746100bd565b6001545b60405190815260200160405180910390f35b600054610082565b610074610151565b60016000808282546100b69190610163565b9091555050565b3373d8da6bf26964af9d7eed9e03e53415d37aa960451461013e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f4f6e6c7920566974616c696b2063616e20636f756e7420627920313000000000604482015260640160405180910390fd5b600a600160008282546100b69190610163565b60016000808282546100b691906101d7565b6000808212827f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0384138115161561019d5761019d61024b565b827f80000000000000000000000000000000000000000000000000000000000000000384128116156101d1576101d161024b565b50500190565b6000808312837f8000000000000000000000000000000000000000000000000000000000000000018312811516156102115761021161024b565b837f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0183138116156102455761024561024b565b50500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fdfea26469706673582212201e03c8b68dcbcef6344fae810afa5d485b33bc238c0e8cb1508114b9c0ca702964736f6c63430008040033",
+	Bin: "0x60a060405260008055600060015534801561001957600080fd5b5043608052608051610390610038600039600060a401526103906000f3fe608060405234801561001057600080fd5b50600436106100725760003560e01c8063a3ec191a11610050578063a3ec191a1461009f578063a87d942c146100c6578063f5c5ad83146100ce57600080fd5b80635b34b966146100775780636c573535146100815780639f6f1ec114610089575b600080fd5b61007f6100d6565b005b61007f610126565b6001545b60405190815260200160405180910390f35b61008d7f000000000000000000000000000000000000000000000000000000000000000081565b60005461008d565b61007f6101f9565b60016000808282546100e89190610243565b90915550506000546040519081527fda0bc8b9b52da793a50e130494716550dab510a10a485be3f1b23d4da60ff4be906020015b60405180910390a1565b3373d8da6bf26964af9d7eed9e03e53415d37aa96045146101a7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f4f6e6c7920566974616c696b2063616e20636f756e7420627920313000000000604482015260640160405180910390fd5b600a600160008282546101ba9190610243565b90915550506001546040805133815260208101929092527f5832be325e40e91e7a991db4415bdfa9c689e8007072fdb8de3be47757a14557910161011c565b600160008082825461020b91906102b7565b90915550506000546040519081527f22ccb5ba3d32a9221c3efe39ffab06d1ddc4bd6684975ea75fa60f95ccff53de9060200161011c565b6000808212827f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0384138115161561027d5761027d61032b565b827f80000000000000000000000000000000000000000000000000000000000000000384128116156102b1576102b161032b565b50500190565b6000808312837f8000000000000000000000000000000000000000000000000000000000000000018312811516156102f1576102f161032b565b837f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0183138116156103255761032561032b565b50500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fdfea2646970667358221220dd6810ba2d049a0f7354bf12f6a017744c806f0470f592679a80ef552f69b85164736f6c63430008040033",
 }
 
 // CounterABI is the input ABI used to generate the binding from.
@@ -213,6 +214,37 @@ func (_Counter *CounterTransactorRaw) Transact(opts *bind.TransactOpts, method s
 	return _Counter.Contract.contract.Transact(opts, method, params...)
 }
 
+// DeployBlock is a free data retrieval call binding the contract method 0xa3ec191a.
+//
+// Solidity: function deployBlock() view returns(uint256)
+func (_Counter *CounterCaller) DeployBlock(opts *bind.CallOpts) (*big.Int, error) {
+	var out []interface{}
+	err := _Counter.contract.Call(opts, &out, "deployBlock")
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// DeployBlock is a free data retrieval call binding the contract method 0xa3ec191a.
+//
+// Solidity: function deployBlock() view returns(uint256)
+func (_Counter *CounterSession) DeployBlock() (*big.Int, error) {
+	return _Counter.Contract.DeployBlock(&_Counter.CallOpts)
+}
+
+// DeployBlock is a free data retrieval call binding the contract method 0xa3ec191a.
+//
+// Solidity: function deployBlock() view returns(uint256)
+func (_Counter *CounterCallerSession) DeployBlock() (*big.Int, error) {
+	return _Counter.Contract.DeployBlock(&_Counter.CallOpts)
+}
+
 // GetCount is a free data retrieval call binding the contract method 0xa87d942c.
 //
 // Solidity: function getCount() view returns(int256)
@@ -337,3 +369,406 @@ func (_Counter *CounterSession) VitalikIncrement() (*types.Transaction, error) {
 func (_Counter *CounterTransactorSession) VitalikIncrement() (*types.Transaction, error) {
 	return _Counter.Contract.VitalikIncrement(&_Counter.TransactOpts)
 }
+
+// CounterDecrementedIterator is returned from FilterDecremented and is used to iterate over the raw logs and unpacked data for Decremented events raised by the Counter contract.
+type CounterDecrementedIterator struct {
+	Event *CounterDecremented // Event containing the contract specifics and raw log
+
+	contract *bind.BoundContract // Generic contract to use for unpacking event data
+	event    string              // Event name to use for unpacking event data
+
+	logs chan types.Log        // Log channel receiving the found contract events
+	sub  ethereum.Subscription // Subscription for errors, completion and termination
+	done bool                  // Whether the subscription completed delivering logs
+	fail error                 // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *CounterDecrementedIterator) Next() bool {
+	// If the iterator failed, stop iterating
+	if it.fail != nil {
+		return false
+	}
+	// If the iterator completed, deliver directly whatever's available
+	if it.done {
+		select {
+		case log := <-it.logs:
+			it.Event = new(CounterDecremented)
+			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+				it.fail = err
+				return false
+			}
+			it.Event.Raw = log
+			return true
+
+		default:
+			return false
+		}
+	}
+	// Iterator still in progress, wait for either a data or an error event
+	select {
+	case log := <-it.logs:
+		it.Event = new(CounterDecremented)
+		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+			it.fail = err
+			return false
+		}
+		it.Event.Raw = log
+		return true
+
+	case err := <-it.sub.Err():
+		it.done = true
+		it.fail = err
+		return it.Next()
+	}
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *CounterDecrementedIterator) Error() error {
+	return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *CounterDecrementedIterator) Close() error {
+	it.sub.Unsubscribe()
+	return nil
+}
+
+// CounterDecremented represents a Decremented event raised by the Counter contract.
+type CounterDecremented struct {
+	Count *big.Int
+	Raw   types.Log // Blockchain specific contextual infos
+}
+
+// FilterDecremented is a free log retrieval operation binding the contract event 0x22ccb5ba3d32a9221c3efe39ffab06d1ddc4bd6684975ea75fa60f95ccff53de.
+//
+// Solidity: event Decremented(int256 count)
+func (_Counter *CounterFilterer) FilterDecremented(opts *bind.FilterOpts) (*CounterDecrementedIterator, error) {
+
+	logs, sub, err := _Counter.contract.FilterLogs(opts, "Decremented")
+	if err != nil {
+		return nil, err
+	}
+	return &CounterDecrementedIterator{contract: _Counter.contract, event: "Decremented", logs: logs, sub: sub}, nil
+}
+
+// WatchDecremented is a free log subscription operation binding the contract event 0x22ccb5ba3d32a9221c3efe39ffab06d1ddc4bd6684975ea75fa60f95ccff53de.
+//
+// Solidity: event Decremented(int256 count)
+func (_Counter *CounterFilterer) WatchDecremented(opts *bind.WatchOpts, sink chan<- *CounterDecremented) (event.Subscription, error) {
+
+	logs, sub, err := _Counter.contract.WatchLogs(opts, "Decremented")
+	if err != nil {
+		return nil, err
+	}
+	return event.NewSubscription(func(quit <-chan struct{}) error {
+		defer sub.Unsubscribe()
+		for {
+			select {
+			case log := <-logs:
+				// New log arrived, parse the event and forward to the user
+				event := new(CounterDecremented)
+				if err := _Counter.contract.UnpackLog(event, "Decremented", log); err != nil {
+					return err
+				}
+				event.Raw = log
+
+				select {
+				case sink <- event:
+				case err := <-sub.Err():
+					return err
+				case <-quit:
+					return nil
+				}
+			case err := <-sub.Err():
+				return err
+			case <-quit:
+				return nil
+			}
+		}
+	}), nil
+}
+
+// ParseDecremented is a log parse operation binding the contract event 0x22ccb5ba3d32a9221c3efe39ffab06d1ddc4bd6684975ea75fa60f95ccff53de.
+//
+// Solidity: event Decremented(int256 count)
+func (_Counter *CounterFilterer) ParseDecremented(log types.Log) (*CounterDecremented, error) {
+	event := new(CounterDecremented)
+	if err := _Counter.contract.UnpackLog(event, "Decremented", log); err != nil {
+		return nil, err
+	}
+	event.Raw = log
+	return event, nil
+}
+
+// CounterIncrementedIterator is returned from FilterIncremented and is used to iterate over the raw logs and unpacked data for Incremented events raised by the Counter contract.
+type CounterIncrementedIterator struct {
+	Event *CounterIncremented // Event containing the contract specifics and raw log
+
+	contract *bind.BoundContract // Generic contract to use for unpacking event data
+	event    string              // Event name to use for unpacking event data
+
+	logs chan types.Log        // Log channel receiving the found contract events
+	sub  ethereum.Subscription // Subscription for errors, completion and termination
+	done bool                  // Whether the subscription completed delivering logs
+	fail error                 // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *CounterIncrementedIterator) Next() bool {
+	// If the iterator failed, stop iterating
+	if it.fail != nil {
+		return false
+	}
+	// If the iterator completed, deliver directly whatever's available
+	if it.done {
+		select {
+		case log := <-it.logs:
+			it.Event = new(CounterIncremented)
+			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+				it.fail = err
+				return false
+			}
+			it.Event.Raw = log
+			return true
+
+		default:
+			return false
+		}
+	}
+	// Iterator still in progress, wait for either a data or an error event
+	select {
+	case log := <-it.logs:
+		it.Event = new(CounterIncremented)
+		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+			it.fail = err
+			return false
+		}
+		it.Event.Raw = log
+		return true
+
+	case err := <-it.sub.Err():
+		it.done = true
+		it.fail = err
+		return it.Next()
+	}
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *CounterIncrementedIterator) Error() error {
+	return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *CounterIncrementedIterator) Close() error {
+	it.sub.Unsubscribe()
+	return nil
+}
+
+// CounterIncremented represents a Incremented event raised by the Counter contract.
+type CounterIncremented struct {
+	Count *big.Int
+	Raw   types.Log // Blockchain specific contextual infos
+}
+
+// FilterIncremented is a free log retrieval operation binding the contract event 0xda0bc8b9b52da793a50e130494716550dab510a10a485be3f1b23d4da60ff4be.
+//
+// Solidity: event Incremented(int256 count)
+func (_Counter *CounterFilterer) FilterIncremented(opts *bind.FilterOpts) (*CounterIncrementedIterator, error) {
+
+	logs, sub, err := _Counter.contract.FilterLogs(opts, "Incremented")
+	if err != nil {
+		return nil, err
+	}
+	return &CounterIncrementedIterator{contract: _Counter.contract, event: "Incremented", logs: logs, sub: sub}, nil
+}
+
+// WatchIncremented is a free log subscription operation binding the contract event 0xda0bc8b9b52da793a50e130494716550dab510a10a485be3f1b23d4da60ff4be.
+//
+// Solidity: event Incremented(int256 count)
+func (_Counter *CounterFilterer) WatchIncremented(opts *bind.WatchOpts, sink chan<- *CounterIncremented) (event.Subscription, error) {
+
+	logs, sub, err := _Counter.contract.WatchLogs(opts, "Incremented")
+	if err != nil {
+		return nil, err
+	}
+	return event.NewSubscription(func(quit <-chan struct{}) error {
+		defer sub.Unsubscribe()
+		for {
+			select {
+			case log := <-logs:
+				// New log arrived, parse the event and forward to the user
+				event := new(CounterIncremented)
+				if err := _Counter.contract.UnpackLog(event, "Incremented", log); err != nil {
+					return err
+				}
+				event.Raw = log
+
+				select {
+				case sink <- event:
+				case err := <-sub.Err():
+					return err
+				case <-quit:
+					return nil
+				}
+			case err := <-sub.Err():
+				return err
+			case <-quit:
+				return nil
+			}
+		}
+	}), nil
+}
+
+// ParseIncremented is a log parse operation binding the contract event 0xda0bc8b9b52da793a50e130494716550dab510a10a485be3f1b23d4da60ff4be.
+//
+// Solidity: event Incremented(int256 count)
+func (_Counter *CounterFilterer) ParseIncremented(log types.Log) (*CounterIncremented, error) {
+	event := new(CounterIncremented)
+	if err := _Counter.contract.UnpackLog(event, "Incremented", log); err != nil {
+		return nil, err
+	}
+	event.Raw = log
+	return event, nil
+}
+
+// CounterIncrementedByUserIterator is returned from FilterIncrementedByUser and is used to iterate over the raw logs and unpacked data for IncrementedByUser events raised by the Counter contract.
+type CounterIncrementedByUserIterator struct {
+	Event *CounterIncrementedByUser // Event containing the contract specifics and raw log
+
+	contract *bind.BoundContract // Generic contract to use for unpacking event data
+	event    string              // Event name to use for unpacking event data
+
+	logs chan types.Log        // Log channel receiving the found contract events
+	sub  ethereum.Subscription // Subscription for errors, completion and termination
+	done bool                  // Whether the subscription completed delivering logs
+	fail error                 // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *CounterIncrementedByUserIterator) Next() bool {
+	// If the iterator failed, stop iterating
+	if it.fail != nil {
+		return false
+	}
+	// If the iterator completed, deliver directly whatever's available
+	if it.done {
+		select {
+		case log := <-it.logs:
+			it.Event = new(CounterIncrementedByUser)
+			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+				it.fail = err
+				return false
+			}
+			it.Event.Raw = log
+			return true
+
+		default:
+			return false
+		}
+	}
+	// Iterator still in progress, wait for either a data or an error event
+	select {
+	case log := <-it.logs:
+		it.Event = new(CounterIncrementedByUser)
+		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+			it.fail = err
+			return false
+		}
+		it.Event.Raw = log
+		return true
+
+	case err := <-it.sub.Err():
+		it.done = true
+		it.fail = err
+		return it.Next()
+	}
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *CounterIncrementedByUserIterator) Error() error {
+	return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *CounterIncrementedByUserIterator) Close() error {
+	it.sub.Unsubscribe()
+	return nil
+}
+
+// CounterIncrementedByUser represents a IncrementedByUser event raised by the Counter contract.
+type CounterIncrementedByUser struct {
+	User  common.Address
+	Count *big.Int
+	Raw   types.Log // Blockchain specific contextual infos
+}
+
+// FilterIncrementedByUser is a free log retrieval operation binding the contract event 0x5832be325e40e91e7a991db4415bdfa9c689e8007072fdb8de3be47757a14557.
+//
+// Solidity: event IncrementedByUser(address user, int256 count)
+func (_Counter *CounterFilterer) FilterIncrementedByUser(opts *bind.FilterOpts) (*CounterIncrementedByUserIterator, error) {
+
+	logs, sub, err := _Counter.contract.FilterLogs(opts, "IncrementedByUser")
+	if err != nil {
+		return nil, err
+	}
+	return &CounterIncrementedByUserIterator{contract: _Counter.contract, event: "IncrementedByUser", logs: logs, sub: sub}, nil
+}
+
+// WatchIncrementedByUser is a free log subscription operation binding the contract event 0x5832be325e40e91e7a991db4415bdfa9c689e8007072fdb8de3be47757a14557.
+//
+// Solidity: event IncrementedByUser(address user, int256 count)
+func (_Counter *CounterFilterer) WatchIncrementedByUser(opts *bind.WatchOpts, sink chan<- *CounterIncrementedByUser) (event.Subscription, error) {
+
+	logs, sub, err := _Counter.contract.WatchLogs(opts, "IncrementedByUser")
+	if err != nil {
+		return nil, err
+	}
+	return event.NewSubscription(func(quit <-chan struct{}) error {
+		defer sub.Unsubscribe()
+		for {
+			select {
+			case log := <-logs:
+				// New log arrived, parse the event and forward to the user
+				event := new(CounterIncrementedByUser)
+				if err := _Counter.contract.UnpackLog(event, "IncrementedByUser", log); err != nil {
+					return err
+				}
+				event.Raw = log
+
+				select {
+				case sink <- event:
+				case err := <-sub.Err():
+					return err
+				case <-quit:
+					return nil
+				}
+			case err := <-sub.Err():
+				return err
+			case <-quit:
+				return nil
+			}
+		}
+	}), nil
+}
+
+// ParseIncrementedByUser is a log parse operation binding the contract event 0x5832be325e40e91e7a991db4415bdfa9c689e8007072fdb8de3be47757a14557.
+//
+// Solidity: event IncrementedByUser(address user, int256 count)
+func (_Counter *CounterFilterer) ParseIncrementedByUser(log types.Log) (*CounterIncrementedByUser, error) {
+	event := new(CounterIncrementedByUser)
+	if err := _Counter.contract.UnpackLog(event, "IncrementedByUser", log); err != nil {
+		return nil, err
+	}
+	event.Raw = log
+	return event, nil
+}
diff --git a/ethergo/example/counter/counter.contractinfo.json b/ethergo/example/counter/counter.contractinfo.json
index 0766d19bc4..3117834c3b 100644
--- a/ethergo/example/counter/counter.contractinfo.json
+++ b/ethergo/example/counter/counter.contractinfo.json
@@ -1 +1 @@
-{"/solidity/counter.sol:Counter":{"code":"0x608060405260008055600060015534801561001957600080fd5b506102b0806100296000396000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c80639f6f1ec1116100505780639f6f1ec11461007e578063a87d942c14610094578063f5c5ad831461009c57600080fd5b80635b34b9661461006c5780636c57353514610076575b600080fd5b6100746100a4565b005b6100746100bd565b6001545b60405190815260200160405180910390f35b600054610082565b610074610151565b60016000808282546100b69190610163565b9091555050565b3373d8da6bf26964af9d7eed9e03e53415d37aa960451461013e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f4f6e6c7920566974616c696b2063616e20636f756e7420627920313000000000604482015260640160405180910390fd5b600a600160008282546100b69190610163565b60016000808282546100b691906101d7565b6000808212827f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0384138115161561019d5761019d61024b565b827f80000000000000000000000000000000000000000000000000000000000000000384128116156101d1576101d161024b565b50500190565b6000808312837f8000000000000000000000000000000000000000000000000000000000000000018312811516156102115761021161024b565b837f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0183138116156102455761024561024b565b50500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fdfea26469706673582212201e03c8b68dcbcef6344fae810afa5d485b33bc238c0e8cb1508114b9c0ca702964736f6c63430008040033","runtime-code":"0x608060405234801561001057600080fd5b50600436106100675760003560e01c80639f6f1ec1116100505780639f6f1ec11461007e578063a87d942c14610094578063f5c5ad831461009c57600080fd5b80635b34b9661461006c5780636c57353514610076575b600080fd5b6100746100a4565b005b6100746100bd565b6001545b60405190815260200160405180910390f35b600054610082565b610074610151565b60016000808282546100b69190610163565b9091555050565b3373d8da6bf26964af9d7eed9e03e53415d37aa960451461013e576040517f08c379a0000000000000000000000000000
00000000000000000000000000000815260206004820152601c60248201527f4f6e6c7920566974616c696b2063616e20636f756e7420627920313000000000604482015260640160405180910390fd5b600a600160008282546100b69190610163565b60016000808282546100b691906101d7565b6000808212827f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0384138115161561019d5761019d61024b565b827f80000000000000000000000000000000000000000000000000000000000000000384128116156101d1576101d161024b565b50500190565b6000808312837f8000000000000000000000000000000000000000000000000000000000000000018312811516156102115761021161024b565b837f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0183138116156102455761024561024b565b50500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fdfea26469706673582212201e03c8b68dcbcef6344fae810afa5d485b33bc238c0e8cb1508114b9c0ca702964736f6c63430008040033","info":{"source":"// SPDX-License-Identifier: MIT\npragma solidity ^0.8.4;\n\ncontract Counter {\n    // this is used for testing account impersonation\n    address constant VITALIK = address(0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045);\n\n    int private count = 0;\n    int private vitaikCount = 0;\n\n    function incrementCounter() public {\n        count += 1;\n    }\n    function decrementCounter() public {\n        count -= 1;\n    }\n\n    function vitalikIncrement() public {\n        require(msg.sender == VITALIK, \"Only Vitalik can count by 10\");\n        vitaikCount += 10;\n    }\n\n    function getCount() public view returns (int) {\n        return count;\n    }\n\n    function getVitalikCount() public view returns (int) {\n        return vitaikCount;\n    }\n}\n","language":"Solidity","languageVersion":"0.8.4","compilerVersion":"0.8.4","compilerOptions":"--combined-json bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc,metadata,hashes --optimize --optimize-runs 10000 --allow-paths ., ./, 
../","srcMap":"57:676:0:-:0;;;239:1;219:21;;272:1;246:27;;57:676;;;;;;;;;;;;;;;;","srcMapRuntime":"57:676:0:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;280:62;;;:::i;:::-;;415:141;;;:::i;643:88::-;713:11;;643:88;;;158:25:1;;;146:2;131:18;643:88:0;;;;;;;562:75;603:3;625:5;562:75;;347:62;;;:::i;280:::-;334:1;325:5;;:10;;;;;;;:::i;:::-;;;;-1:-1:-1;;280:62:0:o;415:141::-;468:10;169:42;468:21;460:62;;;;;;;396:2:1;460:62:0;;;378:21:1;435:2;415:18;;;408:30;474;454:18;;;447:58;522:18;;460:62:0;;;;;;;;547:2;532:11;;:17;;;;;;;:::i;347:62::-;401:1;392:5;;:10;;;;;;;:::i;551:369:1:-;590:3;625;622:1;618:11;736:1;668:66;664:74;661:1;657:82;652:2;645:10;641:99;638:2;;;743:18;;:::i;:::-;862:1;794:66;790:74;787:1;783:82;779:2;775:91;772:2;;;869:18;;:::i;:::-;-1:-1:-1;;905:9:1;;598:322::o;925:372::-;964:4;1000;997:1;993:12;1112:1;1044:66;1040:74;1037:1;1033:82;1028:2;1021:10;1017:99;1014:2;;;1119:18;;:::i;:::-;1238:1;1170:66;1166:74;1163:1;1159:82;1155:2;1151:91;1148:2;;;1245:18;;:::i;:::-;-1:-1:-1;;1282:9:1;;973:324::o;1302:184::-;1354:77;1351:1;1344:88;1451:4;1448:1;1441:15;1475:4;1472:1;1465:15","abiDefinition":[{"inputs":[],"name":"decrementCounter","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"getCount","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getVitalikCount","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"incrementCounter","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"vitalikIncrement","outputs":[],"stateMutability":"nonpayable","type":"function"}],"userDoc":{"kind":"user","methods":{},"version":1},"developerDoc":{"kind":"dev","methods":{},"version":1},"metadata":"{\"compiler\":{\"version\":\"0.8.4+commit.c7e474f2\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"name\":\"decrementCounter\",\"outputs\"
:[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCount\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getVitalikCount\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"incrementCounter\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"vitalikIncrement\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"/solidity/counter.sol\":\"Counter\"},\"evmVersion\":\"istanbul\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":10000},\"remappings\":[]},\"sources\":{\"/solidity/counter.sol\":{\"keccak256\":\"0x42676ddc10b9e27a3896bfe453fc4e32f321449b6f1ad9bd61dc69248df0eea1\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://237ef43b2c1b1136dd04f57595b5e54f952a37bf02b0f56fc62407eea868171d\",\"dweb:/ipfs/QmZnBMnf1xZ2yfwqVjBZRR4W4CtLcLEn1FyHvRgAEShu7k\"]}},\"version\":1}"},"hashes":{"decrementCounter()":"f5c5ad83","getCount()":"a87d942c","getVitalikCount()":"9f6f1ec1","incrementCounter()":"5b34b966","vitalikIncrement()":"6c573535"}}}
\ No newline at end of file
+{"/solidity/counter.sol:Counter":{"code":"0x60a060405260008055600060015534801561001957600080fd5b5043608052608051610390610038600039600060a401526103906000f3fe608060405234801561001057600080fd5b50600436106100725760003560e01c8063a3ec191a11610050578063a3ec191a1461009f578063a87d942c146100c6578063f5c5ad83146100ce57600080fd5b80635b34b966146100775780636c573535146100815780639f6f1ec114610089575b600080fd5b61007f6100d6565b005b61007f610126565b6001545b60405190815260200160405180910390f35b61008d7f000000000000000000000000000000000000000000000000000000000000000081565b60005461008d565b61007f6101f9565b60016000808282546100e89190610243565b90915550506000546040519081527fda0bc8b9b52da793a50e130494716550dab510a10a485be3f1b23d4da60ff4be906020015b60405180910390a1565b3373d8da6bf26964af9d7eed9e03e53415d37aa96045146101a7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f4f6e6c7920566974616c696b2063616e20636f756e7420627920313000000000604482015260640160405180910390fd5b600a600160008282546101ba9190610243565b90915550506001546040805133815260208101929092527f5832be325e40e91e7a991db4415bdfa9c689e8007072fdb8de3be47757a14557910161011c565b600160008082825461020b91906102b7565b90915550506000546040519081527f22ccb5ba3d32a9221c3efe39ffab06d1ddc4bd6684975ea75fa60f95ccff53de9060200161011c565b6000808212827f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0384138115161561027d5761027d61032b565b827f80000000000000000000000000000000000000000000000000000000000000000384128116156102b1576102b161032b565b50500190565b6000808312837f8000000000000000000000000000000000000000000000000000000000000000018312811516156102f1576102f161032b565b837f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0183138116156103255761032561032b565b50500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fdfea2646970667358221220dd6810ba2d049a0f7354bf12f6a017744c806f0470f592679a80ef552f69b85164736f6c63430008040033","runtime-code":"0
x608060405234801561001057600080fd5b50600436106100725760003560e01c8063a3ec191a11610050578063a3ec191a1461009f578063a87d942c146100c6578063f5c5ad83146100ce57600080fd5b80635b34b966146100775780636c573535146100815780639f6f1ec114610089575b600080fd5b61007f6100d6565b005b61007f610126565b6001545b60405190815260200160405180910390f35b61008d7f000000000000000000000000000000000000000000000000000000000000000081565b60005461008d565b61007f6101f9565b60016000808282546100e89190610243565b90915550506000546040519081527fda0bc8b9b52da793a50e130494716550dab510a10a485be3f1b23d4da60ff4be906020015b60405180910390a1565b3373d8da6bf26964af9d7eed9e03e53415d37aa96045146101a7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f4f6e6c7920566974616c696b2063616e20636f756e7420627920313000000000604482015260640160405180910390fd5b600a600160008282546101ba9190610243565b90915550506001546040805133815260208101929092527f5832be325e40e91e7a991db4415bdfa9c689e8007072fdb8de3be47757a14557910161011c565b600160008082825461020b91906102b7565b90915550506000546040519081527f22ccb5ba3d32a9221c3efe39ffab06d1ddc4bd6684975ea75fa60f95ccff53de9060200161011c565b6000808212827f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0384138115161561027d5761027d61032b565b827f80000000000000000000000000000000000000000000000000000000000000000384128116156102b1576102b161032b565b50500190565b6000808312837f8000000000000000000000000000000000000000000000000000000000000000018312811516156102f1576102f161032b565b837f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0183138116156103255761032561032b565b50500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fdfea2646970667358221220dd6810ba2d049a0f7354bf12f6a017744c806f0470f592679a80ef552f69b85164736f6c63430008040033","info":{"source":"// SPDX-License-Identifier: MIT\npragma solidity ^0.8.4;\n\ncontract Counter {\n    // this is used for testing account impersonation\n    address 
constant VITALIK = address(0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045);\n\n    event Incremented(int count);\n    event Decremented(int count);\n    event IncrementedByUser(address user, int count);\n\n    int private count = 0;\n    int private vitaikCount = 0;\n\n    // @dev the block the contract was deployed at\n    uint256 public immutable deployBlock;\n\n    constructor()  {\n        deployBlock = block.number;\n    }\n\n\n    function incrementCounter() public {\n        count += 1;\n        emit Incremented(count);\n    }\n    function decrementCounter() public {\n        count -= 1;\n        emit Decremented(count);\n    }\n\n    function vitalikIncrement() public {\n        require(msg.sender == VITALIK, \"Only Vitalik can count by 10\");\n        vitaikCount += 10;\n        emit IncrementedByUser(msg.sender, vitaikCount);\n    }\n\n    function getCount() public view returns (int) {\n        return count;\n    }\n\n    function getVitalikCount() public view returns (int) {\n        return vitaikCount;\n    }\n}\n","language":"Solidity","languageVersion":"0.8.4","compilerVersion":"0.8.4","compilerOptions":"--combined-json bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc,metadata,hashes --optimize --optimize-runs 10000 --allow-paths ., ./, 
../","srcMap":"57:1081:0:-:0;;;362:1;342:21;;395:1;369:27;;497:58;;;;;;;;;-1:-1:-1;536:12:0;522:26;;57:1081;;;;;;;;;;","srcMapRuntime":"57:1081:0:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;562:95;;;:::i;:::-;;763:198;;;:::i;1048:88::-;1118:11;;1048:88;;;458:25:1;;;446:2;431:18;1048:88:0;;;;;;;454:36;;;;;967:75;1008:3;1030:5;967:75;;662:95;;;:::i;562:::-;616:1;607:5;;:10;;;;;;;:::i;:::-;;;;-1:-1:-1;;644:5:0;;632:18;;458:25:1;;;632:18:0;;446:2:1;431:18;632::0;;;;;;;;562:95::o;763:198::-;816:10;169:42;816:21;808:62;;;;;;;696:2:1;808:62:0;;;678:21:1;735:2;715:18;;;708:30;774;754:18;;;747:58;822:18;;808:62:0;;;;;;;;895:2;880:11;;:17;;;;;;;:::i;:::-;;;;-1:-1:-1;;942:11:0;;912:42;;;930:10;186:74:1;;291:2;276:18;;269:34;;;;912:42:0;;159:18:1;912:42:0;141:168:1;662:95:0;716:1;707:5;;:10;;;;;;;:::i;:::-;;;;-1:-1:-1;;744:5:0;;732:18;;458:25:1;;;732:18:0;;446:2:1;431:18;732::0;413:76:1;1033:369;1072:3;1107;1104:1;1100:11;1218:1;1150:66;1146:74;1143:1;1139:82;1134:2;1127:10;1123:99;1120:2;;;1225:18;;:::i;:::-;1344:1;1276:66;1272:74;1269:1;1265:82;1261:2;1257:91;1254:2;;;1351:18;;:::i;:::-;-1:-1:-1;;1387:9:1;;1080:322::o;1407:372::-;1446:4;1482;1479:1;1475:12;1594:1;1526:66;1522:74;1519:1;1515:82;1510:2;1503:10;1499:99;1496:2;;;1601:18;;:::i;:::-;1720:1;1652:66;1648:74;1645:1;1641:82;1637:2;1633:91;1630:2;;;1727:18;;:::i;:::-;-1:-1:-1;;1764:9:1;;1455:324::o;1784:184::-;1836:77;1833:1;1826:88;1933:4;1930:1;1923:15;1957:4;1954:1;1947:15","abiDefinition":[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"int256","name":"count","type":"int256"}],"name":"Decremented","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"int256","name":"count","type":"int256"}],"name":"Incremented","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"user","type":"address"},{"indexed":false,"internalType":"int256","name":"count","type":
"int256"}],"name":"IncrementedByUser","type":"event"},{"inputs":[],"name":"decrementCounter","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"deployBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getCount","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getVitalikCount","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"incrementCounter","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"vitalikIncrement","outputs":[],"stateMutability":"nonpayable","type":"function"}],"userDoc":{"kind":"user","methods":{},"version":1},"developerDoc":{"kind":"dev","methods":{},"version":1},"metadata":"{\"compiler\":{\"version\":\"0.8.4+commit.c7e474f2\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"count\",\"type\":\"int256\"}],\"name\":\"Decremented\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"count\",\"type\":\"int256\"}],\"name\":\"Incremented\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"user\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"count\",\"type\":\"int256\"}],\"name\":\"IncrementedByUser\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"decrementCounter\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"deployBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCoun
t\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getVitalikCount\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"incrementCounter\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"vitalikIncrement\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"/solidity/counter.sol\":\"Counter\"},\"evmVersion\":\"istanbul\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":10000},\"remappings\":[]},\"sources\":{\"/solidity/counter.sol\":{\"keccak256\":\"0x390b53ff5f95e07097f3bde2e637f0987013723a5694b49068f9f3cf9c6638a9\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://2343068f1a3e9757b578ce9c0e3fa8c6a447f9ae2986b381c25960ab07c4fec8\",\"dweb:/ipfs/QmXLh2vW9mvc8Zfe56eoJzpYkx9fUByeTVwNMDApCKcraH\"]}},\"version\":1}"},"hashes":{"decrementCounter()":"f5c5ad83","deployBlock()":"a3ec191a","getCount()":"a87d942c","getVitalikCount()":"9f6f1ec1","incrementCounter()":"5b34b966","vitalikIncrement()":"6c573535"}}}
\ No newline at end of file
diff --git a/ethergo/example/counter/counter.sol b/ethergo/example/counter/counter.sol
index 581c3ceca8..49da60c0e9 100644
--- a/ethergo/example/counter/counter.sol
+++ b/ethergo/example/counter/counter.sol
@@ -5,19 +5,34 @@ contract Counter {
     // this is used for testing account impersonation
     address constant VITALIK = address(0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045);
 
+    event Incremented(int count);
+    event Decremented(int count);
+    event IncrementedByUser(address user, int count);
+
     int private count = 0;
     int private vitaikCount = 0;
 
+    // @dev the block the contract was deployed at
+    uint256 public immutable deployBlock;
+
+    constructor()  {
+        deployBlock = block.number;
+    }
+
+
     function incrementCounter() public {
         count += 1;
+        emit Incremented(count);
     }
     function decrementCounter() public {
         count -= 1;
+        emit Decremented(count);
     }
 
     function vitalikIncrement() public {
         require(msg.sender == VITALIK, "Only Vitalik can count by 10");
         vitaikCount += 10;
+        emit IncrementedByUser(msg.sender, vitaikCount);
     }
 
     function getCount() public view returns (int) {
diff --git a/ethergo/example/deploymanager.go b/ethergo/example/deploymanager.go
new file mode 100644
index 0000000000..8f86031b7d
--- /dev/null
+++ b/ethergo/example/deploymanager.go
@@ -0,0 +1,30 @@
+package example
+
+import (
+	"context"
+	"github.com/synapsecns/sanguine/ethergo/backends"
+	"github.com/synapsecns/sanguine/ethergo/contracts"
+	"github.com/synapsecns/sanguine/ethergo/example/counter"
+	"github.com/synapsecns/sanguine/ethergo/manager"
+	"testing"
+)
+
+// DeployManager wraps DeployerManager and allows typed contract handles to be returned.
+type DeployManager struct {
+	*manager.DeployerManager
+}
+
+// NewDeployManager creates a new DeployManager.
+func NewDeployManager(t *testing.T) *DeployManager {
+	t.Helper()
+
+	parentManager := manager.NewDeployerManager(t, NewCounterDeployer)
+	return &DeployManager{parentManager}
+}
+
+// GetCounter gets the pre-created counter.
+func (d *DeployManager) GetCounter(ctx context.Context, backend backends.SimulatedTestBackend) (contract contracts.DeployedContract, handle *counter.CounterRef) {
+	d.T().Helper()
+
+	return manager.GetContract[*counter.CounterRef](ctx, d.T(), d, backend, CounterType)
+}
diff --git a/ethergo/listener/db/doc.go b/ethergo/listener/db/doc.go
new file mode 100644
index 0000000000..cf130b4543
--- /dev/null
+++ b/ethergo/listener/db/doc.go
@@ -0,0 +1,2 @@
+// Package db provides the database layer for the chain listener.
+package db
diff --git a/ethergo/listener/db/service.go b/ethergo/listener/db/service.go
new file mode 100644
index 0000000000..fb2e7d922c
--- /dev/null
+++ b/ethergo/listener/db/service.go
@@ -0,0 +1,41 @@
+package db
+
+import (
+	"context"
+	"gorm.io/gorm"
+	"time"
+)
+
+// ChainListenerDB is the interface for the chain listener database.
+type ChainListenerDB interface {
+	// PutLatestBlock upserts the latest block on a given chain id to the new height.
+	PutLatestBlock(ctx context.Context, chainID, height uint64) error
+	// LatestBlockForChain gets the latest block for a given chain id.
+	// will return ErrNoLatestBlockForChainID if no block exists for the chain.
+	LatestBlockForChain(ctx context.Context, chainID uint64) (uint64, error)
+}
+
+// LastIndexed is used to make sure we haven't missed any events while offline.
+// since we event source - rather than use a state machine this is needed to make sure we haven't missed any events
+// by allowing us to go back and source any events we may have missed.
+//
+// this does not inherit from gorm.model to allow us to use ChainID as a primary key.
+type LastIndexed struct {
+	// CreatedAt is the creation time
+	CreatedAt time.Time
+	// UpdatedAt is the update time
+	UpdatedAt time.Time
+	// DeletedAt time
+	DeletedAt gorm.DeletedAt `gorm:"index"`
+	// ChainID is the chain id of the chain we're watching blocks on. This is our primary index.
+	ChainID uint64 `gorm:"column:chain_id;primaryKey;autoIncrement:false"`
+	// BlockNumber is the highest block number we've seen on the chain
+	BlockNumber int `gorm:"block_number"`
+}
+
+// GetAllModels gets all models to migrate
+// see: https://medium.com/@SaifAbid/slice-interfaces-8c78f8b6345d for an explanation of why we can't do this at initialization time
+func GetAllModels() (allModels []interface{}) {
+	allModels = []interface{}{&LastIndexed{}}
+	return allModels
+}
diff --git a/ethergo/listener/db/store.go b/ethergo/listener/db/store.go
new file mode 100644
index 0000000000..396d2177e7
--- /dev/null
+++ b/ethergo/listener/db/store.go
@@ -0,0 +1,71 @@
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/synapsecns/sanguine/core/dbcommon"
+	"github.com/synapsecns/sanguine/core/metrics"
+	"gorm.io/gorm"
+	"gorm.io/gorm/clause"
+)
+
+// NewChainListenerStore creates a new transaction store.
+func NewChainListenerStore(db *gorm.DB, metrics metrics.Handler) *Store {
+	return &Store{
+		db:      db,
+		metrics: metrics,
+	}
+}
+
+// Store is the sqlite store. It extends the base store for sqlite specific queries.
+type Store struct {
+	db      *gorm.DB
+	metrics metrics.Handler
+}
+
+// PutLatestBlock upserts the latest block into the database.
+func (s Store) PutLatestBlock(ctx context.Context, chainID, height uint64) error {
+	tx := s.db.WithContext(ctx).Clauses(clause.OnConflict{
+		Columns:   []clause.Column{{Name: chainIDFieldName}},
+		DoUpdates: clause.AssignmentColumns([]string{chainIDFieldName, blockNumberFieldName}),
+	}).Create(&LastIndexed{
+		ChainID:     chainID,
+		BlockNumber: int(height),
+	})
+
+	if tx.Error != nil {
+		return fmt.Errorf("could not block updated: %w", tx.Error)
+	}
+	return nil
+}
+
+// LatestBlockForChain gets the latest block for a chain.
+func (s Store) LatestBlockForChain(ctx context.Context, chainID uint64) (uint64, error) {
+	blockWatchModel := LastIndexed{ChainID: chainID}
+	err := s.db.WithContext(ctx).First(&blockWatchModel).Error
+	if err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
+			return 0, ErrNoLatestBlockForChainID
+		}
+		return 0, fmt.Errorf("could not fetch latest block: %w", err)
+	}
+
+	return uint64(blockWatchModel.BlockNumber), nil
+}
+
+func init() {
+	namer := dbcommon.NewNamer(GetAllModels())
+	chainIDFieldName = namer.GetConsistentName("ChainID")
+	blockNumberFieldName = namer.GetConsistentName("BlockNumber")
+}
+
+var (
+	// chainIDFieldName is the name of the chain id field.
+	chainIDFieldName string
+	// blockNumberFieldName is the name of the block number field.
+	blockNumberFieldName string
+)
+
+// ErrNoLatestBlockForChainID is returned when no block exists for the chain.
+var ErrNoLatestBlockForChainID = errors.New("no latest block for chainId")
diff --git a/services/rfq/relayer/listener/doc.go b/ethergo/listener/doc.go
similarity index 100%
rename from services/rfq/relayer/listener/doc.go
rename to ethergo/listener/doc.go
diff --git a/services/rfq/relayer/listener/export_test.go b/ethergo/listener/export_test.go
similarity index 65%
rename from services/rfq/relayer/listener/export_test.go
rename to ethergo/listener/export_test.go
index 060e7611c1..16c1a3faf1 100644
--- a/services/rfq/relayer/listener/export_test.go
+++ b/ethergo/listener/export_test.go
@@ -5,8 +5,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/synapsecns/sanguine/core/metrics"
 	"github.com/synapsecns/sanguine/ethergo/client"
-	"github.com/synapsecns/sanguine/services/rfq/contracts/fastbridge"
-	"github.com/synapsecns/sanguine/services/rfq/relayer/reldb"
+	"github.com/synapsecns/sanguine/ethergo/listener/db"
 )
 
 // TestChainListener wraps chain listener for testing.
@@ -21,18 +20,19 @@ func (c chainListener) GetMetadata(ctx context.Context) (startBlock, chainID uin
 }
 
 type TestChainListenerArgs struct {
-	Address  common.Address
-	Client   client.EVM
-	Contract *fastbridge.FastBridgeRef
-	Store    reldb.Service
-	Handler  metrics.Handler
+	Address      common.Address
+	InitialBlock uint64
+	Client       client.EVM
+	Store        db.ChainListenerDB
+	Handler      metrics.Handler
 }
 
 func NewTestChainListener(args TestChainListenerArgs) TestChainListener {
 	return &chainListener{
-		client:   args.Client,
-		contract: args.Contract,
-		store:    args.Store,
-		handler:  args.Handler,
+		client:       args.Client,
+		address:      args.Address,
+		initialBlock: args.InitialBlock,
+		store:        args.Store,
+		handler:      args.Handler,
 	}
 }
diff --git a/services/rfq/relayer/listener/listener.go b/ethergo/listener/listener.go
similarity index 82%
rename from services/rfq/relayer/listener/listener.go
rename to ethergo/listener/listener.go
index a876edf31a..3a9fa00749 100644
--- a/services/rfq/relayer/listener/listener.go
+++ b/ethergo/listener/listener.go
@@ -4,21 +4,20 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	db2 "github.com/synapsecns/sanguine/ethergo/listener/db"
+	"math/big"
+	"time"
+
 	"github.com/ethereum/go-ethereum"
-	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ipfs/go-log"
 	"github.com/jpillora/backoff"
 	"github.com/synapsecns/sanguine/core/metrics"
 	"github.com/synapsecns/sanguine/ethergo/client"
-	"github.com/synapsecns/sanguine/services/rfq/contracts/fastbridge"
-	"github.com/synapsecns/sanguine/services/rfq/relayer/reldb"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"
 	"golang.org/x/sync/errgroup"
-	"math/big"
-	"time"
 )
 
 // ContractListener listens for chain events and calls HandleLog.
@@ -37,11 +36,12 @@ type ContractListener interface {
 type HandleLog func(ctx context.Context, log types.Log) error
 
 type chainListener struct {
-	client   client.EVM
-	contract *fastbridge.FastBridgeRef
-	store    reldb.Service
-	handler  metrics.Handler
-	backoff  *backoff.Backoff
+	client       client.EVM
+	address      common.Address
+	initialBlock uint64
+	store        db2.ChainListenerDB
+	handler      metrics.Handler
+	backoff      *backoff.Backoff
 	// IMPORTANT! These fields cannot be used until they has been set. They are NOT
 	// set in the constructor
 	startBlock, chainID, latestBlock uint64
@@ -49,21 +49,21 @@ type chainListener struct {
 	// latestBlock         uint64
 }
 
-var logger = log.Logger("chainlistener-logger")
+var (
+	logger = log.Logger("chainlistener-logger")
+	// ErrNoLatestBlockForChainID is returned when no block exists for the chain.
+	ErrNoLatestBlockForChainID = db2.ErrNoLatestBlockForChainID
+)
 
 // NewChainListener creates a new chain listener.
-func NewChainListener(omnirpcClient client.EVM, store reldb.Service, address common.Address, handler metrics.Handler) (ContractListener, error) {
-	fastBridge, err := fastbridge.NewFastBridgeRef(address, omnirpcClient)
-	if err != nil {
-		return nil, fmt.Errorf("could not create fast bridge contract: %w", err)
-	}
-
+func NewChainListener(omnirpcClient client.EVM, store db2.ChainListenerDB, address common.Address, initialBlock uint64, handler metrics.Handler) (ContractListener, error) {
 	return &chainListener{
-		handler:  handler,
-		store:    store,
-		client:   omnirpcClient,
-		contract: fastBridge,
-		backoff:  newBackoffConfig(),
+		handler:      handler,
+		address:      address,
+		initialBlock: initialBlock,
+		store:        store,
+		client:       omnirpcClient,
+		backoff:      newBackoffConfig(),
 	}, nil
 }
 
@@ -91,13 +91,12 @@ func (c *chainListener) Listen(ctx context.Context, handler HandleLog) (err erro
 			if err != nil {
 				logger.Warn(err)
 			}
-
 		}
 	}
 }
 
 func (c *chainListener) Address() common.Address {
-	return c.contract.Address()
+	return c.address
 }
 
 func (c *chainListener) LatestBlock() uint64 {
@@ -125,9 +124,8 @@ func (c *chainListener) doPoll(parentCtx context.Context, handler HandleLog) (er
 	}
 
 	// Check if latest block is the same as start block (for chains with slow block times)
-
 	if c.latestBlock == c.startBlock {
-		return
+		return nil
 	}
 
 	// Handle if the listener is more than one get logs range behind the head
@@ -164,7 +162,7 @@ func (c *chainListener) doPoll(parentCtx context.Context, handler HandleLog) (er
 }
 
 func (c chainListener) getMetadata(parentCtx context.Context) (startBlock, chainID uint64, err error) {
-	var deployBlock, lastIndexed uint64
+	var lastIndexed uint64
 	ctx, span := c.handler.Tracer().Start(parentCtx, "getMetadata")
 
 	defer func() {
@@ -174,16 +172,6 @@ func (c chainListener) getMetadata(parentCtx context.Context) (startBlock, chain
 	// TODO: consider some kind of backoff here in case rpcs are down at boot.
 	// this becomes more of an issue as we add more chains
 	g, ctx := errgroup.WithContext(ctx)
-	g.Go(func() error {
-		deployBlock, err := c.contract.DeployBlock(&bind.CallOpts{Context: ctx})
-		if err != nil {
-			return fmt.Errorf("could not get deploy block: %w", err)
-		}
-
-		startBlock = deployBlock.Uint64()
-		return nil
-	})
-
 	g.Go(func() error {
 		// TODO: one thing I've been going back and forth on is whether or not this method should be chain aware
 		// passing in the chain ID would allow us to pull everything directly from the config, but be less testable
@@ -197,7 +185,7 @@ func (c chainListener) getMetadata(parentCtx context.Context) (startBlock, chain
 		chainID = rpcChainID.Uint64()
 
 		lastIndexed, err = c.store.LatestBlockForChain(ctx, chainID)
-		if errors.Is(err, reldb.ErrNoLatestBlockForChainID) {
+		if errors.Is(err, ErrNoLatestBlockForChainID) {
 			// TODO: consider making this negative 1, requires type change
 			lastIndexed = 0
 			return nil
@@ -213,8 +201,10 @@ func (c chainListener) getMetadata(parentCtx context.Context) (startBlock, chain
 		return 0, 0, fmt.Errorf("could not get metadata: %w", err)
 	}
 
-	if lastIndexed > deployBlock {
+	if lastIndexed > c.startBlock {
 		startBlock = lastIndexed
+	} else {
+		startBlock = c.initialBlock
 	}
 
 	return startBlock, chainID, nil
@@ -233,6 +223,6 @@ func (c chainListener) buildFilterQuery(fromBlock, toBlock uint64) ethereum.Filt
 	return ethereum.FilterQuery{
 		FromBlock: new(big.Int).SetUint64(fromBlock),
 		ToBlock:   new(big.Int).SetUint64(toBlock),
-		Addresses: []common.Address{c.contract.Address()},
+		Addresses: []common.Address{c.address},
 	}
 }
diff --git a/ethergo/listener/listener_test.go b/ethergo/listener/listener_test.go
new file mode 100644
index 0000000000..24b314afa9
--- /dev/null
+++ b/ethergo/listener/listener_test.go
@@ -0,0 +1,58 @@
+package listener_test
+
+import (
+	"context"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/synapsecns/sanguine/ethergo/listener"
+	"sync"
+)
+
+func (l *ListenerTestSuite) TestListenForEvents() {
+	_, handle := l.manager.GetCounter(l.GetTestContext(), l.backend)
+	var wg sync.WaitGroup
+	const iterations = 10
+	for i := 0; i < iterations; i++ {
+		i := i
+		wg.Add(1)
+		go func(_ int) {
+			defer wg.Done()
+
+			auth := l.backend.GetTxContext(l.GetTestContext(), nil)
+
+			//nolint:typecheck
+			bridgeRequestTX, err := handle.IncrementCounter(auth.TransactOpts)
+			l.NoError(err)
+			l.NotNil(bridgeRequestTX)
+
+			l.backend.WaitForConfirmation(l.GetTestContext(), bridgeRequestTX)
+
+			bridgeResponseTX, err := handle.DecrementCounter(auth.TransactOpts)
+			l.NoError(err)
+			l.NotNil(bridgeResponseTX)
+			l.backend.WaitForConfirmation(l.GetTestContext(), bridgeResponseTX)
+		}(i)
+	}
+
+	wg.Wait()
+
+	startBlock, err := handle.DeployBlock(&bind.CallOpts{Context: l.GetTestContext()})
+	l.NoError(err)
+
+	cl, err := listener.NewChainListener(l.backend, l.store, handle.Address(), uint64(startBlock.Int64()), l.metrics)
+	l.NoError(err)
+
+	eventCount := 0
+
+	// TODO: check for timeout, but it will be extremely obvious if it gets hit.
+	listenCtx, cancel := context.WithCancel(l.GetTestContext())
+	_ = cl.Listen(listenCtx, func(ctx context.Context, log types.Log) error {
+		eventCount++
+
+		if eventCount == iterations*2 {
+			cancel()
+		}
+
+		return nil
+	})
+}
diff --git a/ethergo/listener/suite_test.go b/ethergo/listener/suite_test.go
new file mode 100644
index 0000000000..de38f6594e
--- /dev/null
+++ b/ethergo/listener/suite_test.go
@@ -0,0 +1,157 @@
+package listener_test
+
+import (
+	"context"
+	"fmt"
+	"github.com/brianvoe/gofakeit/v6"
+	"github.com/ipfs/go-log"
+	common_base "github.com/synapsecns/sanguine/core/dbcommon"
+	"github.com/synapsecns/sanguine/ethergo/example"
+	"github.com/synapsecns/sanguine/ethergo/example/counter"
+	"github.com/synapsecns/sanguine/ethergo/listener"
+	db2 "github.com/synapsecns/sanguine/ethergo/listener/db"
+	"gorm.io/gorm"
+	"gorm.io/gorm/schema"
+	"math/big"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/Flaque/filet"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/stretchr/testify/suite"
+	"github.com/synapsecns/sanguine/core/metrics"
+	"github.com/synapsecns/sanguine/core/testsuite"
+	"github.com/synapsecns/sanguine/ethergo/backends"
+	"github.com/synapsecns/sanguine/ethergo/backends/geth"
+	"gorm.io/driver/sqlite"
+)
+
+const chainID = 10
+
+type ListenerTestSuite struct {
+	*testsuite.TestSuite
+	manager *example.DeployManager
+	backend backends.SimulatedTestBackend
+	store   db2.ChainListenerDB
+	metrics metrics.Handler
+	counter *counter.CounterRef
+}
+
+func NewListenerSuite(tb testing.TB) *ListenerTestSuite {
+	tb.Helper()
+
+	return &ListenerTestSuite{
+		TestSuite: testsuite.NewTestSuite(tb),
+	}
+}
+
+func TestListenerSuite(t *testing.T) {
+	suite.Run(t, NewListenerSuite(t))
+}
+
+func (l *ListenerTestSuite) SetupTest() {
+	l.TestSuite.SetupTest()
+
+	l.manager = example.NewDeployManager(l.T())
+	l.backend = geth.NewEmbeddedBackendForChainID(l.GetTestContext(), l.T(), big.NewInt(chainID))
+	var err error
+	l.metrics = metrics.NewNullHandler()
+	l.store, err = NewSqliteStore(l.GetTestContext(), filet.TmpDir(l.T(), ""), l.metrics)
+	l.Require().NoError(err)
+
+	_, l.counter = l.manager.GetCounter(l.GetTestContext(), l.backend)
+}
+
+func (l *ListenerTestSuite) TestGetMetadataNoStore() {
+	deployBlock, err := l.counter.DeployBlock(&bind.CallOpts{Context: l.GetTestContext()})
+	l.NoError(err)
+
+	// nothing stored, should use start block
+	cl := listener.NewTestChainListener(listener.TestChainListenerArgs{
+		Address:      l.counter.Address(),
+		InitialBlock: deployBlock.Uint64(),
+		Client:       l.backend,
+		Store:        l.store,
+		Handler:      l.metrics,
+	})
+
+	startBlock, myChainID, err := cl.GetMetadata(l.GetTestContext())
+	l.NoError(err)
+	l.Equal(myChainID, uint64(chainID))
+	l.Equal(startBlock, deployBlock.Uint64())
+}
+
+func (l *ListenerTestSuite) TestStartBlock() {
+	cl := listener.NewTestChainListener(listener.TestChainListenerArgs{
+		Address: l.counter.Address(),
+		Client:  l.backend,
+		Store:   l.store,
+		Handler: l.metrics,
+	})
+
+	deployBlock, err := l.counter.DeployBlock(&bind.CallOpts{Context: l.GetTestContext()})
+	l.NoError(err)
+
+	expectedLastIndexed := deployBlock.Uint64() + 10
+	err = l.store.PutLatestBlock(l.GetTestContext(), chainID, expectedLastIndexed)
+	l.NoError(err)
+
+	startBlock, cid, err := cl.GetMetadata(l.GetTestContext())
+	l.NoError(err)
+	l.Equal(cid, uint64(chainID))
+	l.Equal(startBlock, expectedLastIndexed)
+}
+
+func (l *ListenerTestSuite) TestListen() {
+
+}
+
+// NewSqliteStore creates a new sqlite data store.
+func NewSqliteStore(parentCtx context.Context, dbPath string, handler metrics.Handler) (_ *db2.Store, err error) {
+	logger := log.Logger("sqlite-store")
+
+	logger.Debugf("creating sqlite store at %s", dbPath)
+
+	ctx, span := handler.Tracer().Start(parentCtx, "start-sqlite")
+	defer func() {
+		metrics.EndSpanWithErr(span, err)
+	}()
+
+	// create the directory to the store if it doesn't exist
+	err = os.MkdirAll(dbPath, os.ModePerm)
+	if err != nil {
+		return nil, fmt.Errorf("could not create sqlite store")
+	}
+
+	logger.Warnf("submitter database is at %s/synapse.db", dbPath)
+
+	namingStrategy := schema.NamingStrategy{
+		TablePrefix: fmt.Sprintf("test%d_%d_", gofakeit.Int64(), time.Now().Unix()),
+	}
+
+	gdb, err := gorm.Open(sqlite.Open(fmt.Sprintf("%s/%s", dbPath, "synapse.db")), &gorm.Config{
+		DisableForeignKeyConstraintWhenMigrating: true,
+		Logger:                                   common_base.GetGormLogger(logger),
+		FullSaveAssociations:                     true,
+		SkipDefaultTransaction:                   true,
+		NamingStrategy:                           namingStrategy,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not connect to db %s: %w", dbPath, err)
+	}
+
+	err = gdb.AutoMigrate(&db2.LastIndexed{})
+	if err != nil {
+		return nil, fmt.Errorf("could not migrate models: %w", err)
+	}
+
+	handler.AddGormCallbacks(gdb)
+
+	err = gdb.WithContext(ctx).AutoMigrate(db2.GetAllModels()...)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not migrate models: %w", err)
+	}
+	return db2.NewChainListenerStore(gdb, handler), nil
+}
diff --git a/make/repo.Makefile b/make/repo.Makefile
index 82eb90ad48..67f889f9d0 100644
--- a/make/repo.Makefile
+++ b/make/repo.Makefile
@@ -30,6 +30,9 @@ tidy: ## Runs go mod tidy on all go.mod files in the repo
 	go work sync
 	$(GIT_ROOT)/make/scripts/tidy.sh
 
+lint-go: ## Runs make lint in all go.mod files in the repo.
+	$(GIT_ROOT)/make/scripts/lint.sh
+
 docker-clean: ## stops and removes all containers at once
 	docker ps -aq | xargs docker stop | xargs docker rm
 	docker network prune
diff --git a/make/scripts/lint.sh b/make/scripts/lint.sh
new file mode 100755
index 0000000000..ce08255ac4
--- /dev/null
+++ b/make/scripts/lint.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -e
+find . -name go.mod -print0 | while IFS= read -r -d '' f; do
+  echo "linting $(dirname "$f")"
+  (cd "$(dirname "$f")" || exit; go mod tidy)
+done
diff --git a/packages/explorer-ui/CHANGELOG.md b/packages/explorer-ui/CHANGELOG.md
index 2edad23376..a06eebcbf6 100644
--- a/packages/explorer-ui/CHANGELOG.md
+++ b/packages/explorer-ui/CHANGELOG.md
@@ -3,6 +3,14 @@
 All notable changes to this project will be documented in this file.
 See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
 
+## [0.1.37](https://github.com/synapsecns/sanguine/compare/@synapsecns/explorer-ui@0.1.36...@synapsecns/explorer-ui@0.1.37) (2024-03-02)
+
+**Note:** Version bump only for package @synapsecns/explorer-ui
+
+
+
+
+
 ## [0.1.36](https://github.com/synapsecns/sanguine/compare/@synapsecns/explorer-ui@0.1.35...@synapsecns/explorer-ui@0.1.36) (2024-02-13)
 
 **Note:** Version bump only for package @synapsecns/explorer-ui
diff --git a/packages/explorer-ui/graphql/queries/index.ts b/packages/explorer-ui/graphql/queries/index.ts
index c5c588e50a..227d739766 100644
--- a/packages/explorer-ui/graphql/queries/index.ts
+++ b/packages/explorer-ui/graphql/queries/index.ts
@@ -208,6 +208,7 @@ export const DAILY_STATISTICS_BY_CHAIN = gql`
       canto
       dogechain
       base
+      blast
       total
     }
   }
diff --git a/packages/explorer-ui/package.json b/packages/explorer-ui/package.json
index f4ce04d77c..a25e064358 100644
--- a/packages/explorer-ui/package.json
+++ b/packages/explorer-ui/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@synapsecns/explorer-ui",
-  "version": "0.1.36",
+  "version": "0.1.37",
   "private": true,
   "engines": {
     "node": ">=16.0.0"
diff --git a/packages/rest-api/CHANGELOG.md b/packages/rest-api/CHANGELOG.md
index e13f9defbc..fd32596587 100644
--- a/packages/rest-api/CHANGELOG.md
+++ b/packages/rest-api/CHANGELOG.md
@@ -3,6 +3,14 @@
 All notable changes to this project will be documented in this file.
 See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
 
+## [1.0.56](https://github.com/synapsecns/sanguine/compare/@synapsecns/rest-api@1.0.55...@synapsecns/rest-api@1.0.56) (2024-03-04)
+
+**Note:** Version bump only for package @synapsecns/rest-api
+
+
+
+
+
 ## [1.0.55](https://github.com/synapsecns/sanguine/compare/@synapsecns/rest-api@1.0.54...@synapsecns/rest-api@1.0.55) (2024-03-01)
 
 **Note:** Version bump only for package @synapsecns/rest-api
diff --git a/packages/rest-api/package.json b/packages/rest-api/package.json
index 553afef17b..114d053a09 100644
--- a/packages/rest-api/package.json
+++ b/packages/rest-api/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@synapsecns/rest-api",
-  "version": "1.0.55",
+  "version": "1.0.56",
   "private": "true",
   "engines": {
     "node": ">=16.0.0"
@@ -23,7 +23,7 @@
     "@ethersproject/bignumber": "^5.7.0",
     "@ethersproject/providers": "^5.7.2",
     "@ethersproject/units": "5.7.0",
-    "@synapsecns/sdk-router": "^0.3.27",
+    "@synapsecns/sdk-router": "^0.3.28",
     "bignumber": "^1.1.0",
     "ethers": "5.7.2",
     "express": "^4.18.2",
diff --git a/packages/sdk-router/CHANGELOG.md b/packages/sdk-router/CHANGELOG.md
index 725f40155e..ef97622853 100644
--- a/packages/sdk-router/CHANGELOG.md
+++ b/packages/sdk-router/CHANGELOG.md
@@ -3,6 +3,14 @@
 All notable changes to this project will be documented in this file.
 See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
 
+## [0.3.28](https://github.com/synapsecns/sanguine/compare/@synapsecns/sdk-router@0.3.27...@synapsecns/sdk-router@0.3.28) (2024-03-04)
+
+**Note:** Version bump only for package @synapsecns/sdk-router
+
+
+
+
+
 ## [0.3.27](https://github.com/synapsecns/sanguine/compare/@synapsecns/sdk-router@0.3.26...@synapsecns/sdk-router@0.3.27) (2024-03-01)
 
 **Note:** Version bump only for package @synapsecns/sdk-router
diff --git a/packages/sdk-router/package.json b/packages/sdk-router/package.json
index 8b07540b84..4372638823 100644
--- a/packages/sdk-router/package.json
+++ b/packages/sdk-router/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@synapsecns/sdk-router",
   "description": "An SDK for interacting with the Synapse Protocol",
-  "version": "0.3.27",
+  "version": "0.3.28",
   "license": "MIT",
   "main": "dist/index.js",
   "typings": "dist/index.d.ts",
diff --git a/packages/sdk-router/src/constants/addresses.ts b/packages/sdk-router/src/constants/addresses.ts
index 74b49c3170..42f4e02443 100644
--- a/packages/sdk-router/src/constants/addresses.ts
+++ b/packages/sdk-router/src/constants/addresses.ts
@@ -2,6 +2,7 @@ import {
   CCTP_SUPPORTED_CHAIN_IDS,
   RFQ_SUPPORTED_CHAIN_IDS,
   SUPPORTED_CHAIN_IDS,
+  SupportedChainId,
 } from './chainIds'
 
 export type AddressMap = {
@@ -33,8 +34,9 @@ const generateAddressMap = (
  * SynapseRouter contract address for all chains except ones from ROUTER_EXCEPTION_MAP.
  */
 const ROUTER_ADDRESS = '0x7E7A0e201FD38d3ADAA9523Da6C109a07118C96a'
-const ROUTER_EXCEPTION_MAP: AddressMap = {}
-
+const ROUTER_EXCEPTION_MAP: AddressMap = {
+  [SupportedChainId.BLAST]: '0x0000000000365b1d5B142732CF4d33BcddED21Fc',
+}
 export const ROUTER_ADDRESS_MAP: AddressMap = generateAddressMap(
   SUPPORTED_CHAIN_IDS,
   ROUTER_ADDRESS,
@@ -46,7 +48,6 @@ export const ROUTER_ADDRESS_MAP: AddressMap = generateAddressMap(
  */
 const CCTP_ROUTER_ADDRESS = '0xd5a597d6e7ddf373a92C8f477DAAA673b0902F48'
 const CCTP_ROUTER_EXCEPTION_MAP: AddressMap = {}
-
 export const CCTP_ROUTER_ADDRESS_MAP: AddressMap = generateAddressMap(
   CCTP_SUPPORTED_CHAIN_IDS,
   CCTP_ROUTER_ADDRESS,
diff --git a/packages/solidity-devops/CHANGELOG.md b/packages/solidity-devops/CHANGELOG.md
index 01143ed87b..ed857b4e92 100644
--- a/packages/solidity-devops/CHANGELOG.md
+++ b/packages/solidity-devops/CHANGELOG.md
@@ -3,6 +3,26 @@
 All notable changes to this project will be documented in this file.
 See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
 
+## [0.1.5](https://github.com/synapsecns/sanguine/compare/@synapsecns/solidity-devops@0.2.0...@synapsecns/solidity-devops@0.1.5) (2024-03-06)
+
+
+### Reverts
+
+* Revert "feat(solidity-devops): print init code hash before CREATE2 deployment" ([8b842f8](https://github.com/synapsecns/sanguine/commit/8b842f8fbdc036d647a9fc4eb668b01d9d03aa6b))
+* Revert "Publish" ([ed1de44](https://github.com/synapsecns/sanguine/commit/ed1de4437ae4426c929b514b06116ea624311465))
+
+
+
+
+
+## [0.1.4](https://github.com/synapsecns/sanguine/compare/@synapsecns/solidity-devops@0.1.3...@synapsecns/solidity-devops@0.1.4) (2024-03-04)
+
+**Note:** Version bump only for package @synapsecns/solidity-devops
+
+
+
+
+
 ## [0.1.3](https://github.com/synapsecns/sanguine/compare/@synapsecns/solidity-devops@0.1.2...@synapsecns/solidity-devops@0.1.3) (2024-03-01)
 
 **Note:** Version bump only for package @synapsecns/solidity-devops
diff --git a/packages/solidity-devops/js/forgeScriptRun.js b/packages/solidity-devops/js/forgeScriptRun.js
index b185f0f97a..dee50e3906 100755
--- a/packages/solidity-devops/js/forgeScriptRun.js
+++ b/packages/solidity-devops/js/forgeScriptRun.js
@@ -11,6 +11,7 @@ const {
 } = require('./utils/deployments.js')
 const { loadEnv } = require('./utils/env.js')
 const { forgeScript } = require('./utils/forge.js')
+const { logInfo } = require('./utils/logger.js')
 const {
   parseCommandLineArgs,
   isBroadcasted,
@@ -50,6 +51,10 @@ const currentTimestamp = Date.now()
 forgeScript(scriptFN, forgeOptions)
 
 const newDeployments = getNewDeployments(chainName, currentTimestamp)
+if (newDeployments.length === 0) {
+  logInfo('No new deployments found')
+  process.exit(0)
+}
 const newReceipts = getNewDeploymentReceipts(chainName, scriptFN)
 newDeployments.forEach((contractAlias) => {
   const artifact = getConfirmedFreshDeployment(chainName, contractAlias)
diff --git a/packages/solidity-devops/js/utils/utils.js b/packages/solidity-devops/js/utils/utils.js
index b82c221c47..623068d56d 100644
--- a/packages/solidity-devops/js/utils/utils.js
+++ b/packages/solidity-devops/js/utils/utils.js
@@ -1,7 +1,7 @@
 const fs = require('fs')
 const { execSync } = require('child_process')
 
-const { logCommand } = require('./logger.js')
+const { logCommand, logError } = require('./logger.js')
 
 /**
  * Asserts that a condition is true. If not, logs an error message and exits the process.
@@ -57,16 +57,19 @@ const getCommandOutput = (command) => {
 }
 
 /**
- * Runs a command (printing its output to the console), and exits the process if it fails.
+ * Runs a command (printing its output to the console). If the command fails, logs an error message.
  *
  * @param {string} command - The command to run
+ * @returns {bool} Whether the command succeeded
  */
-const runCommand = (command, ig) => {
+const runCommand = (command) => {
   try {
     logCommand(`${command}`)
     execSync(command, { stdio: 'inherit' })
+    return true
   } catch (error) {
-    process.exit(1)
+    logError(`Command failed: ${command}`)
+    return false
   }
 }
 
diff --git a/packages/solidity-devops/package.json b/packages/solidity-devops/package.json
index abf2f0f556..7b415ce63f 100644
--- a/packages/solidity-devops/package.json
+++ b/packages/solidity-devops/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@synapsecns/solidity-devops",
-  "version": "0.1.3",
+  "version": "0.1.5",
   "description": "A collection of utils to effortlessly test, deploy and maintain the smart contracts on EVM compatible blockchains",
   "license": "MIT",
   "repository": {
diff --git a/packages/solidity-devops/src/deploy/Deployer.sol b/packages/solidity-devops/src/deploy/Deployer.sol
index 26b47277f2..cd78e891bb 100644
--- a/packages/solidity-devops/src/deploy/Deployer.sol
+++ b/packages/solidity-devops/src/deploy/Deployer.sol
@@ -95,6 +95,8 @@ abstract contract Deployer is ChainAwareReader, Logger {
     {
         bytes memory initCode = getInitCode(contractName, constructorArgs);
         bytes32 salt = getDeploymentSalt();
+        // Print init code hash for potential vanity address mining
+        printLogWithIndent(StringUtils.concat("Init code hash: ", vm.toString(keccak256(initCode))));
         printLogWithIndent(StringUtils.concat("Using salt: ", vm.toString(salt)));
         deployedAt = factoryDeployCreate2(getCreate2Factory(), initCode, getDeploymentSalt());
         // Erase single-use salt
diff --git a/packages/synapse-interface/CHANGELOG.md b/packages/synapse-interface/CHANGELOG.md
index d711ce33d8..3a88ecaa20 100644
--- a/packages/synapse-interface/CHANGELOG.md
+++ b/packages/synapse-interface/CHANGELOG.md
@@ -3,6 +3,65 @@
 All notable changes to this project will be documented in this file.
 See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
 
+# [0.2.0](https://github.com/synapsecns/sanguine/compare/@synapsecns/synapse-interface@0.1.304...@synapsecns/synapse-interface@0.2.0) (2024-03-07)
+
+
+### Features
+
+* **synapse-interface:** Adds switch network button to swap from and to chains ([#2121](https://github.com/synapsecns/sanguine/issues/2121)) ([951badb](https://github.com/synapsecns/sanguine/commit/951badb1dfaba380cd0bdf996757be89929f63b6))
+
+
+
+
+
+## [0.1.304](https://github.com/synapsecns/sanguine/compare/@synapsecns/synapse-interface@0.1.303...@synapsecns/synapse-interface@0.1.304) (2024-03-07)
+
+**Note:** Version bump only for package @synapsecns/synapse-interface
+
+
+
+
+
+## [0.1.303](https://github.com/synapsecns/sanguine/compare/@synapsecns/synapse-interface@0.1.302...@synapsecns/synapse-interface@0.1.303) (2024-03-06)
+
+**Note:** Version bump only for package @synapsecns/synapse-interface
+
+
+
+
+
+## [0.1.302](https://github.com/synapsecns/sanguine/compare/@synapsecns/synapse-interface@0.1.301...@synapsecns/synapse-interface@0.1.302) (2024-03-06)
+
+**Note:** Version bump only for package @synapsecns/synapse-interface
+
+
+
+
+
+## [0.1.301](https://github.com/synapsecns/sanguine/compare/@synapsecns/synapse-interface@0.1.300...@synapsecns/synapse-interface@0.1.301) (2024-03-06)
+
+**Note:** Version bump only for package @synapsecns/synapse-interface
+
+
+
+
+
+## [0.1.300](https://github.com/synapsecns/sanguine/compare/@synapsecns/synapse-interface@0.1.299...@synapsecns/synapse-interface@0.1.300) (2024-03-04)
+
+**Note:** Version bump only for package @synapsecns/synapse-interface
+
+
+
+
+
+## [0.1.299](https://github.com/synapsecns/sanguine/compare/@synapsecns/synapse-interface@0.1.298...@synapsecns/synapse-interface@0.1.299) (2024-03-04)
+
+**Note:** Version bump only for package @synapsecns/synapse-interface
+
+
+
+
+
 ## [0.1.298](https://github.com/synapsecns/sanguine/compare/@synapsecns/synapse-interface@0.1.297...@synapsecns/synapse-interface@0.1.298) (2024-03-01)
 
 **Note:** Version bump only for package @synapsecns/synapse-interface
diff --git a/packages/synapse-interface/components/StateManagedBridge/BridgeExchangeRateInfo.tsx b/packages/synapse-interface/components/StateManagedBridge/BridgeExchangeRateInfo.tsx
index 1211f73579..3c919ad21d 100644
--- a/packages/synapse-interface/components/StateManagedBridge/BridgeExchangeRateInfo.tsx
+++ b/packages/synapse-interface/components/StateManagedBridge/BridgeExchangeRateInfo.tsx
@@ -1,33 +1,29 @@
 import numeral from 'numeral'
+import Image from 'next/image'
 import { useMemo } from 'react'
-import {
-  formatBigIntToPercentString,
-  stringToBigInt,
-} from '@/utils/bigint/format'
-import { CHAINS_BY_ID } from '@constants/chains'
-import * as CHAINS from '@constants/chains/master'
+import { useAppSelector } from '@/store/hooks'
+import { useBridgeState } from '@/slices/bridge/hooks'
 import { useCoingeckoPrice } from '@hooks/useCoingeckoPrice'
-import Image from 'next/image'
-import { formatBigIntToString } from '@/utils/bigint/format'
 import {
   ELIGIBILITY_DEFAULT_TEXT,
   useStipEligibility,
 } from '@/utils/hooks/useStipEligibility'
-import { useBridgeState } from '@/slices/bridge/hooks'
+import { formatBigIntToString } from '@/utils/bigint/format'
+import { formatBigIntToPercentString } from '@/utils/bigint/format'
 import { EMPTY_BRIDGE_QUOTE } from '@/constants/bridge'
-import { useAppSelector } from '@/store/hooks'
+import { CHAINS_BY_ID } from '@constants/chains'
+import * as CHAINS from '@constants/chains/master'
 
 const MAX_ARB_REBATE_PER_ADDRESS = 2000
 
 const BridgeExchangeRateInfo = () => {
   return (
     
- + {/* */}
- - + {/* */}
@@ -150,42 +146,6 @@ const Rebate = () => { ) } -const Fee = () => { - const { - debouncedFromValue, - fromToken, - fromChainId, - isLoading, - bridgeQuote: { feeAmount, originQuery }, - } = useBridgeState() - - if (!originQuery || originQuery.minAmountOut === 0n) return - - const adjustedFeeAmount = - (BigInt(feeAmount) * - stringToBigInt( - `${debouncedFromValue}`, - fromToken?.decimals[fromChainId] - )) / - BigInt(originQuery?.minAmountOut) - - const feeString = formatBigIntToString( - adjustedFeeAmount, - fromToken?.decimals[fromChainId], - 4 - ) - - return ( -
-
Fee
-
- {isLoading ? '-' : feeString}{' '} - {fromToken?.symbol} -
-
- ) -} - const TimeEstimate = () => { const { fromToken, bridgeQuote } = useBridgeState() diff --git a/packages/synapse-interface/components/StateManagedBridge/OutputContainer.tsx b/packages/synapse-interface/components/StateManagedBridge/OutputContainer.tsx index c8d3ea0323..01045c145a 100644 --- a/packages/synapse-interface/components/StateManagedBridge/OutputContainer.tsx +++ b/packages/synapse-interface/components/StateManagedBridge/OutputContainer.tsx @@ -24,7 +24,7 @@ export const OutputContainer = ({}) => { // update address for destination address if we have a destination address return ( -
+
{/* {address && ( diff --git a/packages/synapse-interface/components/_Transaction/_Transactions.tsx b/packages/synapse-interface/components/_Transaction/_Transactions.tsx index 0a70aec33b..a78d4c1774 100644 --- a/packages/synapse-interface/components/_Transaction/_Transactions.tsx +++ b/packages/synapse-interface/components/_Transaction/_Transactions.tsx @@ -3,7 +3,7 @@ import { use_TransactionsState } from '@/slices/_transactions/hooks' import { _TransactionDetails } from '@/slices/_transactions/reducer' import { _Transaction } from './_Transaction' import { checkTransactionsExist } from '@/utils/checkTransactionsExist' -import { useIntervalTimer } from './helpers/useIntervalTimer' +import { useIntervalTimer } from '../../utils/hooks/useIntervalTimer' /** TODO: Update naming once refactoring of previous Activity/Tx flow is done */ export const _Transactions = ({ diff --git a/packages/synapse-interface/components/buttons/SwitchButton.tsx b/packages/synapse-interface/components/buttons/SwitchButton.tsx index e632a19d47..8047c4d9f5 100644 --- a/packages/synapse-interface/components/buttons/SwitchButton.tsx +++ b/packages/synapse-interface/components/buttons/SwitchButton.tsx @@ -1,17 +1,7 @@ -import { SwitchVerticalIcon } from '@heroicons/react/outline' import { useState } from 'react' -export default function SwitchButton({ - className, - innerClassName, - onClick, -}: { - className?: string - innerClassName?: string - onClick: () => void -}) { +export function SwitchButton({ onClick }: { onClick: () => void }) { const [isActive, setIsActive] = useState(false) - const handleClick = () => { onClick() setIsActive(true) @@ -21,28 +11,44 @@ export default function SwitchButton({ return (
-
- -
+
) } + +function SwitchButtonSvg({ onClick }: { onClick: () => void }) { + return ( + + + + + + + ) +} diff --git a/packages/synapse-interface/components/layouts/LandingPageWrapper/index.tsx b/packages/synapse-interface/components/layouts/LandingPageWrapper/index.tsx index f25bad03f7..e7d614b268 100644 --- a/packages/synapse-interface/components/layouts/LandingPageWrapper/index.tsx +++ b/packages/synapse-interface/components/layouts/LandingPageWrapper/index.tsx @@ -160,14 +160,16 @@ export function PopoverPanelContainer({ } function TopBarButtons() { - const topBarNavLinks = Object.entries(NAVIGATION).map(([key, value]) => ( - - )) + const topBarNavLinks = Object.entries(NAVIGATION) + .filter(([key, value]) => value.path !== NAVIGATION.Countdown.path) + .map(([key, value]) => ( + + )) return <>{topBarNavLinks} } @@ -223,14 +225,16 @@ function SocialButtons() { } function MobileBarButtons() { - const mobileBarItems = Object.entries(NAVIGATION).map(([key, value]) => ( - - )) + const mobileBarItems = Object.entries(NAVIGATION) + .filter(([key, value]) => value.path !== NAVIGATION.Countdown.path) + .map(([key, value]) => ( + + )) return <>{mobileBarItems} } diff --git a/packages/synapse-interface/constants/routes.ts b/packages/synapse-interface/constants/routes.ts index b33db9f19e..bad9f44af7 100644 --- a/packages/synapse-interface/constants/routes.ts +++ b/packages/synapse-interface/constants/routes.ts @@ -8,6 +8,7 @@ import { BRIDGE_PATH, INTERCHAIN_LINK, SOLANA_BRIDGE_LINK, + COUNTDOWN_PATH, } from './urls' export interface RouteObject { @@ -63,4 +64,9 @@ export const NAVIGATION: RouteObject = { text: 'Solana Bridge', match: null, }, + Countdown: { + path: COUNTDOWN_PATH, + text: 'Countdown', + match: COUNTDOWN_PATH, + }, } diff --git a/packages/synapse-interface/constants/urls/index.tsx b/packages/synapse-interface/constants/urls/index.tsx index d81344327d..5cb6f970c7 100644 --- a/packages/synapse-interface/constants/urls/index.tsx +++ 
b/packages/synapse-interface/constants/urls/index.tsx @@ -20,6 +20,7 @@ export const STAKE_PATH = '/stake' export const POOLS_PATH = '/pools' export const POOL_PATH = '/pool' export const LANDING_PATH = '/landing' +export const COUNTDOWN_PATH = '/4844' /** External Links */ export const EXPLORER_KAPPA = 'https://explorer.synapseprotocol.com/tx/' diff --git a/packages/synapse-interface/contexts/BackgroundListenerProvider.tsx b/packages/synapse-interface/contexts/BackgroundListenerProvider.tsx index d1434f4f2e..501484ef5b 100644 --- a/packages/synapse-interface/contexts/BackgroundListenerProvider.tsx +++ b/packages/synapse-interface/contexts/BackgroundListenerProvider.tsx @@ -1,10 +1,12 @@ +import React, { createContext } from 'react' + import { useApplicationListener } from '@/utils/hooks/useApplicationListener' import { useBridgeListener } from '@/utils/hooks/useBridgeListener' import { usePortfolioListener } from '@/utils/hooks/usePortfolioListener' import { useRiskEvent } from '@/utils/hooks/useRiskEvent' import { useTransactionListener } from '@/utils/hooks/useTransactionListener' import { use_TransactionsListener } from '@/utils/hooks/use_TransactionsListener' -import React, { createContext, useContext, useEffect } from 'react' +import { useFetchPricesOnInterval } from '@/utils/hooks/useFetchPricesOnInterval' const BackgroundListenerContext = createContext(null) @@ -15,6 +17,7 @@ export const BackgroundListenerProvider = ({ children }) => { use_TransactionsListener() useBridgeListener() useRiskEvent() + useFetchPricesOnInterval() return ( diff --git a/packages/synapse-interface/contexts/UserProvider.tsx b/packages/synapse-interface/contexts/UserProvider.tsx index 157a68e447..e23a018f9c 100644 --- a/packages/synapse-interface/contexts/UserProvider.tsx +++ b/packages/synapse-interface/contexts/UserProvider.tsx @@ -8,18 +8,6 @@ import { setSwapChainId } from '@/slices/swap/reducer' import { fetchAndStorePortfolioBalances } from '@/slices/portfolio/hooks' import { 
useAppDispatch } from '@/store/hooks' import { resetPortfolioState } from '@/slices/portfolio/actions' -import { - fetchAllEthStablecoinPrices, - fetchArbPrice, - fetchAvaxPrice, - fetchCoingeckoPrices, - fetchDaiePrice, - fetchEthPrice, - fetchGmxPrice, - fetchMetisPrice, - fetchMusdcPrice, - fetchSynPrices, -} from '@/slices/priceDataSlice' import { isBlacklisted } from '@/utils/isBlacklisted' import { screenAddress } from '@/utils/screenAddress' import { @@ -61,16 +49,6 @@ export const UserProvider = ({ children }) => { useEffect(() => { if (isClient) { - dispatch(fetchSynPrices()) - dispatch(fetchEthPrice()) - dispatch(fetchAvaxPrice()) - dispatch(fetchMetisPrice()) - dispatch(fetchArbPrice()) - dispatch(fetchGmxPrice()) - dispatch(fetchAllEthStablecoinPrices()) - dispatch(fetchCoingeckoPrices()) - dispatch(fetchMusdcPrice()) - dispatch(fetchDaiePrice()) dispatch(fetchFeeAndRebate()) } }, [isClient]) diff --git a/packages/synapse-interface/package.json b/packages/synapse-interface/package.json index e33041c789..968f852213 100644 --- a/packages/synapse-interface/package.json +++ b/packages/synapse-interface/package.json @@ -1,6 +1,6 @@ { "name": "@synapsecns/synapse-interface", - "version": "0.1.298", + "version": "0.2.0", "private": true, "engines": { "node": ">=16.0.0" @@ -42,7 +42,7 @@ "@reduxjs/toolkit": "^1.9.5", "@rtk-query/graphql-request-base-query": "^2.2.0", "@segment/analytics-next": "^1.53.0", - "@synapsecns/sdk-router": "^0.3.27", + "@synapsecns/sdk-router": "^0.3.28", "@tailwindcss/aspect-ratio": "^0.4.2", "@tailwindcss/forms": "^0.5.3", "@tailwindcss/typography": "^0.5.9", diff --git a/packages/synapse-interface/pages/4844/index.tsx b/packages/synapse-interface/pages/4844/index.tsx new file mode 100644 index 0000000000..33b5dfc93a --- /dev/null +++ b/packages/synapse-interface/pages/4844/index.tsx @@ -0,0 +1,112 @@ +import { useRouter } from 'next/router' +import { LandingPageWrapper } from '@/components/layouts/LandingPageWrapper' +import { 
useIntervalTimer } from '@/utils/hooks/useIntervalTimer' +import ExternalLinkIcon from '@/components/icons/ExternalLinkIcon' + +const Countdown = () => { + useIntervalTimer(1000) + + const { daysRemaining, hoursRemaining, minutesRemaining, secondsRemaining } = + calculateTimeUntilTarget() + + return ( + +
+
Countdown to Dencun Upgrade
+ +
+
+
{daysRemaining}
+
Days
+
+ +
+
{hoursRemaining}
+
Hours
+
+ +
+
{minutesRemaining}
+
Minutes
+
+ +
+
{secondsRemaining}
+
Seconds
+
+
+ + +
+
+ ) +} + +export default Countdown + +const calculateTimeUntilTarget = () => { + const currentDate = new Date() + const currentDay = currentDate.getDate() + const currentHour = currentDate.getHours() + + let targetDate: Date + + /** + * Shift target time to actual time after daylight savings + * Daylight Savings time occurs on March 10th, 2024 @ 2AM on PST, CST, EST + */ + if (currentDay >= 10 && currentHour >= 2) { + targetDate = new Date(Date.UTC(2024, 2, 13, 13, 55, 0)) + } else { + targetDate = new Date(Date.UTC(2024, 2, 13, 12, 55, 0)) + } + + const timeDifference = targetDate.getTime() - currentDate.getTime() + + const daysRemaining = Math.floor(timeDifference / (1000 * 60 * 60 * 24)) + const hoursRemaining = Math.floor( + (timeDifference % (1000 * 60 * 60 * 24)) / (1000 * 60 * 60) + ) + .toString() + .padStart(2, '0') + const minutesRemaining = Math.floor( + (timeDifference % (1000 * 60 * 60)) / (1000 * 60) + ) + .toString() + .padStart(2, '0') + const secondsRemaining = Math.floor((timeDifference % (1000 * 60)) / 1000) + .toString() + .padStart(2, '0') + + return { + daysRemaining, + hoursRemaining, + minutesRemaining, + secondsRemaining, + } +} diff --git a/packages/synapse-interface/pages/pool/PoolBody.tsx b/packages/synapse-interface/pages/pool/PoolBody.tsx index f4f5e8dd34..b8f3e75682 100644 --- a/packages/synapse-interface/pages/pool/PoolBody.tsx +++ b/packages/synapse-interface/pages/pool/PoolBody.tsx @@ -126,7 +126,7 @@ const PoolBody = ({ borderRadius: '4px', }} label={`Switch to ${ - chains.find((c) => c.id === pool.chainId).name + chains.find((c) => c.id === pool.chainId)?.name }`} pendingLabel="Switching chains" onClick={() => diff --git a/packages/synapse-interface/pages/state-managed-bridge/index.tsx b/packages/synapse-interface/pages/state-managed-bridge/index.tsx index bc8cfe0dc3..534b00f88f 100644 --- a/packages/synapse-interface/pages/state-managed-bridge/index.tsx +++ b/packages/synapse-interface/pages/state-managed-bridge/index.tsx @@ 
-7,7 +7,13 @@ import { useRouter } from 'next/router' import { segmentAnalyticsEvent } from '@/contexts/SegmentAnalyticsProvider' import { useBridgeState } from '@/slices/bridge/hooks' -import { BridgeState } from '@/slices/bridge/reducer' +import { + BridgeState, + setFromChainId, + setFromToken, + setToChainId, + setToToken, +} from '@/slices/bridge/reducer' import { updateFromValue, setBridgeQuote, @@ -79,6 +85,7 @@ import { fetchGmxPrice, } from '@/slices/priceDataSlice' import { isTransactionReceiptError } from '@/utils/isTransactionReceiptError' +import { SwitchButton } from '@/components/buttons/SwitchButton' const StateManagedBridge = () => { const { address } = useAccount() @@ -278,10 +285,6 @@ const StateManagedBridge = () => { toast.dismiss(quoteToastRef.current.id) - dispatch(fetchEthPrice()) - dispatch(fetchArbPrice()) - dispatch(fetchGmxPrice()) - const message = `Route found for bridging ${debouncedFromValue} ${fromToken?.symbol} on ${CHAINS_BY_ID[fromChainId]?.name} to ${toToken.symbol} on ${CHAINS_BY_ID[toChainId]?.name}` console.log(message) @@ -580,6 +583,14 @@ const StateManagedBridge = () => { + { + dispatch(setFromChainId(toChainId)) + dispatch(setFromToken(toToken)) + dispatch(setToChainId(fromChainId)) + dispatch(setToToken(fromToken)) + }} + /> { + const dispatch = useAppDispatch() + + useEffect(() => { + const fetchPrices = () => { + dispatch(fetchSynPrices()) + dispatch(fetchEthPrice()) + dispatch(fetchAvaxPrice()) + dispatch(fetchMetisPrice()) + dispatch(fetchArbPrice()) + dispatch(fetchGmxPrice()) + dispatch(fetchAllEthStablecoinPrices()) + dispatch(fetchCoingeckoPrices()) + dispatch(fetchMusdcPrice()) + dispatch(fetchDaiePrice()) + } + + // Fetch on mount + fetchPrices() + + // Fetch every five minutes + const interval = setInterval(fetchPrices, 300000) + + return () => clearInterval(interval) + }, [dispatch]) +} diff --git a/packages/synapse-interface/components/_Transaction/helpers/useIntervalTimer.ts 
b/packages/synapse-interface/utils/hooks/useIntervalTimer.ts similarity index 100% rename from packages/synapse-interface/components/_Transaction/helpers/useIntervalTimer.ts rename to packages/synapse-interface/utils/hooks/useIntervalTimer.ts diff --git a/packages/synapse-interface/utils/hooks/useSyncQueryParamsWithBridgeState.ts b/packages/synapse-interface/utils/hooks/useSyncQueryParamsWithBridgeState.ts index bff64b0da4..74b33710ac 100644 --- a/packages/synapse-interface/utils/hooks/useSyncQueryParamsWithBridgeState.ts +++ b/packages/synapse-interface/utils/hooks/useSyncQueryParamsWithBridgeState.ts @@ -30,24 +30,24 @@ const useSyncQueryParamsWithBridgeState = () => { const fromTokenParam = urlParams.get('fromToken') const toTokenParam = urlParams.get('toToken') - if (fromChainParam !== null) { + if (fromChainParam !== null && Number(fromChainParam) !== fromChainId) { if (allowedFromChainIds.includes(Number(fromChainParam))) { dispatch(setFromChainId(Number(fromChainParam))) } } - if (fromTokenParam !== null) { + if (fromTokenParam !== null && fromTokenParam !== fromToken?.symbol) { const token = findKeyByRouteSymbol(fromTokenParam, BRIDGEABLE) dispatch(setFromToken(token)) } - if (toChainParam !== null) { + if (toChainParam !== null && Number(toChainParam) !== toChainId) { if (allowedToChainIds.includes(Number(toChainParam))) { dispatch(setToChainId(Number(toChainParam))) } } - if (toTokenParam !== null) { + if (toTokenParam !== null && toTokenParam !== toToken?.symbol) { const token = findKeyByRouteSymbol(toTokenParam, BRIDGEABLE) dispatch(setToToken(token)) } diff --git a/packages/widget/CHANGELOG.md b/packages/widget/CHANGELOG.md index eaf8a2b591..172599aacc 100644 --- a/packages/widget/CHANGELOG.md +++ b/packages/widget/CHANGELOG.md @@ -3,6 +3,25 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [0.1.0](https://github.com/synapsecns/sanguine/compare/@synapsecns/widget@0.0.64...@synapsecns/widget@0.1.0) (2024-03-06) + + +### Features + +* **widget:** adds useBridgeSelections helper for consumer selection awareness ([#2217](https://github.com/synapsecns/sanguine/issues/2217)) ([febdab8](https://github.com/synapsecns/sanguine/commit/febdab8ca35494f52099f63f4b83d4901edcf767)) + + + + + +## [0.0.64](https://github.com/synapsecns/sanguine/compare/@synapsecns/widget@0.0.63...@synapsecns/widget@0.0.64) (2024-03-04) + +**Note:** Version bump only for package @synapsecns/widget + + + + + ## [0.0.63](https://github.com/synapsecns/sanguine/compare/@synapsecns/widget@0.0.62...@synapsecns/widget@0.0.63) (2024-03-01) **Note:** Version bump only for package @synapsecns/widget diff --git a/packages/widget/README.md b/packages/widget/README.md index 3591c3db92..25dd3f4d63 100644 --- a/packages/widget/README.md +++ b/packages/widget/README.md @@ -120,6 +120,35 @@ const MyApp = () => { Note: Token naming convention is based on the tokens provided by `@synapsecns/widget`. For example, USDC on Metis is `METISUSDC` instead of simply `USDC`. The package's `src/constants/bridgeable.ts` file contains a detailed list of supported tokens and the chains they live on. Additionally, to see a detailed list of Synapse Protocol supported chains, please see `src/constants/chains.ts`. +## useBridgeSelections Hook + +The widget also provides a `useBridgeSelections` hook that can be used to access the selected tokens and chains. This hook returns an object of type `BridgeSelections` which has fields of `originChain`, `originToken`, `destinationChain`, and `destinationToken`. 
+ +`originChain` and `destinationChain` structure: + +``` +{ + id, + name, +} +``` + +`originToken` and `destinationToken` structure: + +``` +{ + symbol, + address +} +``` + +In the consumer app: + +```ts +const { originChain, originToken, destinationChain, destinationToken } = + useBridgeSelections() +``` + ## Theme Customization The widget is designed to be easily customized to match your app's theme. The widget accepts an optional `customTheme` configurable `bgColor` parameter for `'dark'`, `'light'`, and custom color modes: diff --git a/packages/widget/package.json b/packages/widget/package.json index 535a4a09f5..ae011e9496 100644 --- a/packages/widget/package.json +++ b/packages/widget/package.json @@ -1,7 +1,7 @@ { "name": "@synapsecns/widget", "description": "Widget library for interacting with the Synapse Protocol", - "version": "0.0.63", + "version": "0.1.0", "license": "MIT", "main": "dist/cjs/index.js", "module": "dist/esm/index.js", @@ -66,7 +66,7 @@ "@ethersproject/providers": "^5.7.2", "@ethersproject/units": "^5.7.0", "@reduxjs/toolkit": "^2.0.1", - "@synapsecns/sdk-router": "^0.3.27", + "@synapsecns/sdk-router": "^0.3.28", "ethers": "^6.9.1", "lodash": "^4.17.21", "react-redux": "^9.0.2" diff --git a/packages/widget/src/components/ui/ChainPopoverSelect.tsx b/packages/widget/src/components/ui/ChainPopoverSelect.tsx index 02cc00aa0f..6ab65963ad 100644 --- a/packages/widget/src/components/ui/ChainPopoverSelect.tsx +++ b/packages/widget/src/components/ui/ChainPopoverSelect.tsx @@ -70,6 +70,7 @@ export const ChainPopoverSelect = ({ ref={popoverRef} >
togglePopover()} style={{ background: 'var(--synapse-select-bg)' }} className={` diff --git a/packages/widget/src/components/ui/TokenPopoverSelect.tsx b/packages/widget/src/components/ui/TokenPopoverSelect.tsx index e1a21e68d9..a9e9ef5390 100644 --- a/packages/widget/src/components/ui/TokenPopoverSelect.tsx +++ b/packages/widget/src/components/ui/TokenPopoverSelect.tsx @@ -14,6 +14,7 @@ type PopoverSelectProps = { balances: TokenBalance[] onSelect: (selected: BridgeableToken) => void selected: BridgeableToken + isOrigin: boolean } export const TokenPopoverSelect = ({ @@ -22,6 +23,7 @@ export const TokenPopoverSelect = ({ balances, onSelect, selected, + isOrigin, }: PopoverSelectProps) => { const { popoverRef, isOpen, togglePopover, closePopover } = usePopover() @@ -74,6 +76,7 @@ export const TokenPopoverSelect = ({ ref={popoverRef} >
togglePopover()} style={{ background: 'var(--synapse-select-bg)' }} className={` diff --git a/packages/widget/src/components/ui/TokenSelect.tsx b/packages/widget/src/components/ui/TokenSelect.tsx index cd48777ee7..018c5af6d2 100644 --- a/packages/widget/src/components/ui/TokenSelect.tsx +++ b/packages/widget/src/components/ui/TokenSelect.tsx @@ -111,6 +111,7 @@ export const TokenSelect = ({ label, isOrigin, token, onChange }: Props) => { onChange(selected) }} selected={token} + isOrigin={isOrigin} /> ) } diff --git a/packages/widget/src/hooks/useBridgeSelectionData.ts b/packages/widget/src/hooks/useBridgeSelectionData.ts new file mode 100644 index 0000000000..46debf633e --- /dev/null +++ b/packages/widget/src/hooks/useBridgeSelectionData.ts @@ -0,0 +1,117 @@ +import { useState, useEffect } from 'react' +import { BridgeSelections } from 'types' + +import { CHAINS_ARRAY } from '@/constants/chains' +import { findTokenByRouteSymbol } from '@/utils/findTokenByRouteSymbol' + +export const useBridgeSelectionData = (): BridgeSelections => { + const [originChain, setOriginChain] = useState('') + const [originToken, setOriginToken] = useState('') + const [destinationChain, setDestinationChain] = useState('') + const [destinationToken, setDestinationToken] = useState('') + + useEffect(() => { + let originChainSelect = null + let destinationChainSelect = null + + let originTokenSelect = null + let destinationTokenSelect = null + + const getChainValues = () => { + if ( + originChainSelect && + destinationChainSelect && + originTokenSelect && + destinationTokenSelect + ) { + const originChainName = originChainSelect.textContent?.trim() || '' + const destinationChainName = + destinationChainSelect.textContent?.trim() || '' + + const originTokenSymbol = originTokenSelect.textContent?.trim() || '' + const destinationTokenSymbol = + destinationTokenSelect.textContent?.trim() || '' + + setOriginChain(originChainName === 'Network' ? 
null : originChainName) + setOriginToken(originTokenSymbol === 'Token' ? null : originTokenSymbol) + setDestinationChain( + destinationChainName === 'Network' ? null : destinationChainName + ) + setDestinationToken( + destinationTokenSymbol === 'Token' ? null : destinationTokenSymbol + ) + } + } + + const checkElements = () => { + originChainSelect = document.getElementById('origin-chain-select') + destinationChainSelect = document.getElementById( + 'destination-chain-select' + ) + originTokenSelect = document.getElementById('origin-token-select') + destinationTokenSelect = document.getElementById( + 'destination-token-select' + ) + + if ( + originChainSelect && + destinationChainSelect && + originTokenSelect && + destinationTokenSelect + ) { + // Get initial values when elements are available + getChainValues() + + const observer = new MutationObserver(getChainValues) + const config = { childList: true, characterData: true, subtree: true } + + observer.observe(originChainSelect, config) + observer.observe(destinationChainSelect, config) + observer.observe(originTokenSelect, config) + observer.observe(destinationTokenSelect, config) + + return () => { + observer.disconnect() + } + } else { + // If elements are not available, check again after a short delay + const timerId = setTimeout(checkElements, 100) + return () => clearTimeout(timerId) + } + } + + checkElements() + }, []) + + const originChainId = CHAINS_ARRAY.find( + (chain) => chain.name === originChain + )?.id + const destinationChainId = CHAINS_ARRAY.find( + (chain) => chain.name === destinationChain + )?.id + + const originTokenAddress = + findTokenByRouteSymbol(originToken)?.addresses[originChainId] ?? null + const destinationTokenAddress = + findTokenByRouteSymbol(destinationToken)?.addresses[destinationChainId] ?? 
+ null + + return { + originChain: { + id: originChainId, + name: originChain, + }, + destinationChain: { + id: destinationChainId, + name: destinationChain, + }, + originToken: { + symbol: originToken, + address: originTokenAddress, + }, + destinationToken: { + symbol: destinationToken, + address: destinationTokenAddress, + }, + } +} diff --git a/packages/widget/src/index.tsx b/packages/widget/src/index.tsx index deb0bcaaf0..cc49adf7e0 100644 --- a/packages/widget/src/index.tsx +++ b/packages/widget/src/index.tsx @@ -10,6 +10,7 @@ import { store } from '@/state/store' import * as BRIDGEABLE from '@/constants/bridgeable' import { CHAINS_ARRAY } from '@/constants/chains' import { BackgroundListenerProvider } from '@/providers/BackgroundListenerProvider' +import { useBridgeSelectionData } from '@/hooks/useBridgeSelectionData' export const Bridge = ({ web3Provider, @@ -43,6 +44,8 @@ export const Bridge = ({ ) } +export const useBridgeSelections = () => useBridgeSelectionData() + export const AGEUR = BRIDGEABLE.AGEUR export const AVAX = BRIDGEABLE.AVAX export const BTCB = BRIDGEABLE.BTCB diff --git a/packages/widget/src/types/index.d.ts b/packages/widget/src/types/index.d.ts index 9b92901094..295911ca5f 100644 --- a/packages/widget/src/types/index.d.ts +++ b/packages/widget/src/types/index.d.ts @@ -12,6 +12,25 @@ export interface BridgeableToken { imgUrl: string } +export interface BridgeSelections { + originChain: { + id: number | null + name: string | null + } + destinationChain: { + id: number | null + name: string | null + } + originToken: { + symbol: string | null + address: string | null + } + destinationToken: { + symbol: string | null + address: string | null + } +} + export interface CustomThemeVariables { bgColor?: string '--synapse-text'?: string @@ -66,6 +85,8 @@ export interface Chain { export declare function Bridge(props: BridgeProps): JSX.Element +export declare function useBridgeSelections(): BridgeSelections + export declare const AGEUR: 
BridgeableToken export declare const AVAX: BridgeableToken export declare const BTCB: BridgeableToken diff --git a/services/explorer/node/explorer.go b/services/explorer/node/explorer.go index 30277f9650..eb63aa4a4c 100644 --- a/services/explorer/node/explorer.go +++ b/services/explorer/node/explorer.go @@ -3,10 +3,11 @@ package node import ( "context" "fmt" - "github.com/synapsecns/sanguine/core/metrics" "net/http" "time" + "github.com/synapsecns/sanguine/core/metrics" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/synapsecns/sanguine/services/explorer/backfill" @@ -24,6 +25,7 @@ import ( // ExplorerBackfiller is a backfiller that aggregates all backfilling from ChainBackfillers. type ExplorerBackfiller struct { + // consumerDB is the database to store consumer data in. consumerDB db.ConsumerDB // clients is a mapping of chain IDs -> clients. diff --git a/services/rfq/api/cmd/commands.go b/services/rfq/api/cmd/commands.go index d6f088603e..df9c61d1c2 100644 --- a/services/rfq/api/cmd/commands.go +++ b/services/rfq/api/cmd/commands.go @@ -22,7 +22,7 @@ var configFlag = &cli.StringFlag{ TakesFile: true, } -// runCommand runs the cctp relayer. +// runCommand runs the rfq api. 
var runCommand = &cli.Command{ Name: "run", Description: "run the API Server", @@ -53,7 +53,7 @@ var runCommand = &cli.Command{ err = apiServer.Run(c.Context) if err != nil { - return fmt.Errorf("could not run cctp relayer: %w", err) + return fmt.Errorf("could not run rfq relayer: %w", err) } return nil }, diff --git a/services/rfq/e2e/rfq_test.go b/services/rfq/e2e/rfq_test.go index c462c0ee9e..e87cef2739 100644 --- a/services/rfq/e2e/rfq_test.go +++ b/services/rfq/e2e/rfq_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/suite" "github.com/synapsecns/sanguine/core" @@ -14,23 +15,27 @@ import ( "github.com/synapsecns/sanguine/ethergo/backends/anvil" "github.com/synapsecns/sanguine/ethergo/signer/signer/localsigner" "github.com/synapsecns/sanguine/ethergo/signer/wallet" + cctpTest "github.com/synapsecns/sanguine/services/cctp-relayer/testutil" omnirpcClient "github.com/synapsecns/sanguine/services/omnirpc/client" "github.com/synapsecns/sanguine/services/rfq/api/client" "github.com/synapsecns/sanguine/services/rfq/contracts/fastbridge" "github.com/synapsecns/sanguine/services/rfq/relayer/chain" + "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" "github.com/synapsecns/sanguine/services/rfq/relayer/service" "github.com/synapsecns/sanguine/services/rfq/testutil" ) type IntegrationSuite struct { *testsuite.TestSuite - manager *testutil.DeployManager - originBackend backends.SimulatedTestBackend - destBackend backends.SimulatedTestBackend + manager *testutil.DeployManager + cctpDeployManager *cctpTest.DeployManager + originBackend backends.SimulatedTestBackend + destBackend backends.SimulatedTestBackend //omniserver is the omnirpc server address omniServer string omniClient omnirpcClient.RPCClient metrics metrics.Handler 
+ store reldb.Service apiServer string relayer *service.Relayer relayerWallet wallet.Wallet @@ -50,7 +55,7 @@ func TestIntegrationSuite(t *testing.T) { const ( originBackendChainID = 1 - destBackendChainID = 2 + destBackendChainID = 43114 ) // SetupTest sets up each test in the integration suite. We need to do a few things here: @@ -68,6 +73,7 @@ func (i *IntegrationSuite) SetupTest() { } i.manager = testutil.NewDeployManager(i.T()) + i.cctpDeployManager = cctpTest.NewDeployManager(i.T()) // TODO: consider jaeger i.metrics = metrics.NewNullHandler() // setup backends for ethereum & omnirpc @@ -93,22 +99,38 @@ func (i *IntegrationSuite) TestUSDCtoUSDC() { if core.GetEnvBool("CI", false) { i.T().Skip("skipping until anvil issues are fixed in CI") } - // Before we do anything, we're going to mint ourselves some USDC on the destination chain. - // 100k should do. - i.manager.MintToAddress(i.GetTestContext(), i.destBackend, testutil.USDCType, i.relayerWallet.Address(), big.NewInt(100000)) - destUSDC := i.manager.Get(i.GetTestContext(), i.destBackend, testutil.USDCType) + + // load token contracts + const startAmount = 1000 + const rfqAmount = 900 + opts := i.destBackend.GetTxContext(i.GetTestContext(), nil) + destUSDC, destUSDCHandle := i.cctpDeployManager.GetMockMintBurnTokenType(i.GetTestContext(), i.destBackend) + realStartAmount, err := testutil.AdjustAmount(i.GetTestContext(), big.NewInt(startAmount), destUSDC.ContractHandle()) + i.NoError(err) + realRFQAmount, err := testutil.AdjustAmount(i.GetTestContext(), big.NewInt(rfqAmount), destUSDC.ContractHandle()) + i.NoError(err) + + // add initial usdc to relayer on destination + tx, err := destUSDCHandle.MintPublic(opts.TransactOpts, i.relayerWallet.Address(), realStartAmount) + i.Nil(err) + i.destBackend.WaitForConfirmation(i.GetTestContext(), tx) i.Approve(i.destBackend, destUSDC, i.relayerWallet) - // let's give the user some money as well, $500 should do. 
- const userWantAmount = 500 - i.manager.MintToAddress(i.GetTestContext(), i.originBackend, testutil.USDCType, i.userWallet.Address(), big.NewInt(userWantAmount)) - originUSDC := i.manager.Get(i.GetTestContext(), i.originBackend, testutil.USDCType) + // add initial USDC to relayer on origin + optsOrigin := i.originBackend.GetTxContext(i.GetTestContext(), nil) + originUSDC, originUSDCHandle := i.cctpDeployManager.GetMockMintBurnTokenType(i.GetTestContext(), i.originBackend) + tx, err = originUSDCHandle.MintPublic(optsOrigin.TransactOpts, i.relayerWallet.Address(), realStartAmount) + i.Nil(err) + i.originBackend.WaitForConfirmation(i.GetTestContext(), tx) + i.Approve(i.originBackend, originUSDC, i.relayerWallet) + + // add initial USDC to user on origin + tx, err = originUSDCHandle.MintPublic(optsOrigin.TransactOpts, i.userWallet.Address(), realRFQAmount) + i.Nil(err) + i.originBackend.WaitForConfirmation(i.GetTestContext(), tx) i.Approve(i.originBackend, originUSDC, i.userWallet) // non decimal adjusted user want amount - realWantAmount, err := testutil.AdjustAmount(i.GetTestContext(), big.NewInt(userWantAmount), destUSDC.ContractHandle()) - i.NoError(err) - // now our friendly user is going to check the quote and send us some USDC on the origin chain. i.Eventually(func() bool { // first he's gonna check the quotes. @@ -122,7 +144,7 @@ func (i *IntegrationSuite) TestUSDCtoUSDC() { for _, quote := range allQuotes { if common.HexToAddress(quote.DestTokenAddr) == destUSDC.Address() { destAmountBigInt, _ := new(big.Int).SetString(quote.DestAmount, 10) - if destAmountBigInt.Cmp(realWantAmount) > 0 { + if destAmountBigInt.Cmp(realRFQAmount) > 0 { // we found our quote! 
// now we can move on return true @@ -136,14 +158,14 @@ func (i *IntegrationSuite) TestUSDCtoUSDC() { _, originFastBridge := i.manager.GetFastBridge(i.GetTestContext(), i.originBackend) auth := i.originBackend.GetTxContext(i.GetTestContext(), i.userWallet.AddressPtr()) // we want 499 usdc for 500 requested within a day - tx, err := originFastBridge.Bridge(auth.TransactOpts, fastbridge.IFastBridgeBridgeParams{ + tx, err = originFastBridge.Bridge(auth.TransactOpts, fastbridge.IFastBridgeBridgeParams{ DstChainId: uint32(i.destBackend.GetChainID()), To: i.userWallet.Address(), OriginToken: originUSDC.Address(), SendChainGas: true, DestToken: destUSDC.Address(), - OriginAmount: realWantAmount, - DestAmount: new(big.Int).Sub(realWantAmount, big.NewInt(10_000_000)), + OriginAmount: realRFQAmount, + DestAmount: new(big.Int).Sub(realRFQAmount, big.NewInt(10_000_000)), Deadline: new(big.Int).SetInt64(time.Now().Add(time.Hour * 24).Unix()), }) i.NoError(err) @@ -188,7 +210,7 @@ func (i *IntegrationSuite) TestUSDCtoUSDC() { // we should now have some usdc on the origin chain since we claimed // this should be offered up as inventory destAmountBigInt, _ := new(big.Int).SetString(quote.DestAmount, 10) - if destAmountBigInt.Cmp(big.NewInt(0)) > 0 { + if destAmountBigInt.Cmp(realStartAmount) > 0 { // we found our quote! 
// now we can move on return true @@ -197,6 +219,27 @@ func (i *IntegrationSuite) TestUSDCtoUSDC() { } return false }) + + i.Eventually(func() bool { + // check to see if the USDC balance has decreased on destination due to rebalance + balance, err := originUSDCHandle.BalanceOf(&bind.CallOpts{Context: i.GetTestContext()}, i.relayerWallet.Address()) + i.NoError(err) + balanceThresh, _ := new(big.Float).Mul(big.NewFloat(1.5), new(big.Float).SetInt(realStartAmount)).Int(nil) + if balance.Cmp(balanceThresh) > 0 { + return false + } + + // check to see if there is a pending rebalance from the destination back to origin + // TODO: validate more of the rebalance- expose in db interface just for testing? + destPending, err := i.store.HasPendingRebalance(i.GetTestContext(), uint64(i.destBackend.GetChainID())) + i.NoError(err) + if !destPending { + return false + } + originPending, err := i.store.HasPendingRebalance(i.GetTestContext(), uint64(i.originBackend.GetChainID())) + i.NoError(err) + return originPending + }) } // nolint: cyclop @@ -257,26 +300,28 @@ func (i *IntegrationSuite) TestETHtoETH() { i.originBackend.WaitForConfirmation(i.GetTestContext(), tx) // TODO: this, but cleaner - anvilClient, err := anvil.Dial(i.GetTestContext(), i.originBackend.RPCAddress()) - i.NoError(err) - - go func() { - for { - select { - case <-i.GetTestContext().Done(): - return - case <-time.After(time.Second * 4): - // increase time by 30 mintutes every second, should be enough to get us a fastish e2e test - // we don't need to worry about deadline since we're only doing this on origin - err = anvilClient.IncreaseTime(i.GetTestContext(), 60*30) - i.NoError(err) + for _, rpcAddr := range []string{i.originBackend.RPCAddress(), i.destBackend.RPCAddress()} { + anvilClient, err := anvil.Dial(i.GetTestContext(), rpcAddr) + i.NoError(err) - // because can claim works on last block timestamp, we need to do something - err = anvilClient.Mine(i.GetTestContext(), 1) - i.NoError(err) + go func() { + 
for { + select { + case <-i.GetTestContext().Done(): + return + case <-time.After(time.Second * 4): + // increase time by 30 mintutes every second, should be enough to get us a fastish e2e test + // we don't need to worry about deadline since we're only doing this on origin + err = anvilClient.IncreaseTime(i.GetTestContext(), 60*30) + i.NoError(err) + + // because can claim works on last block timestamp, we need to do something + err = anvilClient.Mine(i.GetTestContext(), 1) + i.NoError(err) + } } - } - }() + }() + } // since relayer started w/ 0 ETH, once they're offering the inventory up on origin chain we know the workflow completed i.Eventually(func() bool { diff --git a/services/rfq/e2e/setup_test.go b/services/rfq/e2e/setup_test.go index 7b4207003b..11cc2aa62c 100644 --- a/services/rfq/e2e/setup_test.go +++ b/services/rfq/e2e/setup_test.go @@ -25,6 +25,7 @@ import ( "github.com/synapsecns/sanguine/ethergo/contracts" signerConfig "github.com/synapsecns/sanguine/ethergo/signer/config" "github.com/synapsecns/sanguine/ethergo/signer/wallet" + cctpTest "github.com/synapsecns/sanguine/services/cctp-relayer/testutil" omnirpcClient "github.com/synapsecns/sanguine/services/omnirpc/client" "github.com/synapsecns/sanguine/services/omnirpc/testhelper" apiConfig "github.com/synapsecns/sanguine/services/rfq/api/config" @@ -33,6 +34,7 @@ import ( "github.com/synapsecns/sanguine/services/rfq/contracts/ierc20" "github.com/synapsecns/sanguine/services/rfq/relayer/chain" "github.com/synapsecns/sanguine/services/rfq/relayer/relconfig" + "github.com/synapsecns/sanguine/services/rfq/relayer/reldb/connect" "github.com/synapsecns/sanguine/services/rfq/relayer/service" "github.com/synapsecns/sanguine/services/rfq/testutil" ) @@ -113,6 +115,8 @@ func (i *IntegrationSuite) setupBackends() { i.omniServer = testhelper.NewOmnirpcServer(i.GetTestContext(), i.T(), i.originBackend, 
i.destBackend) i.omniClient = omnirpcClient.NewOmnirpcClient(i.omniServer, i.metrics, omnirpcClient.WithCaptureReqRes()) + + i.setupCCTP() } // setupBe sets up one backend @@ -120,7 +124,7 @@ func (i *IntegrationSuite) setupBE(backend backends.SimulatedTestBackend) { // prdeploys are contracts we want to deploy before running the test to speed it up. Obviously, these can be deployed when we need them as well, // but this way we can do something while we're waiting for the other backend to startup. // no need to wait for these to deploy since they can happen in background as soon as the backend is up. - predeployTokens := []contracts.ContractType{testutil.DAIType, testutil.USDTType, testutil.USDCType, testutil.WETH9Type} + predeployTokens := []contracts.ContractType{testutil.DAIType, testutil.USDTType, testutil.WETH9Type} predeploys := append(predeployTokens, testutil.FastBridgeType) slices.Reverse(predeploys) // return fast bridge first @@ -150,6 +154,50 @@ func (i *IntegrationSuite) setupBE(backend backends.SimulatedTestBackend) { } +func (i *IntegrationSuite) setupCCTP() { + // deploy the contract to all backends + testBackends := core.ToSlice(i.originBackend, i.destBackend) + i.cctpDeployManager.BulkDeploy(i.GetTestContext(), testBackends, cctpTest.SynapseCCTPType, cctpTest.MockMintBurnTokenType) + + // register remote deployments and tokens + for _, backend := range testBackends { + cctpContract, cctpHandle := i.cctpDeployManager.GetSynapseCCTP(i.GetTestContext(), backend) + _, tokenMessengeHandle := i.cctpDeployManager.GetMockTokenMessengerType(i.GetTestContext(), backend) + + // on the above contract, set the remote for each backend + for _, backendToSetFrom := range core.ToSlice(i.originBackend, i.destBackend) { + // we don't need to set the backends own remote! 
+ if backendToSetFrom.GetChainID() == backend.GetChainID() { + continue + } + + remoteCCTP, _ := i.cctpDeployManager.GetSynapseCCTP(i.GetTestContext(), backendToSetFrom) + remoteMessenger, _ := i.cctpDeployManager.GetMockTokenMessengerType(i.GetTestContext(), backendToSetFrom) + + txOpts := backend.GetTxContext(i.GetTestContext(), cctpContract.OwnerPtr()) + // set the remote cctp contract on this cctp contract + // TODO: verify chainID / domain are correct + remoteDomain := cctpTest.ChainIDDomainMap[uint32(remoteCCTP.ChainID().Int64())] + + tx, err := cctpHandle.SetRemoteDomainConfig(txOpts.TransactOpts, + big.NewInt(remoteCCTP.ChainID().Int64()), remoteDomain, remoteCCTP.Address()) + i.Require().NoError(err) + backend.WaitForConfirmation(i.GetTestContext(), tx) + + // register the remote token messenger on the tokenMessenger contract + _, err = tokenMessengeHandle.SetRemoteTokenMessenger(txOpts.TransactOpts, uint32(backendToSetFrom.GetChainID()), addressToBytes32(remoteMessenger.Address())) + i.Nil(err) + } + } +} + +// addressToBytes32 converts an address to a bytes32. +func addressToBytes32(addr common.Address) [32]byte { + var buf [32]byte + copy(buf[:], addr[:]) + return buf +} + // Approve checks if the token is approved and approves it if not. 
func (i *IntegrationSuite) Approve(backend backends.SimulatedTestBackend, token contracts.DeployedContract, user wallet.Wallet) { erc20, err := ierc20.NewIERC20(token.Address(), backend) @@ -193,11 +241,14 @@ func (i *IntegrationSuite) setupRelayer() { relayerAPIPort, err := freeport.GetFreePort() i.NoError(err) dsn := filet.TmpDir(i.T(), "") + cctpContractOrigin, _ := i.cctpDeployManager.GetSynapseCCTP(i.GetTestContext(), i.originBackend) + cctpContractDest, _ := i.cctpDeployManager.GetSynapseCCTP(i.GetTestContext(), i.destBackend) cfg := relconfig.Config{ // generated ex-post facto Chains: map[int]relconfig.ChainConfig{ originBackendChainID: { - Bridge: i.manager.Get(i.GetTestContext(), i.originBackend, testutil.FastBridgeType).Address().String(), + RFQAddress: i.manager.Get(i.GetTestContext(), i.originBackend, testutil.FastBridgeType).Address().String(), + CCTPAddress: cctpContractOrigin.Address().Hex(), Confirmations: 0, Tokens: map[string]relconfig.TokenConfig{ "ETH": { @@ -209,7 +260,8 @@ func (i *IntegrationSuite) setupRelayer() { NativeToken: "ETH", }, destBackendChainID: { - Bridge: i.manager.Get(i.GetTestContext(), i.destBackend, testutil.FastBridgeType).Address().String(), + RFQAddress: i.manager.Get(i.GetTestContext(), i.destBackend, testutil.FastBridgeType).Address().String(), + CCTPAddress: cctpContractDest.Address().Hex(), Confirmations: 0, Tokens: map[string]relconfig.TokenConfig{ "ETH": { @@ -243,15 +295,22 @@ func (i *IntegrationSuite) setupRelayer() { GasPriceCacheTTLSeconds: 60, TokenPriceCacheTTLSeconds: 60, }, + RebalanceInterval: 0, } // in the first backend, we want to deploy a bunch of different tokens // TODO: functionalize me. 
for _, backend := range core.ToSlice(i.originBackend, i.destBackend) { - tokenTypes := []contracts.ContractType{testutil.DAIType, testutil.USDTType, testutil.USDCType, testutil.WETH9Type} + tokenTypes := []contracts.ContractType{testutil.DAIType, testutil.USDTType, testutil.WETH9Type, cctpTest.MockMintBurnTokenType} for _, tokenType := range tokenTypes { - tokenAddress := i.manager.Get(i.GetTestContext(), backend, tokenType).Address().String() + useCCTP := tokenType == cctpTest.MockMintBurnTokenType + var tokenAddress string + if useCCTP { + tokenAddress = i.cctpDeployManager.Get(i.GetTestContext(), backend, cctpTest.MockMintBurnTokenType).Address().String() + } else { + tokenAddress = i.manager.Get(i.GetTestContext(), backend, tokenType).Address().String() + } quotableTokenID := fmt.Sprintf("%d-%s", backend.GetChainID(), tokenAddress) tokenCaller, err := ierc20.NewIerc20Ref(common.HexToAddress(tokenAddress), backend) @@ -260,26 +319,47 @@ func (i *IntegrationSuite) setupRelayer() { decimals, err := tokenCaller.Decimals(&bind.CallOpts{Context: i.GetTestContext()}) i.NoError(err) + rebalanceMethod := "" + if useCCTP { + rebalanceMethod = "cctp" + } + // first the simple part, add the token to the token map cfg.Chains[int(backend.GetChainID())].Tokens[tokenType.Name()] = relconfig.TokenConfig{ - Address: tokenAddress, - Decimals: decimals, - PriceUSD: 1, // TODO: this will break on non-stables + Address: tokenAddress, + Decimals: decimals, + PriceUSD: 1, // TODO: this will break on non-stables + RebalanceMethod: rebalanceMethod, + MaintenanceBalancePct: 20, + InitialBalancePct: 50, } compatibleTokens := []contracts.ContractType{tokenType} - // DAI/USDT are fungible - if tokenType == testutil.DAIType || tokenType == testutil.USDCType { - compatibleTokens = []contracts.ContractType{testutil.DAIType, testutil.USDCType} + // DAI/USDC are fungible + if tokenType == testutil.DAIType || tokenType == cctpTest.MockMintBurnTokenType { + compatibleTokens = 
[]contracts.ContractType{testutil.DAIType, cctpTest.MockMintBurnTokenType} } // now we need to add the token to the quotable tokens map for _, token := range compatibleTokens { otherBackend := i.getOtherBackend(backend) - otherToken := i.manager.Get(i.GetTestContext(), otherBackend, token).Address().String() + var otherToken string + if token == cctpTest.MockMintBurnTokenType { + otherToken = i.cctpDeployManager.Get(i.GetTestContext(), otherBackend, cctpTest.MockMintBurnTokenType).Address().String() + } else { + otherToken = i.manager.Get(i.GetTestContext(), otherBackend, token).Address().String() + } cfg.QuotableTokens[quotableTokenID] = append(cfg.QuotableTokens[quotableTokenID], fmt.Sprintf("%d-%s", otherBackend.GetChainID(), otherToken)) } + + // register the token with cctp contract + cctpContract, cctpHandle := i.cctpDeployManager.GetSynapseCCTP(i.GetTestContext(), backend) + txOpts := backend.GetTxContext(i.GetTestContext(), cctpContract.OwnerPtr()) + tokenName := fmt.Sprintf("CCTP.%s", tokenType.Name()) + tx, err := cctpHandle.AddToken(txOpts.TransactOpts, tokenName, tokenCaller.Address(), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)) + i.Require().NoError(err) + backend.WaitForConfirmation(i.GetTestContext(), tx) } } @@ -297,4 +377,9 @@ func (i *IntegrationSuite) setupRelayer() { go func() { err = i.relayer.Start(i.GetTestContext()) }() + + dbType, err := dbcommon.DBTypeFromString(cfg.Database.Type) + i.NoError(err) + i.store, err = connect.Connect(i.GetTestContext(), dbType, cfg.Database.DSN, i.metrics) + i.NoError(err) } diff --git a/services/rfq/go.mod b/services/rfq/go.mod index cf69460fd1..f6d0289d79 100644 --- a/services/rfq/go.mod +++ b/services/rfq/go.mod @@ -14,7 +14,6 @@ require ( github.com/ipfs/go-log v1.0.5 github.com/jellydator/ttlcache/v3 v3.1.1 github.com/jftuga/ellipsis v1.0.0 - github.com/jpillora/backoff v1.0.0 github.com/lmittmann/w3 v0.10.0 github.com/phayes/freeport 
v0.0.0-20220201140144-74d24b5ae9f5 github.com/puzpuzpuz/xsync/v2 v2.5.1 @@ -22,7 +21,8 @@ require ( github.com/stretchr/testify v1.8.4 github.com/synapsecns/sanguine/contrib/screener-api v0.0.0-00010101000000-000000000000 github.com/synapsecns/sanguine/core v0.0.0-00010101000000-000000000000 - github.com/synapsecns/sanguine/ethergo v0.0.2 + github.com/synapsecns/sanguine/ethergo v0.1.0 + github.com/synapsecns/sanguine/services/cctp-relayer v0.0.0-00010101000000-000000000000 github.com/synapsecns/sanguine/services/omnirpc v0.0.0-00010101000000-000000000000 github.com/urfave/cli/v2 v2.25.7 go.opentelemetry.io/otel v1.22.0 @@ -128,7 +128,6 @@ require ( github.com/go-stack/stack v1.8.1 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gofrs/flock v0.8.1 // indirect - github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/gogo/protobuf v1.3.3 // indirect github.com/golang-jwt/jwt/v4 v4.4.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -167,6 +166,7 @@ require ( github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect + github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/klauspost/compress v1.17.3 // indirect @@ -288,6 +288,7 @@ replace ( github.com/synapsecns/sanguine/contrib/screener-api => ../../contrib/screener-api github.com/synapsecns/sanguine/core => ../../core github.com/synapsecns/sanguine/ethergo => ../../ethergo + github.com/synapsecns/sanguine/services/cctp-relayer => ../cctp-relayer github.com/synapsecns/sanguine/services/omnirpc => ../omnirpc 
github.com/synapsecns/sanguine/services/scribe => ../scribe github.com/synapsecns/sanguine/tools => ../../tools diff --git a/services/rfq/relayer/chain/chain.go b/services/rfq/relayer/chain/chain.go index 36da42a345..1655c36cfa 100644 --- a/services/rfq/relayer/chain/chain.go +++ b/services/rfq/relayer/chain/chain.go @@ -11,9 +11,9 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/synapsecns/sanguine/core" "github.com/synapsecns/sanguine/ethergo/client" + "github.com/synapsecns/sanguine/ethergo/listener" "github.com/synapsecns/sanguine/ethergo/submitter" "github.com/synapsecns/sanguine/services/rfq/contracts/fastbridge" - "github.com/synapsecns/sanguine/services/rfq/relayer/listener" "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" ) diff --git a/services/rfq/relayer/cmd/commands.go b/services/rfq/relayer/cmd/commands.go index f3fe24267e..0e790a8843 100644 --- a/services/rfq/relayer/cmd/commands.go +++ b/services/rfq/relayer/cmd/commands.go @@ -17,10 +17,10 @@ var configFlag = &cli.StringFlag{ TakesFile: true, } -// runCommand runs the cctp relayer. +// runCommand runs the rfq relayer. var runCommand = &cli.Command{ Name: "run", - Description: "run the API Server", + Description: "run the relayer", Flags: []cli.Flag{configFlag, &commandline.LogLevel}, Action: func(c *cli.Context) (err error) { commandline.SetLogLevel(c) diff --git a/services/rfq/relayer/inventory/export_test.go b/services/rfq/relayer/inventory/export_test.go new file mode 100644 index 0000000000..1a7c2dd7ad --- /dev/null +++ b/services/rfq/relayer/inventory/export_test.go @@ -0,0 +1,11 @@ +package inventory + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/synapsecns/sanguine/services/rfq/relayer/relconfig" +) + +// GetRebalance is a wrapper around the internal getRebalance function. 
+func GetRebalance(cfg relconfig.Config, tokens map[int]map[common.Address]*TokenMetadata, chainID int, token common.Address) (*RebalanceData, error) { + return getRebalance(nil, cfg, tokens, chainID, token) +} diff --git a/services/rfq/relayer/inventory/manager.go b/services/rfq/relayer/inventory/manager.go index 7e4fc35f17..658d1a7220 100644 --- a/services/rfq/relayer/inventory/manager.go +++ b/services/rfq/relayer/inventory/manager.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "strconv" "sync" "time" @@ -17,6 +18,7 @@ import ( "github.com/lmittmann/w3/w3types" "github.com/synapsecns/sanguine/core" "github.com/synapsecns/sanguine/core/metrics" + "github.com/synapsecns/sanguine/ethergo/client" "github.com/synapsecns/sanguine/ethergo/submitter" "github.com/synapsecns/sanguine/services/rfq/contracts/ierc20" "github.com/synapsecns/sanguine/services/rfq/relayer/chain" @@ -32,6 +34,8 @@ import ( // //go:generate go run github.com/vektra/mockery/v2 --name Manager --output ./mocks --case=underscore type Manager interface { + // Start starts the inventory manager. + Start(ctx context.Context) (err error) // GetCommittableBalance gets the total balance available for quotes // this does not include on-chain balances committed in previous quotes that may be // refunded in the event of a revert. @@ -39,14 +43,17 @@ type Manager interface { // GetCommittableBalances gets the total balances committable for all tracked tokens. GetCommittableBalances(ctx context.Context, options ...BalanceFetchArgOption) (map[int]map[common.Address]*big.Int, error) // ApproveAllTokens approves all tokens for the relayer address. - ApproveAllTokens(ctx context.Context, submitter submitter.TransactionSubmitter) error + ApproveAllTokens(ctx context.Context) error // HasSufficientGas checks if there is sufficient gas for a given route. 
HasSufficientGas(ctx context.Context, origin, dest int) (bool, error) + // Rebalance checks whether a given token should be rebalanced, and + // executes the rebalance if necessary. + Rebalance(ctx context.Context, chainID int, token common.Address) error } type inventoryManagerImpl struct { - // map chainID->address->tokenMetadata - tokens map[int]map[common.Address]*tokenMetadata + // map chainID->address->TokenMetadata + tokens map[int]map[common.Address]*TokenMetadata // map chainID->balance gasBalances map[int]*big.Int // mux contains the mutex @@ -59,7 +66,12 @@ type inventoryManagerImpl struct { relayerAddress common.Address // chainClient is an omnirpc client chainClient submitter.ClientFetcher - db reldb.Service + // txSubmitter is the transaction submitter + txSubmitter submitter.TransactionSubmitter + // rebalanceManagers is the map of rebalance managers + rebalanceManagers map[relconfig.RebalanceMethod]RebalanceManager + // db is the database + db reldb.Service } // GetCommittableBalance gets the committable balances. @@ -99,7 +111,7 @@ func (i *inventoryManagerImpl) GetCommittableBalances(ctx context.Context, optio for chainID, tokenMap := range i.tokens { res[chainID] = map[common.Address]*big.Int{} for address, tokenData := range tokenMap { - res[chainID][address] = core.CopyBigInt(tokenData.balance) + res[chainID][address] = core.CopyBigInt(tokenData.Balance) // now subtract by in flight quotes. // Yeah, this is an algorithmically atrocious for // TODO: fix, but we're really talking about 4 tokens @@ -116,12 +128,16 @@ func (i *inventoryManagerImpl) GetCommittableBalances(ctx context.Context, optio return res, nil } -type tokenMetadata struct { - name string - balance *big.Int - decimals uint8 - startAllowance *big.Int - isGasToken bool +// TokenMetadata contains metadata for a token. 
+type TokenMetadata struct { + Name string + Balance *big.Int + Decimals uint8 + StartAllowanceRFQ *big.Int + StartAllowanceCCTP *big.Int + IsGasToken bool + ChainID int + Addr common.Address } var ( @@ -134,46 +150,115 @@ var ( // TODO: replace w/ config. const defaultPollPeriod = 5 -// NewInventoryManager creates a list of tokens we should use. -func NewInventoryManager(ctx context.Context, clientFetcher submitter.ClientFetcher, handler metrics.Handler, cfg relconfig.Config, relayer common.Address, db reldb.Service) (Manager, error) { +// NewInventoryManager creates a new inventory manager. +// TODO: too many args here. +// +//nolint:gocognit +func NewInventoryManager(ctx context.Context, clientFetcher submitter.ClientFetcher, handler metrics.Handler, cfg relconfig.Config, relayer common.Address, txSubmitter submitter.TransactionSubmitter, db reldb.Service) (Manager, error) { + rebalanceMethods, err := cfg.GetRebalanceMethods() + if err != nil { + return nil, fmt.Errorf("could not get rebalance methods: %w", err) + } + rebalanceManagers := make(map[relconfig.RebalanceMethod]RebalanceManager) + for method := range rebalanceMethods { + //nolint:exhaustive + switch method { + case relconfig.RebalanceMethodCCTP: + rebalanceManagers[method] = newRebalanceManagerCCTP(cfg, handler, clientFetcher, txSubmitter, relayer, db) + default: + return nil, fmt.Errorf("unsupported rebalance method: %s", method) + } + } + i := inventoryManagerImpl{ - relayerAddress: relayer, - handler: handler, - cfg: cfg, - chainClient: clientFetcher, - db: db, + relayerAddress: relayer, + handler: handler, + cfg: cfg, + chainClient: clientFetcher, + txSubmitter: txSubmitter, + rebalanceManagers: rebalanceManagers, + db: db, } - err := i.initializeTokens(ctx, cfg) + err = i.initializeTokens(ctx, cfg) if err != nil { return nil, fmt.Errorf("could not initialize tokens: %w", err) } - // TODO: move - go func() { + return &i, nil +} + +//nolint:gocognit,cyclop +func (i *inventoryManagerImpl) Start(ctx 
context.Context) error { + g, _ := errgroup.WithContext(ctx) + for _, rebalanceManager := range i.rebalanceManagers { + rebalanceManager := rebalanceManager + g.Go(func() error { + err := rebalanceManager.Start(ctx) + if err != nil { + return fmt.Errorf("could not start rebalance manager: %w", err) + } + return nil + }) + } + + // continuously refresh balances + g.Go(func() error { for { select { case <-ctx.Done(): - return - case <-time.After(defaultPollPeriod * time.Second): + return fmt.Errorf("context canceled: %w", ctx.Err()) + case <-time.After(250 * time.Millisecond): // this returning an error isn't really possible unless a config error happens // TODO: need better error handling. - err = i.refreshBalances(ctx) + err := i.refreshBalances(ctx) if err != nil { logger.Errorf("could not refresh balances") - return + //nolint:nilerr + return nil } } } - }() + }) - return &i, nil + // continuously check for rebalances + rebalanceInterval := i.cfg.GetRebalanceInterval() + if rebalanceInterval > 0 { + g.Go(func() error { + for { + select { + case <-ctx.Done(): + return fmt.Errorf("context canceled: %w", ctx.Err()) + case <-time.After(rebalanceInterval): + err := i.refreshBalances(ctx) + if err != nil { + return fmt.Errorf("could not refresh balances: %w", err) + } + for chainID, chainConfig := range i.cfg.Chains { + for tokenName, tokenConfig := range chainConfig.Tokens { + err = i.Rebalance(ctx, chainID, common.HexToAddress(tokenConfig.Address)) + if err != nil { + logger.Errorf("could not rebalance %s on chain %d: %v", tokenName, chainID, err) + } + } + } + } + } + }) + } + + err := g.Wait() + if err != nil { + return fmt.Errorf("error starting inventory manager: %w", err) + } + return nil } const maxBatchSize = 10 // ApproveAllTokens approves all checks if allowance is set and if not approves. 
-func (i *inventoryManagerImpl) ApproveAllTokens(ctx context.Context, submitter submitter.TransactionSubmitter) error { +// nolint:gocognit,nestif,cyclop +func (i *inventoryManagerImpl) ApproveAllTokens(ctx context.Context) error { i.mux.RLock() defer i.mux.RUnlock() @@ -184,26 +269,31 @@ func (i *inventoryManagerImpl) ApproveAllTokens(ctx context.Context, submitter s } for address, token := range tokenMap { - // if startAllowance is 0 - if address != chain.EthAddress && token.startAllowance.Cmp(big.NewInt(0)) == 0 { - chainID := chainID // capture func literal - address := address // capture func literal - // init an approval in submitter. Note: in the case where submitter hasn't finished from last boot, this will double submit approvals unfortanutely - _, err = submitter.SubmitTransaction(ctx, big.NewInt(int64(chainID)), func(transactor *bind.TransactOpts) (tx *types.Transaction, err error) { - erc20, err := ierc20.NewIERC20(address, backendClient) - if err != nil { - return nil, fmt.Errorf("could not get erc20: %w", err) - } - - approveAmount, err := erc20.Approve(transactor, common.HexToAddress(i.cfg.Chains[chainID].Bridge), abi.MaxInt256) - if err != nil { - return nil, fmt.Errorf("could not approve: %w", err) - } + // approve RFQ contract. + // Note: in the case where submitter hasn't finished from last boot, + // this will double submit approvals unfortunately. 
+ if address != chain.EthAddress && token.StartAllowanceRFQ.Cmp(big.NewInt(0)) == 0 { + tokenAddr := address // capture func literal + contractAddr, err := i.cfg.GetRFQAddress(chainID) + if err != nil { + return fmt.Errorf("could not get RFQ address: %w", err) + } + err = i.approve(ctx, tokenAddr, common.HexToAddress(contractAddr), backendClient) + if err != nil { + return fmt.Errorf("could not approve RFQ contract: %w", err) + } + } - return approveAmount, nil - }) + // approve CCTP contract + if address != chain.EthAddress && token.StartAllowanceCCTP.Cmp(big.NewInt(0)) == 0 { + tokenAddr := address // capture func literal + contractAddr, err := i.cfg.GetCCTPAddress(chainID) if err != nil { - return fmt.Errorf("could not submit approval: %w", err) + return fmt.Errorf("could not get CCTP address: %w", err) + } + err = i.approve(ctx, tokenAddr, common.HexToAddress(contractAddr), backendClient) + if err != nil { + return fmt.Errorf("could not approve CCTP contract: %w", err) } } } @@ -211,6 +301,30 @@ func (i *inventoryManagerImpl) ApproveAllTokens(ctx context.Context, submitter s return nil } +// approve submits an ERC20 approval for a given token and contract address. 
+func (i *inventoryManagerImpl) approve(ctx context.Context, tokenAddr, contractAddr common.Address, backendClient client.EVM) (err error) { + erc20, err := ierc20.NewIERC20(tokenAddr, backendClient) + if err != nil { + return fmt.Errorf("could not get erc20: %w", err) + } + chainID, err := backendClient.ChainID(ctx) + if err != nil { + return fmt.Errorf("could not get chain id: %w", err) + } + + _, err = i.txSubmitter.SubmitTransaction(ctx, chainID, func(transactor *bind.TransactOpts) (tx *types.Transaction, err error) { + tx, err = erc20.Approve(transactor, contractAddr, abi.MaxInt256) + if err != nil { + return nil, fmt.Errorf("could not approve: %w", err) + } + return tx, nil + }) + if err != nil { + return fmt.Errorf("could not submit approval: %w", err) + } + return nil +} + // HasSufficientGas checks if there is sufficient gas for a given route. func (i *inventoryManagerImpl) HasSufficientGas(ctx context.Context, origin, dest int) (sufficient bool, err error) { gasThresh, err := i.cfg.GetMinGasToken(dest) @@ -230,7 +344,156 @@ func (i *inventoryManagerImpl) HasSufficientGas(ctx context.Context, origin, des return sufficient, nil } -// initializes tokens converts the configuration into a data structure we can use to determine inventory +// Rebalance checks whether a given token should be rebalanced, and executes the rebalance if necessary. +// Note that if there are multiple tokens whose balance is below the maintenance balance, only the lowest balance +// will be rebalanced. 
+func (i *inventoryManagerImpl) Rebalance(parentCtx context.Context, chainID int, token common.Address) error { + // evaluate the rebalance method + method, err := i.cfg.GetRebalanceMethod(chainID, token.Hex()) + if err != nil { + return fmt.Errorf("could not get rebalance method: %w", err) + } + if method == relconfig.RebalanceMethodNone { + return nil + } + ctx, span := i.handler.Tracer().Start(parentCtx, "Rebalance", trace.WithAttributes( + attribute.Int(metrics.ChainID, chainID), + attribute.String("token", token.Hex()), + attribute.String("rebalance_method", method.String()), + )) + defer func(err error) { + metrics.EndSpanWithErr(span, err) + }(err) + + // build the rebalance action + rebalance, err := getRebalance(span, i.cfg, i.tokens, chainID, token) + if err != nil { + return fmt.Errorf("could not get rebalance: %w", err) + } + if rebalance == nil { + return nil + } + span.SetAttributes( + attribute.String("rebalance_origin", strconv.Itoa(rebalance.OriginMetadata.ChainID)), + attribute.String("rebalance_dest", strconv.Itoa(rebalance.DestMetadata.ChainID)), + attribute.String("rebalance_amount", rebalance.Amount.String()), + ) + + // make sure there are no pending rebalances that touch the given path + pending, err := i.db.HasPendingRebalance(ctx, uint64(rebalance.OriginMetadata.ChainID), uint64(rebalance.DestMetadata.ChainID)) + if err != nil { + return fmt.Errorf("could not check pending rebalance: %w", err) + } + span.SetAttributes(attribute.Bool("rebalance_pending", pending)) + if pending { + return nil + } + + // execute the rebalance + manager, ok := i.rebalanceManagers[method] + if !ok { + return fmt.Errorf("no rebalance manager for method: %s", method) + } + err = manager.Execute(ctx, rebalance) + if err != nil { + return fmt.Errorf("could not execute rebalance: %w", err) + } + return nil +} + +//nolint:cyclop,gocognit +func getRebalance(span trace.Span, cfg relconfig.Config, tokens map[int]map[common.Address]*TokenMetadata, chainID int, token 
common.Address) (rebalance *RebalanceData, err error) { + maintenancePct, err := cfg.GetMaintenanceBalancePct(chainID, token.Hex()) + if err != nil { + return nil, fmt.Errorf("could not get maintenance pct: %w", err) + } + + // get token metadata + var rebalanceTokenData *TokenMetadata + for address, tokenData := range tokens[chainID] { + if address == token { + rebalanceTokenData = tokenData + break + } + } + + // get total balance for given token across all chains + totalBalance := big.NewInt(0) + for _, tokenMap := range tokens { + for _, tokenData := range tokenMap { + if tokenData.Name == rebalanceTokenData.Name { + totalBalance.Add(totalBalance, tokenData.Balance) + } + } + } + + // check if any balances are below maintenance threshold + var minTokenData, maxTokenData *TokenMetadata + for _, tokenMap := range tokens { + for _, tokenData := range tokenMap { + if tokenData.Name == rebalanceTokenData.Name { + if minTokenData == nil || tokenData.Balance.Cmp(minTokenData.Balance) < 0 { + minTokenData = tokenData + } + if maxTokenData == nil || tokenData.Balance.Cmp(maxTokenData.Balance) > 0 { + maxTokenData = tokenData + } + } + } + } + + // get the initialPct for the origin chain + initialPct, err := cfg.GetInitialBalancePct(maxTokenData.ChainID, maxTokenData.Addr.Hex()) + if err != nil { + return nil, fmt.Errorf("could not get initial pct: %w", err) + } + maintenanceThresh, _ := new(big.Float).Mul(new(big.Float).SetInt(totalBalance), big.NewFloat(maintenancePct/100)).Int(nil) + if span != nil { + span.SetAttributes(attribute.Float64("maintenance_pct", maintenancePct)) + span.SetAttributes(attribute.Float64("initial_pct", initialPct)) + span.SetAttributes(attribute.String("max_token_balance", maxTokenData.Balance.String())) + span.SetAttributes(attribute.String("min_token_balance", minTokenData.Balance.String())) + span.SetAttributes(attribute.String("total_balance", totalBalance.String())) + span.SetAttributes(attribute.String("maintenance_thresh", 
maintenanceThresh.String())) + } + + // check if the minimum balance is below the threshold and trigger rebalance + if minTokenData.Balance.Cmp(maintenanceThresh) > 0 { + return rebalance, nil + } + + // calculate the amount to rebalance vs the initial threshold on origin + initialThresh, _ := new(big.Float).Mul(new(big.Float).SetInt(totalBalance), big.NewFloat(initialPct/100)).Int(nil) + amount := new(big.Int).Sub(maxTokenData.Balance, initialThresh) + + // no need to rebalance since amount would be negative + if amount.Cmp(big.NewInt(0)) < 0 { + //nolint:nilnil + return nil, nil + } + + // clip the rebalance amount by the configured max + maxAmount := cfg.GetMaxRebalanceAmount(maxTokenData.ChainID, maxTokenData.Addr) + if amount.Cmp(maxAmount) > 0 { + amount = maxAmount + } + if span != nil { + span.SetAttributes( + attribute.String("initial_thresh", initialThresh.String()), + attribute.String("rebalance_amount", amount.String()), + attribute.String("max_rebalance_amount", maxAmount.String()), + ) + } + + rebalance = &RebalanceData{ + OriginMetadata: maxTokenData, + DestMetadata: minTokenData, + Amount: amount, + } + return rebalance, nil +} + +// initializeTokens converts the configuration into a data structure we can use to determine inventory // it gets metadata like name, decimals, etc once and exports these to prometheus for ease of debugging. func (i *inventoryManagerImpl) initializeTokens(parentCtx context.Context, cfg relconfig.Config) (err error) { i.mux.Lock() @@ -247,7 +510,7 @@ func (i *inventoryManagerImpl) initializeTokens(parentCtx context.Context, cfg r meter := i.handler.Meter("github.com/synapsecns/sanguine/services/rfq/relayer/inventory") // TODO: this needs to be a struct bound variable otherwise will be stuck. 
- i.tokens = make(map[int]map[common.Address]*tokenMetadata) + i.tokens = make(map[int]map[common.Address]*TokenMetadata) i.gasBalances = make(map[int]*big.Int) type registerCall func() error @@ -258,7 +521,7 @@ func (i *inventoryManagerImpl) initializeTokens(parentCtx context.Context, cfg r // iterate through all tokens to get the metadata for chainID, chainCfg := range cfg.GetChains() { - i.tokens[chainID] = map[common.Address]*tokenMetadata{} + i.tokens[chainID] = map[common.Address]*TokenMetadata{} // set up balance fetching for this chain's gas token i.gasBalances[chainID] = new(big.Int) @@ -272,39 +535,51 @@ func (i *inventoryManagerImpl) initializeTokens(parentCtx context.Context, cfg r if err != nil { return fmt.Errorf("could not get native token: %w", err) } - rtoken := &tokenMetadata{ - isGasToken: tokenName == nativeToken, + rtoken := &TokenMetadata{ + IsGasToken: tokenName == nativeToken, + ChainID: chainID, } var token common.Address - if rtoken.isGasToken { + if rtoken.IsGasToken { token = chain.EthAddress } else { token = common.HexToAddress(tokenCfg.Address) } i.tokens[chainID][token] = rtoken + rtoken.Addr = token // requires non-nil pointer - rtoken.balance = new(big.Int) - rtoken.startAllowance = new(big.Int) - - if rtoken.isGasToken { - rtoken.decimals = 18 - rtoken.name = tokenName - rtoken.balance = i.gasBalances[chainID] + rtoken.Balance = new(big.Int) + rtoken.StartAllowanceRFQ = new(big.Int) + rtoken.StartAllowanceCCTP = new(big.Int) + + if rtoken.IsGasToken { + rtoken.Decimals = 18 + rtoken.Name = tokenName + rtoken.Balance = i.gasBalances[chainID] // TODO: start allowance? 
} else { + rfqAddr, err := cfg.GetRFQAddress(chainID) + if err != nil { + return fmt.Errorf("could not get rfq address: %w", err) + } + cctpAddr, err := cfg.GetCCTPAddress(chainID) + if err != nil { + return fmt.Errorf("could not get cctp address: %w", err) + } deferredCalls[chainID] = append(deferredCalls[chainID], - eth.CallFunc(funcBalanceOf, token, i.relayerAddress).Returns(rtoken.balance), - eth.CallFunc(funcDecimals, token).Returns(&rtoken.decimals), - eth.CallFunc(funcName, token).Returns(&rtoken.name), - eth.CallFunc(funcAllowance, token, i.relayerAddress, common.HexToAddress(i.cfg.Chains[chainID].Bridge)).Returns(rtoken.startAllowance), + eth.CallFunc(funcBalanceOf, token, i.relayerAddress).Returns(rtoken.Balance), + eth.CallFunc(funcDecimals, token).Returns(&rtoken.Decimals), + eth.CallFunc(funcName, token).Returns(&rtoken.Name), + eth.CallFunc(funcAllowance, token, i.relayerAddress, common.HexToAddress(rfqAddr)).Returns(rtoken.StartAllowanceRFQ), + eth.CallFunc(funcAllowance, token, i.relayerAddress, common.HexToAddress(cctpAddr)).Returns(rtoken.StartAllowanceCCTP), ) } chainID := chainID // capture func literal deferredRegisters = append(deferredRegisters, func() error { - //nolint: wrapcheck + //nolint:wrapcheck return i.registerMetric(meter, chainID, token) }) } @@ -373,8 +648,8 @@ func (i *inventoryManagerImpl) refreshBalances(ctx context.Context) error { // queue token balance fetches for tokenAddress, token := range tokenMap { // TODO: make sure Returns does nothing on error - if !token.isGasToken { - deferredCalls = append(deferredCalls, eth.CallFunc(funcBalanceOf, tokenAddress, i.relayerAddress).Returns(token.balance)) + if !token.IsGasToken { + deferredCalls = append(deferredCalls, eth.CallFunc(funcBalanceOf, tokenAddress, i.relayerAddress).Returns(token.Balance)) } } @@ -408,10 +683,10 @@ func (i *inventoryManagerImpl) registerMetric(meter metric.Meter, chainID int, t } attributes := attribute.NewSet(attribute.Int(metrics.ChainID, chainID), 
attribute.String("relayer_address", i.relayerAddress.String()), - attribute.String("token_name", tokenData.name), attribute.Int("decimals", int(tokenData.decimals)), + attribute.String("token_name", tokenData.Name), attribute.Int("decimals", int(tokenData.Decimals)), attribute.String("token_address", token.String())) - observer.ObserveFloat64(balanceGauge, core.BigToDecimals(tokenData.balance, tokenData.decimals), metric.WithAttributeSet(attributes)) + observer.ObserveFloat64(balanceGauge, core.BigToDecimals(tokenData.Balance, tokenData.Decimals), metric.WithAttributeSet(attributes)) return nil }, balanceGauge); err != nil { diff --git a/services/rfq/relayer/inventory/manager_test.go b/services/rfq/relayer/inventory/manager_test.go index 01909cd920..11d33709f6 100644 --- a/services/rfq/relayer/inventory/manager_test.go +++ b/services/rfq/relayer/inventory/manager_test.go @@ -52,8 +52,118 @@ func (i *InventoryTestSuite) TestInventoryBootAndRefresh() { } } - im, err := inventory.NewInventoryManager(i.GetTestContext(), omnirpcClient.NewOmnirpcClient(i.omnirpcURL, metrics.Get()), metrics.Get(), cfg, i.relayer.Address(), i.db) + im, err := inventory.NewInventoryManager(i.GetTestContext(), omnirpcClient.NewOmnirpcClient(i.omnirpcURL, metrics.Get()), metrics.Get(), cfg, i.relayer.Address(), nil, i.db) i.Require().NoError(err) _ = im } + +func (i *InventoryTestSuite) TestGetRebalance() { + origin := 1 + dest := 2 + extra := 3 + usdcDataOrigin := inventory.TokenMetadata{ + Name: "USDC", + Decimals: 6, + ChainID: origin, + Addr: common.HexToAddress("0x0000000000000000000000000000000000000123"), + } + usdcDataDest := inventory.TokenMetadata{ + Name: "USDC", + Decimals: 6, + ChainID: dest, + Addr: common.HexToAddress("0x0000000000000000000000000000000000000456"), + } + usdcDataExtra := inventory.TokenMetadata{ + Name: "USDC", + Decimals: 6, + ChainID: extra, + Addr: common.HexToAddress("0x0000000000000000000000000000000000000789"), + } + tokens := 
map[int]map[common.Address]*inventory.TokenMetadata{ + origin: { + usdcDataOrigin.Addr: &usdcDataOrigin, + }, + dest: { + usdcDataDest.Addr: &usdcDataDest, + }, + } + getConfig := func(maxRebalanceAmount string) relconfig.Config { + return relconfig.Config{ + Chains: map[int]relconfig.ChainConfig{ + origin: { + Tokens: map[string]relconfig.TokenConfig{ + "USDC": { + Address: usdcDataOrigin.Addr.Hex(), + Decimals: 6, + MaintenanceBalancePct: 20, + InitialBalancePct: 50, + MaxRebalanceAmount: maxRebalanceAmount, + }, + }, + }, + dest: { + Tokens: map[string]relconfig.TokenConfig{ + "USDC": { + Address: usdcDataDest.Addr.Hex(), + Decimals: 6, + MaintenanceBalancePct: 20, + InitialBalancePct: 50, + MaxRebalanceAmount: maxRebalanceAmount, + }, + }, + }, + extra: { + Tokens: map[string]relconfig.TokenConfig{ + "USDC": { + Address: usdcDataExtra.Addr.Hex(), + Decimals: 6, + MaintenanceBalancePct: 0, + InitialBalancePct: 0, + MaxRebalanceAmount: maxRebalanceAmount, + }, + }, + }, + }, + } + } + + // 10 USDC on both chains; no rebalance needed + cfg := getConfig("") + usdcDataOrigin.Balance = big.NewInt(1e7) + usdcDataDest.Balance = big.NewInt(1e7) + rebalance, err := inventory.GetRebalance(cfg, tokens, origin, usdcDataOrigin.Addr) + i.NoError(err) + i.Nil(rebalance) + + // Set origin balance below maintenance threshold; need rebalance + usdcDataOrigin.Balance = big.NewInt(9e6) + usdcDataDest.Balance = big.NewInt(1e6) + rebalance, err = inventory.GetRebalance(cfg, tokens, origin, usdcDataOrigin.Addr) + i.NoError(err) + expected := &inventory.RebalanceData{ + OriginMetadata: &usdcDataOrigin, + DestMetadata: &usdcDataDest, + Amount: big.NewInt(4e6), + } + i.Equal(expected, rebalance) + + // Set max rebalance amount + cfgWithMax := getConfig("1.1") + rebalance, err = inventory.GetRebalance(cfgWithMax, tokens, origin, usdcDataOrigin.Addr) + i.NoError(err) + expected = &inventory.RebalanceData{ + OriginMetadata: &usdcDataOrigin, + DestMetadata: &usdcDataDest, + Amount: 
big.NewInt(1.1e6), + } + i.Equal(expected, rebalance) + + // Increase initial threshold so that no rebalance can occur from origin + usdcDataOrigin.Balance = big.NewInt(2e6) + usdcDataDest.Balance = big.NewInt(1e6) + usdcDataExtra.Balance = big.NewInt(7e6) + rebalance, err = inventory.GetRebalance(cfg, tokens, origin, usdcDataOrigin.Addr) + i.NoError(err) + i.Nil(rebalance) +} diff --git a/services/rfq/relayer/inventory/mocks/manager.go b/services/rfq/relayer/inventory/mocks/manager.go index 4c097661b8..1e9cc66c85 100644 --- a/services/rfq/relayer/inventory/mocks/manager.go +++ b/services/rfq/relayer/inventory/mocks/manager.go @@ -11,8 +11,6 @@ import ( inventory "github.com/synapsecns/sanguine/services/rfq/relayer/inventory" mock "github.com/stretchr/testify/mock" - - submitter "github.com/synapsecns/sanguine/ethergo/submitter" ) // Manager is an autogenerated mock type for the Manager type @@ -20,13 +18,13 @@ type Manager struct { mock.Mock } -// ApproveAllTokens provides a mock function with given fields: ctx, _a1 -func (_m *Manager) ApproveAllTokens(ctx context.Context, _a1 submitter.TransactionSubmitter) error { - ret := _m.Called(ctx, _a1) +// ApproveAllTokens provides a mock function with given fields: ctx +func (_m *Manager) ApproveAllTokens(ctx context.Context) error { + ret := _m.Called(ctx) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, submitter.TransactionSubmitter) error); ok { - r0 = rf(ctx, _a1) + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) } else { r0 = ret.Error(0) } @@ -115,6 +113,34 @@ func (_m *Manager) HasSufficientGas(ctx context.Context, origin int, dest int) ( return r0, r1 } +// Rebalance provides a mock function with given fields: ctx, chainID, token +func (_m *Manager) Rebalance(ctx context.Context, chainID int, token common.Address) error { + ret := _m.Called(ctx, chainID, token) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int, common.Address) 
error); ok { + r0 = rf(ctx, chainID, token) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: ctx +func (_m *Manager) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + type mockConstructorTestingTNewManager interface { mock.TestingT Cleanup(func()) diff --git a/services/rfq/relayer/inventory/rebalance.go b/services/rfq/relayer/inventory/rebalance.go new file mode 100644 index 0000000000..55535ecbdf --- /dev/null +++ b/services/rfq/relayer/inventory/rebalance.go @@ -0,0 +1,258 @@ +package inventory + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/synapsecns/sanguine/core/metrics" + "github.com/synapsecns/sanguine/ethergo/listener" + "github.com/synapsecns/sanguine/ethergo/submitter" + "github.com/synapsecns/sanguine/services/cctp-relayer/contracts/cctp" + "github.com/synapsecns/sanguine/services/rfq/relayer/relconfig" + "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" +) + +// RebalanceData contains metadata for a rebalance action. +type RebalanceData struct { + OriginMetadata *TokenMetadata + DestMetadata *TokenMetadata + Amount *big.Int +} + +// RebalanceManager is the interface for the rebalance manager. +type RebalanceManager interface { + // Start starts the rebalance manager. + Start(ctx context.Context) (err error) + // Execute executes a rebalance action. 
+ Execute(ctx context.Context, rebalance *RebalanceData) error +} + +type rebalanceManagerCCTP struct { + // cfg is the config + cfg relconfig.Config + // handler is the metrics handler + handler metrics.Handler + // chainClient is an omnirpc client + chainClient submitter.ClientFetcher + // txSubmitter is the transaction submitter + txSubmitter submitter.TransactionSubmitter + // cctpContracts is the map of cctp contracts (used for rebalancing) + cctpContracts map[int]*cctp.SynapseCCTP + // relayerAddress contains the relayer address + relayerAddress common.Address + // chainListeners is the map of chain listeners for CCTP events + chainListeners map[int]listener.ContractListener + // db is the database + db reldb.Service +} + +func newRebalanceManagerCCTP(cfg relconfig.Config, handler metrics.Handler, chainClient submitter.ClientFetcher, txSubmitter submitter.TransactionSubmitter, relayerAddress common.Address, db reldb.Service) *rebalanceManagerCCTP { + return &rebalanceManagerCCTP{ + cfg: cfg, + handler: handler, + chainClient: chainClient, + txSubmitter: txSubmitter, + cctpContracts: make(map[int]*cctp.SynapseCCTP), + relayerAddress: relayerAddress, + chainListeners: make(map[int]listener.ContractListener), + db: db, + } +} + +func (c *rebalanceManagerCCTP) Start(ctx context.Context) (err error) { + err = c.initContracts(ctx) + if err != nil { + return fmt.Errorf("could not initialize contracts: %w", err) + } + + err = c.initListeners(ctx) + if err != nil { + return fmt.Errorf("could not initialize listeners: %w", err) + } + + g, _ := errgroup.WithContext(ctx) + for cid := range c.cfg.Chains { + // capture func literal + chainID := cid + g.Go(func() error { + return c.listen(ctx, chainID) + }) + } + + err = g.Wait() + if err != nil { + return fmt.Errorf("error listening to contract: %w", err) + } + return nil +} + +func (c *rebalanceManagerCCTP) initContracts(ctx context.Context) (err error) { + for chainID := range c.cfg.Chains { + contractAddr, err := 
c.cfg.GetCCTPAddress(chainID) + if err != nil { + return fmt.Errorf("could not get cctp address: %w", err) + } + chainClient, err := c.chainClient.GetClient(ctx, big.NewInt(int64(chainID))) + if err != nil { + return fmt.Errorf("could not get chain client: %w", err) + } + contract, err := cctp.NewSynapseCCTP(common.HexToAddress(contractAddr), chainClient) + if err != nil { + return fmt.Errorf("could not get cctp: %w", err) + } + c.cctpContracts[chainID] = contract + } + return nil +} + +func (c *rebalanceManagerCCTP) initListeners(ctx context.Context) (err error) { + for chainID := range c.cfg.GetChains() { + cctpAddr, err := c.cfg.GetCCTPAddress(chainID) + if err != nil { + return fmt.Errorf("could not get cctp address: %w", err) + } + chainClient, err := c.chainClient.GetClient(ctx, big.NewInt(int64(chainID))) + if err != nil { + return fmt.Errorf("could not get chain client: %w", err) + } + initialBlock, err := c.cfg.GetCCTPStartBlock(chainID) + if err != nil { + return fmt.Errorf("could not get cctp start block: %w", err) + } + chainListener, err := listener.NewChainListener(chainClient, c.db, common.HexToAddress(cctpAddr), initialBlock, c.handler) + if err != nil { + return fmt.Errorf("could not get chain listener: %w", err) + } + c.chainListeners[chainID] = chainListener + } + return nil +} + +func (c *rebalanceManagerCCTP) Execute(parentCtx context.Context, rebalance *RebalanceData) (err error) { + contract, ok := c.cctpContracts[rebalance.OriginMetadata.ChainID] + if !ok { + return fmt.Errorf("could not find cctp contract for chain %d", rebalance.OriginMetadata.ChainID) + } + ctx, span := c.handler.Tracer().Start(parentCtx, "rebalance.Execute", trace.WithAttributes( + attribute.Int("rebalance_origin", rebalance.OriginMetadata.ChainID), + attribute.Int("rebalance_dest", rebalance.DestMetadata.ChainID), + attribute.String("rebalance_amount", rebalance.Amount.String()), + )) + defer func(err error) { + metrics.EndSpanWithErr(span, err) + }(err) + + // perform 
rebalance by calling sendCircleToken() + _, err = c.txSubmitter.SubmitTransaction(ctx, big.NewInt(int64(rebalance.OriginMetadata.ChainID)), func(transactor *bind.TransactOpts) (tx *types.Transaction, err error) { + tx, err = contract.SendCircleToken( + transactor, + c.relayerAddress, + big.NewInt(int64(rebalance.DestMetadata.ChainID)), + rebalance.OriginMetadata.Addr, + rebalance.Amount, + 0, // TODO: inspect + []byte{}, // TODO: inspect + ) + if err != nil { + return nil, fmt.Errorf("could not send circle token: %w", err) + } + return tx, nil + }) + if err != nil { + return fmt.Errorf("could not submit CCTP rebalance: %w", err) + } + + // store the rebalance in the db + model := reldb.Rebalance{ + Origin: uint64(rebalance.OriginMetadata.ChainID), + Destination: uint64(rebalance.DestMetadata.ChainID), + OriginAmount: rebalance.Amount, + Status: reldb.RebalanceInitiated, + } + err = c.db.StoreRebalance(ctx, model) + if err != nil { + return fmt.Errorf("could not store rebalance: %w", err) + } + return nil +} + +// nolint:cyclop +func (c *rebalanceManagerCCTP) listen(parentCtx context.Context, chainID int) (err error) { + listener, ok := c.chainListeners[chainID] + if !ok { + return fmt.Errorf("could not find listener for chain %d", chainID) + } + ethClient, err := c.chainClient.GetClient(parentCtx, big.NewInt(int64(chainID))) + if err != nil { + return fmt.Errorf("could not get chain client: %w", err) + } + cctpAddr := common.HexToAddress(c.cfg.Chains[chainID].CCTPAddress) + parser, err := cctp.NewSynapseCCTPEvents(cctpAddr, ethClient) + if err != nil { + return fmt.Errorf("could not get cctp events: %w", err) + } + + err = listener.Listen(parentCtx, func(parentCtx context.Context, log types.Log) (err error) { + ctx, span := c.handler.Tracer().Start(parentCtx, "rebalance.Listen", trace.WithAttributes( + attribute.Int(metrics.ChainID, chainID), + )) + defer func(err error) { + metrics.EndSpanWithErr(span, err) + }(err) + + switch log.Topics[0] { + case 
cctp.CircleRequestSentTopic: + parsedEvent, err := parser.ParseCircleRequestSent(log) + if err != nil { + logger.Warnf("could not parse circle request sent: %w", err) + return nil + } + if parsedEvent.Sender != c.relayerAddress { + return nil + } + span.SetAttributes( + attribute.String("log_type", "CircleRequestSent"), + attribute.String("request_id", hexutil.Encode(parsedEvent.RequestID[:])), + ) + origin := uint64(chainID) + err = c.db.UpdateRebalanceStatus(ctx, parsedEvent.RequestID, &origin, reldb.RebalancePending) + if err != nil { + logger.Warnf("could not update rebalance status: %w", err) + return nil + } + case cctp.CircleRequestFulfilledTopic: + parsedEvent, err := parser.ParseCircleRequestFulfilled(log) + if err != nil { + logger.Warnf("could not parse circle request fulfilled: %w", err) + return nil + } + if parsedEvent.Recipient != c.relayerAddress { + return nil + } + span.SetAttributes( + attribute.String("log_type", "CircleRequestFulfilled"), + attribute.String("request_id", hexutil.Encode(parsedEvent.RequestID[:])), + ) + err = c.db.UpdateRebalanceStatus(parentCtx, parsedEvent.RequestID, nil, reldb.RebalanceCompleted) + if err != nil { + logger.Warnf("could not update rebalance status: %w", err) + return nil + } + default: + logger.Warnf("unknown event %s", log.Topics[0]) + } + return nil + }) + if err != nil { + return fmt.Errorf("could not listen to contract: %w", err) + } + return nil +} diff --git a/services/rfq/relayer/listener/listener_test.go b/services/rfq/relayer/listener/listener_test.go deleted file mode 100644 index 0797bc9833..0000000000 --- a/services/rfq/relayer/listener/listener_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package listener_test - -import ( - "context" - "math/big" - "sync" - "time" - - "github.com/brianvoe/gofakeit/v6" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - 
"github.com/synapsecns/sanguine/services/rfq/contracts/testcontracts/fastbridgemock" - "github.com/synapsecns/sanguine/services/rfq/relayer/listener" -) - -func (l *ListenerTestSuite) TestListenForEvents() { - _, handle := l.manager.GetMockFastBridge(l.GetTestContext(), l.backend) - var wg sync.WaitGroup - const iterations = 50 - for i := 0; i < iterations; i++ { - i := i - go func(num int) { - wg.Add(1) - defer wg.Done() - - testAddress := common.BigToAddress(big.NewInt(int64(i))) - auth := l.backend.GetTxContext(l.GetTestContext(), nil) - - //nolint: typecheck - txID := [32]byte(crypto.Keccak256(testAddress.Bytes())) - bridgeRequestTX, err := handle.MockBridgeRequest(auth.TransactOpts, txID, testAddress, fastbridgemock.IFastBridgeBridgeParams{ - DstChainId: gofakeit.Uint32(), - Sender: testAddress, - To: testAddress, - OriginToken: testAddress, - DestToken: testAddress, - OriginAmount: new(big.Int).SetUint64(gofakeit.Uint64()), - DestAmount: new(big.Int).SetUint64(gofakeit.Uint64()), - SendChainGas: false, - Deadline: new(big.Int).SetUint64(uint64(time.Now().Add(-1 * time.Second * time.Duration(gofakeit.Uint16())).Unix())), - }) - l.NoError(err) - l.NotNil(bridgeRequestTX) - - l.backend.WaitForConfirmation(l.GetTestContext(), bridgeRequestTX) - - bridgeResponseTX, err := handle.MockBridgeRelayer(auth.TransactOpts, - // transactionID - txID, - // relayer - testAddress, - // to - testAddress, - // originChainID - uint32(gofakeit.Uint16()), - // originToken - testAddress, - // destToken - testAddress, - // originAmount - new(big.Int).SetUint64(gofakeit.Uint64()), - // destAmount - new(big.Int).SetUint64(gofakeit.Uint64()), - // gasAmount - new(big.Int).SetUint64(gofakeit.Uint64())) - l.NoError(err) - l.NotNil(bridgeResponseTX) - l.backend.WaitForConfirmation(l.GetTestContext(), bridgeResponseTX) - }(i) - } - - wg.Wait() - - cl, err := listener.NewChainListener(l.backend, l.store, handle.Address(), l.metrics) - l.NoError(err) - - eventCount := 0 - - 
// TODO: check for timeout,but it will be extremely obvious if it gets hit. - listenCtx, cancel := context.WithCancel(l.GetTestContext()) - err = cl.Listen(listenCtx, func(ctx context.Context, log types.Log) error { - eventCount++ - - if eventCount == iterations*2 { - cancel() - } - - return nil - }) -} diff --git a/services/rfq/relayer/listener/suite_test.go b/services/rfq/relayer/listener/suite_test.go deleted file mode 100644 index 91cb814351..0000000000 --- a/services/rfq/relayer/listener/suite_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package listener_test - -import ( - "github.com/Flaque/filet" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/suite" - "github.com/synapsecns/sanguine/core/metrics" - "github.com/synapsecns/sanguine/core/testsuite" - "github.com/synapsecns/sanguine/ethergo/backends" - "github.com/synapsecns/sanguine/ethergo/backends/geth" - "github.com/synapsecns/sanguine/ethergo/contracts" - "github.com/synapsecns/sanguine/services/rfq/contracts/fastbridge" - "github.com/synapsecns/sanguine/services/rfq/relayer/listener" - "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" - "github.com/synapsecns/sanguine/services/rfq/relayer/reldb/sqlite" - "github.com/synapsecns/sanguine/services/rfq/testutil" - "math/big" - "testing" -) - -const chainID = 10 - -type ListenerTestSuite struct { - *testsuite.TestSuite - manager *testutil.DeployManager - backend backends.SimulatedTestBackend - store reldb.Service - metrics metrics.Handler - fastBridge *fastbridge.FastBridgeRef - fastBridgeMetadata contracts.DeployedContract -} - -func NewListenerSuite(tb testing.TB) *ListenerTestSuite { - return &ListenerTestSuite{ - TestSuite: testsuite.NewTestSuite(tb), - } -} - -func TestListenerSuite(t *testing.T) { - suite.Run(t, NewListenerSuite(t)) -} - -func (l 
*ListenerTestSuite) SetupTest() { - l.TestSuite.SetupTest() - - l.manager = testutil.NewDeployManager(l.T()) - l.backend = geth.NewEmbeddedBackendForChainID(l.GetTestContext(), l.T(), big.NewInt(chainID)) - var err error - l.metrics = metrics.NewNullHandler() - l.store, err = sqlite.NewSqliteStore(l.GetTestContext(), filet.TmpDir(l.T(), ""), l.metrics) - l.Require().NoError(err) - - l.fastBridgeMetadata, l.fastBridge = l.manager.GetFastBridge(l.GetTestContext(), l.backend) -} - -func (l *ListenerTestSuite) TestGetMetadataNoStore() { - // nothing stored, should use start block - cl := listener.NewTestChainListener(listener.TestChainListenerArgs{ - Address: common.Address{}, - Client: l.backend, - Contract: l.fastBridge, - Store: l.store, - Handler: l.metrics, - }) - - startBlock, myChainID, err := cl.GetMetadata(l.GetTestContext()) - l.NoError(err) - l.Equal(myChainID, uint64(chainID)) - - deployBlock, err := l.fastBridge.DeployBlock(&bind.CallOpts{Context: l.GetTestContext()}) - l.NoError(err) - l.Equal(startBlock, deployBlock.Uint64()) -} - -func (l *ListenerTestSuite) TestStartBlock() { - cl := listener.NewTestChainListener(listener.TestChainListenerArgs{ - Address: common.Address{}, - Client: l.backend, - Contract: l.fastBridge, - Store: l.store, - Handler: l.metrics, - }) - - deployBlock, err := l.fastBridge.DeployBlock(&bind.CallOpts{Context: l.GetTestContext()}) - l.NoError(err) - - expectedLastIndexed := deployBlock.Uint64() + 10 - err = l.store.PutLatestBlock(l.GetTestContext(), chainID, expectedLastIndexed) - l.NoError(err) - - startBlock, cid, err := cl.GetMetadata(l.GetTestContext()) - l.Equal(cid, uint64(chainID)) - l.Equal(startBlock, expectedLastIndexed) -} - -func (l *ListenerTestSuite) TestListen() { - -} diff --git a/services/rfq/relayer/metadata/metadata.go b/services/rfq/relayer/metadata/metadata.go index c860b5e863..c4342cd07e 100644 --- a/services/rfq/relayer/metadata/metadata.go +++ b/services/rfq/relayer/metadata/metadata.go @@ -1,4 +1,4 @@ 
-// Package metadata provides a metadata service for cctp relayer. +// Package metadata provides a metadata service for rfq. package metadata import "github.com/synapsecns/sanguine/core/config" diff --git a/services/rfq/relayer/quoter/quoter.go b/services/rfq/relayer/quoter/quoter.go index 0a7dce1f9b..efab9e2915 100644 --- a/services/rfq/relayer/quoter/quoter.go +++ b/services/rfq/relayer/quoter/quoter.go @@ -238,10 +238,9 @@ func (m *Manager) prepareAndSubmitQuotes(ctx context.Context, inv map[int]map[co // We can do this by looking at the quotableTokens map, and finding the key that matches the destination chain token. // Generates quotes for a given chain ID, address, and balance. func (m *Manager) generateQuotes(ctx context.Context, chainID int, address common.Address, balance *big.Int) ([]model.PutQuoteRequest, error) { - - destChainCfg, ok := m.config.Chains[chainID] - if !ok { - return nil, fmt.Errorf("error getting chain config for destination chain ID %d", chainID) + destRFQAddr, err := m.config.GetRFQAddress(chainID) + if err != nil { + return nil, fmt.Errorf("error getting destination RFQ address: %w", err) } destTokenID := fmt.Sprintf("%d-%s", chainID, address.Hex()) @@ -277,9 +276,9 @@ func (m *Manager) generateQuotes(ctx context.Context, chainID int, address commo if err != nil { return nil, fmt.Errorf("error getting total fee: %w", err) } - originChainCfg, ok := m.config.Chains[origin] - if !ok { - return nil, fmt.Errorf("error getting chain config for origin chain ID %d", origin) + originRFQAddr, err := m.config.GetRFQAddress(origin) + if err != nil { + return nil, fmt.Errorf("error getting RFQ address: %w", err) } // Build the quote @@ -295,8 +294,8 @@ func (m *Manager) generateQuotes(ctx context.Context, chainID int, address commo DestAmount: destAmount.String(), MaxOriginAmount: quoteAmount.String(), FixedFee: fee.String(), - OriginFastBridgeAddress: originChainCfg.Bridge, - DestFastBridgeAddress: destChainCfg.Bridge, + 
OriginFastBridgeAddress: originRFQAddr, + DestFastBridgeAddress: destRFQAddr, } quotes = append(quotes, quote) } diff --git a/services/rfq/relayer/relapi/server.go b/services/rfq/relayer/relapi/server.go index 7be4fed499..faad66141d 100644 --- a/services/rfq/relayer/relapi/server.go +++ b/services/rfq/relayer/relapi/server.go @@ -9,13 +9,15 @@ import ( "github.com/synapsecns/sanguine/core/ginhelper" "github.com/synapsecns/sanguine/ethergo/submitter" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/gin-gonic/gin" "github.com/synapsecns/sanguine/core/metrics" baseServer "github.com/synapsecns/sanguine/core/server" + "github.com/synapsecns/sanguine/ethergo/listener" omniClient "github.com/synapsecns/sanguine/services/omnirpc/client" + "github.com/synapsecns/sanguine/services/rfq/contracts/fastbridge" "github.com/synapsecns/sanguine/services/rfq/relayer/chain" - "github.com/synapsecns/sanguine/services/rfq/relayer/listener" "github.com/synapsecns/sanguine/services/rfq/relayer/relconfig" "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" ) @@ -32,6 +34,8 @@ type RelayerAPIServer struct { // NewRelayerAPI holds the configuration, database connection, gin engine, RPC client, metrics handler, and fast bridge contracts. // It is used to initialize and run the API server. 
+// +//nolint:cyclop func NewRelayerAPI( ctx context.Context, cfg relconfig.Config, @@ -59,11 +63,23 @@ func NewRelayerAPI( if err != nil { return nil, fmt.Errorf("could not create omnirpc client: %w", err) } - chainListener, err := listener.NewChainListener(chainClient, store, common.HexToAddress(chainCfg.Bridge), handler) + rfqAddr, err := cfg.GetRFQAddress(chainID) + if err != nil { + return nil, fmt.Errorf("could not get rfq address: %w", err) + } + contract, err := fastbridge.NewFastBridgeRef(common.HexToAddress(rfqAddr), chainClient) + if err != nil { + return nil, fmt.Errorf("could not create fast bridge contract: %w", err) + } + startBlock, err := contract.DeployBlock(&bind.CallOpts{Context: ctx}) + if err != nil { + return nil, fmt.Errorf("could not get deploy block: %w", err) + } + chainListener, err := listener.NewChainListener(chainClient, store, common.HexToAddress(rfqAddr), uint64(startBlock.Int64()), handler) if err != nil { return nil, fmt.Errorf("could not get chain listener: %w", err) } - chains[uint32(chainID)], err = chain.NewChain(ctx, chainClient, common.HexToAddress(chainCfg.Bridge), chainListener, submitter) + chains[uint32(chainID)], err = chain.NewChain(ctx, chainClient, common.HexToAddress(chainCfg.RFQAddress), chainListener, submitter) if err != nil { return nil, fmt.Errorf("could not create chain: %w", err) } diff --git a/services/rfq/relayer/relapi/suite_test.go b/services/rfq/relayer/relapi/suite_test.go index 3522f96596..879d682db4 100644 --- a/services/rfq/relayer/relapi/suite_test.go +++ b/services/rfq/relayer/relapi/suite_test.go @@ -79,10 +79,10 @@ func (c *RelayerServerSuite) SetupTest() { testConfig := relconfig.Config{ Chains: map[int]relconfig.ChainConfig{ int(c.originChainID): { - Bridge: ethFastBridgeAddress.Hex(), + RFQAddress: ethFastBridgeAddress.Hex(), }, int(c.destChainID): { - Bridge: arbFastBridgeAddress.Hex(), + RFQAddress: arbFastBridgeAddress.Hex(), }, }, RelayerAPIPort: strconv.Itoa(port), diff --git 
a/services/rfq/relayer/relconfig/config.go b/services/rfq/relayer/relconfig/config.go index 860fdbad3a..12d3cb7ef6 100644 --- a/services/rfq/relayer/relconfig/config.go +++ b/services/rfq/relayer/relconfig/config.go @@ -6,6 +6,7 @@ import ( "os" "strconv" "strings" + "time" "github.com/ethereum/go-ethereum/common" "github.com/jftuga/ellipsis" @@ -43,14 +44,18 @@ type Config struct { FeePricer FeePricerConfig `yaml:"fee_pricer"` // ScreenerAPIUrl is the TRM API url. ScreenerAPIUrl string `yaml:"screener_api_url"` - // DBSelectorIntervalSeconds is the interval for the db selector. - DBSelectorIntervalSeconds int `yaml:"db_selector_interval_seconds"` + // DBSelectorInterval is the interval for the db selector. + DBSelectorInterval time.Duration `yaml:"db_selector_interval"` + // RebalanceInterval is the interval for rebalancing. + RebalanceInterval time.Duration `yaml:"rebalance_interval"` } // ChainConfig represents the configuration for a chain. type ChainConfig struct { - // Bridge is the bridge confirmation count. - Bridge string `yaml:"address"` + // Bridge is the rfq bridge contract address. + RFQAddress string `yaml:"rfq_address"` + // CCTPAddress is the cctp contract address. + CCTPAddress string `yaml:"cctp_address"` // Confirmations is the number of required confirmations Confirmations uint64 `yaml:"confirmations"` // Tokens is a map of token ID -> token config. @@ -77,6 +82,8 @@ type ChainConfig struct { QuoteOffsetBps float64 `yaml:"quote_offset_bps"` // FixedFeeMultiplier is the multiplier for the fixed fee. FixedFeeMultiplier float64 `yaml:"fixed_fee_multiplier"` + // CCTP start block is the block at which the chain listener will listen for CCTP events. + CCTPStartBlock uint64 `yaml:"cctp_start_block"` } // TokenConfig represents the configuration for a token. @@ -89,6 +96,14 @@ type TokenConfig struct { PriceUSD float64 `yaml:"price_usd"` // MinQuoteAmount is the minimum amount to quote for this token in human-readable units. 
MinQuoteAmount string `yaml:"min_quote_amount"` + // RebalanceMethod is the method to use for rebalancing. + RebalanceMethod string `yaml:"rebalance_method"` + // MaintenanceBalancePct is the percentage of the total balance under which a rebalance will be triggered. + MaintenanceBalancePct float64 `yaml:"maintenance_balance_pct"` + // InitialBalancePct is the percentage of the total balance to retain when triggering a rebalance. + InitialBalancePct float64 `yaml:"initial_balance_pct"` + // MaxRebalanceAmount is the maximum amount to rebalance in human-readable units. + MaxRebalanceAmount string `yaml:"max_rebalance_amount"` } // DatabaseConfig represents the configuration for the database. diff --git a/services/rfq/relayer/relconfig/config_test.go b/services/rfq/relayer/relconfig/config_test.go index 163413ddbb..b364ca2a7e 100644 --- a/services/rfq/relayer/relconfig/config_test.go +++ b/services/rfq/relayer/relconfig/config_test.go @@ -5,16 +5,21 @@ import ( "time" "github.com/alecthomas/assert" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" "github.com/synapsecns/sanguine/services/rfq/relayer/relconfig" ) +//nolint:maintidx func TestGetters(t *testing.T) { chainID := 1 badChainID := 2 + usdcAddr := "0x123" cfgWithBase := relconfig.Config{ Chains: map[int]relconfig.ChainConfig{ chainID: { - Bridge: "0x123", + RFQAddress: "0x123", + CCTPAddress: "0x456", Confirmations: 1, NativeToken: "MATIC", DeadlineBufferSeconds: 10, @@ -30,7 +35,8 @@ func TestGetters(t *testing.T) { }, }, BaseChainConfig: relconfig.ChainConfig{ - Bridge: "0x1234", + RFQAddress: "0x1234", + CCTPAddress: "0x456", Confirmations: 2, NativeToken: "ARB", DeadlineBufferSeconds: 11, @@ -48,7 +54,8 @@ func TestGetters(t *testing.T) { cfg := relconfig.Config{ Chains: map[int]relconfig.ChainConfig{ chainID: { - Bridge: "0x123", + RFQAddress: "0x123", + CCTPAddress: "0x456", Confirmations: 1, NativeToken: "MATIC", 
DeadlineBufferSeconds: 10, @@ -61,22 +68,43 @@ func TestGetters(t *testing.T) { QuotePct: 50, QuoteOffsetBps: 10, FixedFeeMultiplier: 1.1, + Tokens: map[string]relconfig.TokenConfig{ + "USDC": { + Address: usdcAddr, + Decimals: 6, + MaxRebalanceAmount: "1000", + }, + }, }, }, } - t.Run("GetBridge", func(t *testing.T) { - defaultVal, err := cfg.GetBridge(badChainID) + t.Run("GetRFQAddress", func(t *testing.T) { + defaultVal, err := cfg.GetRFQAddress(badChainID) assert.NoError(t, err) - assert.Equal(t, defaultVal, relconfig.DefaultChainConfig.Bridge) + assert.Equal(t, defaultVal, relconfig.DefaultChainConfig.RFQAddress) - baseVal, err := cfgWithBase.GetBridge(badChainID) + baseVal, err := cfgWithBase.GetRFQAddress(badChainID) assert.NoError(t, err) - assert.Equal(t, baseVal, cfgWithBase.BaseChainConfig.Bridge) + assert.Equal(t, baseVal, cfgWithBase.BaseChainConfig.RFQAddress) - chainVal, err := cfgWithBase.GetBridge(chainID) + chainVal, err := cfgWithBase.GetRFQAddress(chainID) assert.NoError(t, err) - assert.Equal(t, chainVal, cfgWithBase.Chains[chainID].Bridge) + assert.Equal(t, chainVal, cfgWithBase.Chains[chainID].RFQAddress) + }) + + t.Run("GetCCTPAddress", func(t *testing.T) { + defaultVal, err := cfg.GetCCTPAddress(badChainID) + assert.NoError(t, err) + assert.Equal(t, defaultVal, relconfig.DefaultChainConfig.CCTPAddress) + + baseVal, err := cfgWithBase.GetCCTPAddress(badChainID) + assert.NoError(t, err) + assert.Equal(t, baseVal, cfgWithBase.BaseChainConfig.CCTPAddress) + + chainVal, err := cfgWithBase.GetCCTPAddress(chainID) + assert.NoError(t, err) + assert.Equal(t, chainVal, cfgWithBase.Chains[chainID].CCTPAddress) }) t.Run("GetConfirmations", func(t *testing.T) { @@ -246,4 +274,12 @@ func TestGetters(t *testing.T) { assert.NoError(t, err) assert.Equal(t, chainVal, cfgWithBase.Chains[chainID].FixedFeeMultiplier) }) + + t.Run("GetMaxRebalanceAmount", func(t *testing.T) { + defaultVal := cfg.GetMaxRebalanceAmount(badChainID, common.HexToAddress(usdcAddr)) + 
assert.Equal(t, defaultVal.String(), abi.MaxInt256.String()) + + chainVal := cfg.GetMaxRebalanceAmount(chainID, common.HexToAddress(usdcAddr)) + assert.Equal(t, chainVal.String(), "1000000000") + }) } diff --git a/services/rfq/relayer/relconfig/enum.go b/services/rfq/relayer/relconfig/enum.go new file mode 100644 index 0000000000..2f2420a0fa --- /dev/null +++ b/services/rfq/relayer/relconfig/enum.go @@ -0,0 +1,15 @@ +package relconfig + +// RebalanceMethod is the method to rebalance. +// +//go:generate go run golang.org/x/tools/cmd/stringer -type=RebalanceMethod +type RebalanceMethod uint8 + +const ( + // RebalanceMethodNone is the default rebalance method. + RebalanceMethodNone RebalanceMethod = iota + // RebalanceMethodCCTP is the rebalance method for CCTP. + RebalanceMethodCCTP + // RebalanceMethodNative is the rebalance method for native bridge. + RebalanceMethodNative +) diff --git a/services/rfq/relayer/relconfig/getters.go b/services/rfq/relayer/relconfig/getters.go index 747a5483a0..2e0b5362de 100644 --- a/services/rfq/relayer/relconfig/getters.go +++ b/services/rfq/relayer/relconfig/getters.go @@ -6,6 +6,7 @@ import ( "reflect" "time" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/synapsecns/sanguine/ethergo/signer/config" ) @@ -87,16 +88,30 @@ func isNonZero(value interface{}) bool { return reflect.ValueOf(value).Interface() != reflect.Zero(reflect.TypeOf(value)).Interface() } -// GetBridge returns the Bridge for the given chainID. -func (c Config) GetBridge(chainID int) (value string, err error) { - rawValue, err := c.getChainConfigValue(chainID, "Bridge") +// GetRFQAddress returns the RFQ address for the given chainID. 
+func (c Config) GetRFQAddress(chainID int) (value string, err error) { + rawValue, err := c.getChainConfigValue(chainID, "RFQAddress") if err != nil { return value, err } value, ok := rawValue.(string) if !ok { - return value, fmt.Errorf("failed to cast Bridge to string") + return value, fmt.Errorf("failed to cast RFQAddress to string") + } + return value, nil +} + +// GetCCTPAddress returns the RFQ address for the given chainID. +func (c Config) GetCCTPAddress(chainID int) (value string, err error) { + rawValue, err := c.getChainConfigValue(chainID, "CCTPAddress") + if err != nil { + return value, err + } + + value, ok := rawValue.(string) + if !ok { + return value, fmt.Errorf("failed to cast CCTPAddress to string") } return value, nil } @@ -281,6 +296,20 @@ func (c Config) GetFixedFeeMultiplier(chainID int) (value float64, err error) { return value, nil } +// GetCCTPStartBlock returns the CCTPStartBlock for the given chainID. +func (c Config) GetCCTPStartBlock(chainID int) (value uint64, err error) { + rawValue, err := c.getChainConfigValue(chainID, "CCTPStartBlock") + if err != nil { + return value, err + } + + value, ok := rawValue.(uint64) + if !ok { + return value, fmt.Errorf("failed to cast CCTPStartBlock to int") + } + return value, nil +} + // GetL1FeeParams returns the L1 fee params for the given chain. 
func (c Config) GetL1FeeParams(chainID uint32, origin bool) (uint32, int, bool) { var gasEstimate int @@ -346,6 +375,83 @@ func (c Config) GetHTTPTimeout() time.Duration { return time.Duration(timeoutMs) * time.Millisecond } +func (c Config) getTokenConfigByAddr(chainID int, tokenAddr string) (cfg TokenConfig, name string, err error) { + chainConfig, ok := c.Chains[chainID] + if !ok { + return cfg, name, fmt.Errorf("no chain config for chain %d", chainID) + } + for tokenName, tokenConfig := range chainConfig.Tokens { + if common.HexToAddress(tokenConfig.Address).Hex() == common.HexToAddress(tokenAddr).Hex() { + return tokenConfig, tokenName, nil + } + } + return cfg, name, fmt.Errorf("no token config for chain %d and address %s", chainID, tokenAddr) +} + +// GetRebalanceMethod returns the rebalance method for the given chain and token address. +func (c Config) GetRebalanceMethod(chainID int, tokenAddr string) (method RebalanceMethod, err error) { + tokenConfig, tokenName, err := c.getTokenConfigByAddr(chainID, tokenAddr) + if err != nil { + return 0, err + } + for cid, chainCfg := range c.Chains { + tokenCfg, ok := chainCfg.Tokens[tokenName] + if ok { + if tokenConfig.RebalanceMethod != tokenCfg.RebalanceMethod { + return RebalanceMethodNone, fmt.Errorf("rebalance method mismatch for token %s on chains %d and %d", tokenName, chainID, cid) + } + } + } + switch tokenConfig.RebalanceMethod { + case "cctp": + return RebalanceMethodCCTP, nil + case "native": + return RebalanceMethodNative, nil + } + return RebalanceMethodNone, nil +} + +// GetRebalanceMethods returns all rebalance methods present in the config. 
+func (c Config) GetRebalanceMethods() (methods map[RebalanceMethod]bool, err error) { + methods = make(map[RebalanceMethod]bool) + for chainID, chainCfg := range c.Chains { + for _, tokenCfg := range chainCfg.Tokens { + method, err := c.GetRebalanceMethod(chainID, tokenCfg.Address) + if err != nil { + return nil, err + } + if method != RebalanceMethodNone { + methods[method] = true + } + } + } + return methods, nil +} + +// GetMaintenanceBalancePct returns the maintenance balance percentage for the given chain and token address. +func (c Config) GetMaintenanceBalancePct(chainID int, tokenAddr string) (float64, error) { + tokenConfig, _, err := c.getTokenConfigByAddr(chainID, tokenAddr) + if err != nil { + return 0, err + } + if tokenConfig.MaintenanceBalancePct <= 0 { + return 0, fmt.Errorf("maintenance balance pct must be positive: %f", tokenConfig.MaintenanceBalancePct) + } + return tokenConfig.MaintenanceBalancePct, nil +} + +// GetInitialBalancePct returns the initial balance percentage for the given chain and token address. +func (c Config) GetInitialBalancePct(chainID int, tokenAddr string) (float64, error) { + tokenConfig, _, err := c.getTokenConfigByAddr(chainID, tokenAddr) + if err != nil { + return 0, err + } + if tokenConfig.InitialBalancePct <= 0 { + return 0, fmt.Errorf("initial balance pct must be positive: %f", tokenConfig.InitialBalancePct) + } + return tokenConfig.InitialBalancePct, nil +} + // GetTokenID returns the tokenID for the given chain and address. func (c Config) GetTokenID(chain int, addr string) (string, error) { chainConfig, ok := c.Chains[chain] @@ -441,13 +547,56 @@ func (c Config) GetMinQuoteAmount(chainID int, addr common.Address) *big.Int { return quoteAmountScaled } +var defaultMaxRebalanceAmount = abi.MaxInt256 + +// GetMaxRebalanceAmount returns the max rebalance amount for the given chain and address. +// Note that this getter returns the value in native token decimals. 
+func (c Config) GetMaxRebalanceAmount(chainID int, addr common.Address) *big.Int { + chainCfg, ok := c.Chains[chainID] + if !ok { + return defaultMaxRebalanceAmount + } + + var tokenCfg *TokenConfig + for _, cfg := range chainCfg.Tokens { + if common.HexToAddress(cfg.Address).Hex() == addr.Hex() { + cfgCopy := cfg + tokenCfg = &cfgCopy + break + } + } + if tokenCfg == nil { + return defaultMaxRebalanceAmount + } + rebalanceAmountFlt, ok := new(big.Float).SetString(tokenCfg.MaxRebalanceAmount) + if !ok || rebalanceAmountFlt == nil { + return defaultMaxRebalanceAmount + } + + // Scale by the token decimals. + denomDecimalsFactor := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(tokenCfg.Decimals)), nil) + maxRebalanceAmountScaled, _ := new(big.Float).Mul(rebalanceAmountFlt, new(big.Float).SetInt(denomDecimalsFactor)).Int(nil) + return maxRebalanceAmountScaled +} + const defaultDBSelectorIntervalSeconds = 1 // GetDBSelectorInterval returns the interval for the DB selector. func (c Config) GetDBSelectorInterval() time.Duration { - interval := c.DBSelectorIntervalSeconds + interval := c.DBSelectorInterval if interval <= 0 { - return defaultDBSelectorIntervalSeconds + interval = time.Duration(defaultDBSelectorIntervalSeconds) * time.Second + } + return interval +} + +const defaultRebalanceIntervalSeconds = 30 + +// GetRebalanceInterval returns the interval for rebalancing. +func (c Config) GetRebalanceInterval() time.Duration { + interval := c.RebalanceInterval + if interval == 0 { + interval = time.Duration(defaultRebalanceIntervalSeconds) * time.Second } - return time.Duration(interval) * time.Second + return interval } diff --git a/services/rfq/relayer/relconfig/rebalancemethod_string.go b/services/rfq/relayer/relconfig/rebalancemethod_string.go new file mode 100644 index 0000000000..377c060921 --- /dev/null +++ b/services/rfq/relayer/relconfig/rebalancemethod_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=RebalanceMethod"; DO NOT EDIT. 
+ +package relconfig + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[RebalanceMethodNone-0] + _ = x[RebalanceMethodCCTP-1] + _ = x[RebalanceMethodNative-2] +} + +const _RebalanceMethod_name = "RebalanceMethodNoneRebalanceMethodCCTPRebalanceMethodNative" + +var _RebalanceMethod_index = [...]uint8{0, 19, 38, 59} + +func (i RebalanceMethod) String() string { + if i >= RebalanceMethod(len(_RebalanceMethod_index)-1) { + return "RebalanceMethod(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _RebalanceMethod_name[_RebalanceMethod_index[i]:_RebalanceMethod_index[i+1]] +} diff --git a/services/rfq/relayer/reldb/base/block.go b/services/rfq/relayer/reldb/base/block.go deleted file mode 100644 index 75322aa224..0000000000 --- a/services/rfq/relayer/reldb/base/block.go +++ /dev/null @@ -1,40 +0,0 @@ -package base - -import ( - "context" - "errors" - "fmt" - "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" - "gorm.io/gorm" - "gorm.io/gorm/clause" -) - -// PutLatestBlock upserts the latest block into the database. -func (s Store) PutLatestBlock(ctx context.Context, chainID, height uint64) error { - tx := s.DB().WithContext(ctx).Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: chainIDFieldName}}, - DoUpdates: clause.AssignmentColumns([]string{chainIDFieldName, blockNumberFieldName}), - }).Create(&LastIndexed{ - ChainID: chainID, - BlockNumber: int(height), - }) - - if tx.Error != nil { - return fmt.Errorf("could not block updated: %w", tx.Error) - } - return nil -} - -// LatestBlockForChain gets the latest block for a chain. 
-func (s Store) LatestBlockForChain(ctx context.Context, chainID uint64) (uint64, error) { - blockWatchModel := LastIndexed{ChainID: chainID} - err := s.db.WithContext(ctx).First(&blockWatchModel).Error - if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - return 0, reldb.ErrNoLatestBlockForChainID - } - return 0, fmt.Errorf("could not fetch latest block: %w", err) - } - - return uint64(blockWatchModel.BlockNumber), nil -} diff --git a/services/rfq/relayer/reldb/base/model.go b/services/rfq/relayer/reldb/base/model.go index c8f36bbfe9..f4226369f9 100644 --- a/services/rfq/relayer/reldb/base/model.go +++ b/services/rfq/relayer/reldb/base/model.go @@ -14,25 +14,18 @@ import ( "github.com/synapsecns/sanguine/core/dbcommon" "github.com/synapsecns/sanguine/services/rfq/contracts/fastbridge" "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" - "gorm.io/gorm" ) func init() { namer := dbcommon.NewNamer(GetAllModels()) - chainIDFieldName = namer.GetConsistentName("ChainID") - blockNumberFieldName = namer.GetConsistentName("BlockNumber") statusFieldName = namer.GetConsistentName("Status") transactionIDFieldName = namer.GetConsistentName("TransactionID") originTxHashFieldName = namer.GetConsistentName("OriginTxHash") destTxHashFieldName = namer.GetConsistentName("DestTxHash") + rebalanceIDFieldName = namer.GetConsistentName("RebalanceID") } var ( - // chainIDFieldName gets the chain id field name. - chainIDFieldName string - // blockNumberFieldName is the name of the block number field. - blockNumberFieldName string - statusFieldName string // transactionIDFieldName is the transactions id field name. transactionIDFieldName string @@ -40,26 +33,10 @@ var ( originTxHashFieldName string // destTxHashFieldName is the dest tx hash field name. destTxHashFieldName string + // rebalanceIDFieldName is the rebalances id field name. 
+ rebalanceIDFieldName string ) -// LastIndexed is used to make sure we haven't missed any events while offline. -// since we event source - rather than use a state machine this is needed to make sure we haven't missed any events -// by allowing us to go back and source any events we may have missed. -// -// this does not inherit from gorm.model to allow us to use ChainID as a primary key. -type LastIndexed struct { - // CreatedAt is the creation time - CreatedAt time.Time - // UpdatedAt is the update time - UpdatedAt time.Time - // DeletedAt time - DeletedAt gorm.DeletedAt `gorm:"index"` - // ChainID is the chain id of the chain we're watching blocks on. This is our primary index. - ChainID uint64 `gorm:"column:chain_id;primaryKey;autoIncrement:false"` - // BlockHeight is the highest height we've seen on the chain - BlockNumber int `gorm:"block_number"` -} - // RequestForQuote is the primary event model. type RequestForQuote struct { // CreatedAt is the creation time @@ -112,6 +89,19 @@ type RequestForQuote struct { SendChainGas bool } +// Rebalance is the event model for a rebalance action. +type Rebalance struct { + CreatedAt time.Time + UpdatedAt time.Time + RebalanceID sql.NullString + Origin uint64 + Destination uint64 + OriginAmount string + Status reldb.RebalanceStatus + OriginTxHash sql.NullString + DestTxHash sql.NullString +} + // FromQuoteRequest converts a quote request to an object that can be stored in the db. // TODO: add validation for deadline > uint64 // TODO: roundtripper test. @@ -141,6 +131,25 @@ func FromQuoteRequest(request reldb.QuoteRequest) RequestForQuote { } } +// FromRebalance converts a rebalance to a db object. 
+func FromRebalance(rebalance reldb.Rebalance) Rebalance { + var id sql.NullString + if rebalance.RebalanceID == nil { + id = sql.NullString{Valid: false} + } else { + id = sql.NullString{String: hexutil.Encode(rebalance.RebalanceID[:]), Valid: true} + } + return Rebalance{ + RebalanceID: id, + Origin: rebalance.Origin, + Destination: rebalance.Destination, + OriginAmount: rebalance.OriginAmount.String(), + Status: rebalance.Status, + OriginTxHash: stringToNullString(rebalance.OriginTxHash.String()), + DestTxHash: stringToNullString(rebalance.DestTxHash.String()), + } +} + func stringToNullString(s string) sql.NullString { if s == "" { return sql.NullString{Valid: false} diff --git a/services/rfq/relayer/reldb/base/rebalance.go b/services/rfq/relayer/reldb/base/rebalance.go new file mode 100644 index 0000000000..e899b37679 --- /dev/null +++ b/services/rfq/relayer/reldb/base/rebalance.go @@ -0,0 +1,86 @@ +package base + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" + "gorm.io/gorm" +) + +// StoreRebalance stores a rebalance action. +func (s Store) StoreRebalance(ctx context.Context, rebalance reldb.Rebalance) error { + reb := FromRebalance(rebalance) + dbTx := s.DB().WithContext(ctx).Create(&reb) + if dbTx.Error != nil { + return fmt.Errorf("could not store rebalance: %w", dbTx.Error) + } + return nil +} + +// UpdateRebalanceStatus updates the rebalance status. +func (s Store) UpdateRebalanceStatus(ctx context.Context, id [32]byte, origin *uint64, status reldb.RebalanceStatus) error { + tx := s.DB().WithContext(ctx).Begin() + if tx.Error != nil { + return fmt.Errorf("could not start transaction: %w", tx.Error) + } + + // prepare the update transaction + var result *gorm.DB + if origin != nil { + result = tx.Model(&Rebalance{}). + Where(fmt.Sprintf("%s = ?", "origin"), *origin). 
+ Where(fmt.Sprintf("%s = ?", statusFieldName), reldb.RebalanceInitiated.Int()). + Updates(map[string]interface{}{ + rebalanceIDFieldName: hexutil.Encode(id[:]), + statusFieldName: status, + }) + } else { + result = tx.Model(&Rebalance{}). + Where(fmt.Sprintf("%s = ?", rebalanceIDFieldName), hexutil.Encode(id[:])). + Update(statusFieldName, status) + } + + // commit the transaction if only one row is affected + if result.Error != nil { + tx.Rollback() + return fmt.Errorf("could not update rebalance status: %w", result.Error) + } + if result.RowsAffected != 1 { + tx.Rollback() + return fmt.Errorf("expected 1 row to be affected, got %d", result.RowsAffected) + } + err := tx.Commit().Error + if err != nil { + return fmt.Errorf("could not commit transaction: %w", err) + } + return nil +} + +// HasPendingRebalance checks if there is a pending rebalance for the given chain ids. +func (s Store) HasPendingRebalance(ctx context.Context, chainIDs ...uint64) (bool, error) { + var rebalances []Rebalance + + matchStatuses := []reldb.RebalanceStatus{reldb.RebalanceInitiated, reldb.RebalancePending} + inArgs := make([]int, len(matchStatuses)) + for i := range matchStatuses { + inArgs[i] = int(matchStatuses[i].Int()) + } + + // TODO: can be made more efficient by doing below check inside sql query + tx := s.DB().WithContext(ctx).Model(&Rebalance{}).Where(fmt.Sprintf("%s IN ?", statusFieldName), inArgs).Find(&rebalances) + if tx.Error != nil { + return false, fmt.Errorf("could not get db results: %w", tx.Error) + } + + // Check if any pending rebalances involve the given chain ids + for _, result := range rebalances { + for _, chainID := range chainIDs { + if result.Origin == chainID || result.Destination == chainID { + return true, nil + } + } + } + return false, nil +} diff --git a/services/rfq/relayer/reldb/base/store.go b/services/rfq/relayer/reldb/base/store.go index e153da1dee..1a026933f6 100644 --- a/services/rfq/relayer/reldb/base/store.go +++ 
b/services/rfq/relayer/reldb/base/store.go @@ -2,6 +2,7 @@ package base import ( "github.com/synapsecns/sanguine/core/metrics" + listenerDB "github.com/synapsecns/sanguine/ethergo/listener/db" submitterDB "github.com/synapsecns/sanguine/ethergo/submitter/db" "github.com/synapsecns/sanguine/ethergo/submitter/db/txdb" "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" @@ -10,6 +11,7 @@ import ( // Store implements the service. type Store struct { + listenerDB.ChainListenerDB db *gorm.DB submitterStore submitterDB.Service } @@ -17,7 +19,8 @@ type Store struct { // NewStore creates a new store. func NewStore(db *gorm.DB, metrics metrics.Handler) *Store { txDB := txdb.NewTXStore(db, metrics) - return &Store{db: db, submitterStore: txDB} + + return &Store{ChainListenerDB: listenerDB.NewChainListenerStore(db, metrics), db: db, submitterStore: txDB} } // DB gets the database object for mutation outside of the lib. @@ -33,7 +36,8 @@ func (s Store) SubmitterDB() submitterDB.Service { // GetAllModels gets all models to migrate // see: https://medium.com/@SaifAbid/slice-interfaces-8c78f8b6345d for an explanation of why we can't do this at initialization time func GetAllModels() (allModels []interface{}) { - allModels = append(txdb.GetAllModels(), &LastIndexed{}, &RequestForQuote{}) + allModels = append(txdb.GetAllModels(), &RequestForQuote{}, &Rebalance{}) + allModels = append(allModels, listenerDB.GetAllModels()...) 
return allModels } diff --git a/services/rfq/relayer/reldb/db.go b/services/rfq/relayer/reldb/db.go index c3901f190e..f3af3ed0b7 100644 --- a/services/rfq/relayer/reldb/db.go +++ b/services/rfq/relayer/reldb/db.go @@ -5,6 +5,8 @@ import ( "database/sql/driver" "errors" "fmt" + "github.com/synapsecns/sanguine/ethergo/listener/db" + "math/big" "github.com/ethereum/go-ethereum/common" "github.com/synapsecns/sanguine/core/dbcommon" @@ -14,27 +16,30 @@ import ( // Writer is the interface for writing to the database. type Writer interface { - // PutLatestBlock upsers the latest block on a given chain id to be new height. - PutLatestBlock(ctx context.Context, chainID, height uint64) error - // StoreQuoteRequest stores a quote reuquest. If one already exists, only the status will be updated + // StoreQuoteRequest stores a quote request. If one already exists, only the status will be updated // TODO: find a better way to describe this in the name StoreQuoteRequest(ctx context.Context, request QuoteRequest) error + // StoreRebalance stores a rebalance. + StoreRebalance(ctx context.Context, rebalance Rebalance) error // UpdateQuoteRequestStatus updates the status of a quote request UpdateQuoteRequestStatus(ctx context.Context, id [32]byte, status QuoteRequestStatus) error + // UpdateRebalanceStatus updates the status of a rebalance action. + // If the origin is supplied, it will be used to update the ID for the corresponding rebalance model. + UpdateRebalanceStatus(ctx context.Context, id [32]byte, origin *uint64, status RebalanceStatus) error // UpdateDestTxHash updates the dest tx hash of a quote request UpdateDestTxHash(ctx context.Context, id [32]byte, destTxHash common.Hash) error } // Reader is the interface for reading from the database. type Reader interface { - // LatestBlockForChain gets the latest block for a given chain id. 
- LatestBlockForChain(ctx context.Context, chainID uint64) (uint64, error) // GetQuoteRequestByID gets a quote request by id. Should return ErrNoQuoteForID if not found GetQuoteRequestByID(ctx context.Context, id [32]byte) (*QuoteRequest, error) // GetQuoteRequestByOriginTxHash gets a quote request by origin tx hash. Should return ErrNoQuoteForTxHash if not found GetQuoteRequestByOriginTxHash(ctx context.Context, txHash common.Hash) (*QuoteRequest, error) // GetQuoteResultsByStatus gets quote results by status GetQuoteResultsByStatus(ctx context.Context, matchStatuses ...QuoteRequestStatus) (res []QuoteRequest, _ error) + // HasPendingRebalance checks if there is a pending rebalance for the given chain ids. + HasPendingRebalance(ctx context.Context, chainIDs ...uint64) (bool, error) } // Service is the interface for the database service. @@ -43,11 +48,10 @@ type Service interface { // SubmitterDB returns the submitter database service. SubmitterDB() submitterDB.Service Writer + db.ChainListenerDB } var ( - // ErrNoLatestBlockForChainID is returned when no block exists for the chain. - ErrNoLatestBlockForChainID = errors.New("no latest block for chainId") // ErrNoQuoteForID means the quote was not found. ErrNoQuoteForID = errors.New("no quote found for tx id") // ErrNoQuoteForTxHash means the quote was not found. @@ -149,3 +153,57 @@ func (q QuoteRequestStatus) Value() (driver.Value, error) { } var _ dbcommon.Enum = (*QuoteRequestStatus)(nil) + +// Rebalance represents a rebalance action. +type Rebalance struct { + RebalanceID *[32]byte + Origin uint64 + Destination uint64 + OriginAmount *big.Int + Status RebalanceStatus + OriginTxHash common.Hash + DestTxHash common.Hash +} + +// RebalanceStatus is the status of a rebalance action in the db. +// +//go:generate go run golang.org/x/tools/cmd/stringer -type=RebalanceStatus +type RebalanceStatus uint8 + +const ( + // RebalanceInitiated means the rebalance transaction has been initiated. 
+ RebalanceInitiated RebalanceStatus = iota + 1 + // RebalancePending means the rebalance transaction has been confirmed on the origin. + RebalancePending + // RebalanceCompleted means the rebalance transaction has been confirmed on the destination. + RebalanceCompleted +) + +// Int returns the int value of the quote request status. +func (r RebalanceStatus) Int() uint8 { + return uint8(r) +} + +// GormDataType implements the gorm common interface for enums. +func (r RebalanceStatus) GormDataType() string { + return dbcommon.EnumDataType +} + +// Scan implements the gorm common interface for enums. +func (r *RebalanceStatus) Scan(src any) error { + res, err := dbcommon.EnumScan(src) + if err != nil { + return fmt.Errorf("could not scan %w", err) + } + newStatus := RebalanceStatus(res) + *r = newStatus + return nil +} + +// Value implements the gorm common interface for enums. +func (r RebalanceStatus) Value() (driver.Value, error) { + // nolint: wrapcheck + return dbcommon.EnumValue(r) +} + +var _ dbcommon.Enum = (*RebalanceStatus)(nil) diff --git a/services/rfq/relayer/reldb/db_test.go b/services/rfq/relayer/reldb/db_test.go index ea6ce9a1ad..d36fc760c1 100644 --- a/services/rfq/relayer/reldb/db_test.go +++ b/services/rfq/relayer/reldb/db_test.go @@ -2,6 +2,7 @@ package reldb_test import ( "errors" + "github.com/synapsecns/sanguine/ethergo/listener" "github.com/synapsecns/sanguine/services/rfq/relayer/reldb" ) @@ -9,7 +10,7 @@ func (d *DBSuite) TestBlock() { d.RunOnAllDBs(func(testDB reldb.Service) { const testChainID = 5 _, err := testDB.LatestBlockForChain(d.GetTestContext(), testChainID) - d.True(errors.Is(err, reldb.ErrNoLatestBlockForChainID)) + d.True(errors.Is(err, listener.ErrNoLatestBlockForChainID)) testHeight := 10 diff --git a/services/rfq/relayer/reldb/rebalancestatus_string.go b/services/rfq/relayer/reldb/rebalancestatus_string.go new file mode 100644 index 0000000000..7808a7cf49 --- /dev/null +++ 
b/services/rfq/relayer/reldb/rebalancestatus_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=RebalanceStatus"; DO NOT EDIT. + +package reldb + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[RebalanceInitiated-1] + _ = x[RebalancePending-2] + _ = x[RebalanceCompleted-3] +} + +const _RebalanceStatus_name = "RebalanceInitiatedRebalancePendingRebalanceCompleted" + +var _RebalanceStatus_index = [...]uint8{0, 18, 34, 52} + +func (i RebalanceStatus) String() string { + i -= 1 + if i >= RebalanceStatus(len(_RebalanceStatus_index)-1) { + return "RebalanceStatus(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _RebalanceStatus_name[_RebalanceStatus_index[i]:_RebalanceStatus_index[i+1]] +} diff --git a/services/rfq/relayer/service/chainindexer.go b/services/rfq/relayer/service/chainindexer.go index 7a7c07a94b..41121c28f0 100644 --- a/services/rfq/relayer/service/chainindexer.go +++ b/services/rfq/relayer/service/chainindexer.go @@ -101,7 +101,7 @@ func (r *Relayer) runChainIndexer(ctx context.Context, chainID int) (err error) return nil } - err = r.handleDepositClaimed(ctx, event) + err = r.handleDepositClaimed(ctx, event, chainID) if err != nil { return fmt.Errorf("could not handle deposit claimed: %w", err) } @@ -199,8 +199,12 @@ type decimalsRes struct { originDecimals, destDecimals uint8 } -func (r *Relayer) handleDepositClaimed(ctx context.Context, event *fastbridge.FastBridgeBridgeDepositClaimed) error { - err := r.db.UpdateQuoteRequestStatus(ctx, event.TransactionId, reldb.ClaimCompleted) +func (r *Relayer) handleDepositClaimed(ctx context.Context, event *fastbridge.FastBridgeBridgeDepositClaimed, chainID int) error { + err := r.inventory.Rebalance(ctx, chainID, event.Token) + if err != nil { + return fmt.Errorf("could not rebalance: %w", err) + } + err = 
r.db.UpdateQuoteRequestStatus(ctx, event.TransactionId, reldb.ClaimCompleted) if err != nil { return fmt.Errorf("could not update request status: %w", err) } diff --git a/services/rfq/relayer/service/handlers.go b/services/rfq/relayer/service/handlers.go index 2104a05bdf..28e234b355 100644 --- a/services/rfq/relayer/service/handlers.go +++ b/services/rfq/relayer/service/handlers.go @@ -285,12 +285,12 @@ func (q *QuoteRequestHandler) handleProofPosted(ctx context.Context, _ trace.Spa return nil } - canClaim, err := q.Origin.Bridge.CanClaim(&bind.CallOpts{Context: ctx}, request.TransactionID, q.RelayerAdress) + canClaim, err := q.Origin.Bridge.CanClaim(&bind.CallOpts{Context: ctx}, request.TransactionID, q.RelayerAddress) if err != nil { return fmt.Errorf("could not check if can claim: %w", err) } - // can't cliam yet. we'll check again later + // can't claim yet. we'll check again later if !canClaim { return nil } @@ -299,7 +299,6 @@ func (q *QuoteRequestHandler) handleProofPosted(ctx context.Context, _ trace.Spa if err != nil { return nil, fmt.Errorf("could not relay: %w", err) } - return tx, nil }) if err != nil { @@ -313,7 +312,7 @@ func (q *QuoteRequestHandler) handleProofPosted(ctx context.Context, _ trace.Spa return nil } -// Error Handlers Only from this point belo +// Error Handlers Only from this point below. // // handleNotEnoughInventory handles the not enough inventory status. 
func (q *QuoteRequestHandler) handleNotEnoughInventory(ctx context.Context, _ trace.Span, request reldb.QuoteRequest) (err error) { diff --git a/services/rfq/relayer/service/relayer.go b/services/rfq/relayer/service/relayer.go index ba94ca7a86..d1af5aa87a 100644 --- a/services/rfq/relayer/service/relayer.go +++ b/services/rfq/relayer/service/relayer.go @@ -6,17 +6,19 @@ import ( "math/big" "time" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ipfs/go-log" "github.com/jellydator/ttlcache/v3" "github.com/synapsecns/sanguine/core/dbcommon" "github.com/synapsecns/sanguine/core/metrics" + "github.com/synapsecns/sanguine/ethergo/listener" signerConfig "github.com/synapsecns/sanguine/ethergo/signer/config" "github.com/synapsecns/sanguine/ethergo/signer/signer" "github.com/synapsecns/sanguine/ethergo/submitter" omnirpcClient "github.com/synapsecns/sanguine/services/omnirpc/client" + "github.com/synapsecns/sanguine/services/rfq/contracts/fastbridge" "github.com/synapsecns/sanguine/services/rfq/relayer/inventory" - "github.com/synapsecns/sanguine/services/rfq/relayer/listener" "github.com/synapsecns/sanguine/services/rfq/relayer/pricer" "github.com/synapsecns/sanguine/services/rfq/relayer/quoter" "github.com/synapsecns/sanguine/services/rfq/relayer/relapi" @@ -62,15 +64,25 @@ func NewRelayer(ctx context.Context, metricHandler metrics.Handler, cfg relconfi chainListeners := make(map[int]listener.ContractListener) // setup chain listeners - for chainID, chainCFG := range cfg.GetChains() { - // TODO: consider getter for this convert step - bridge := common.HexToAddress(chainCFG.Bridge) + for chainID := range cfg.GetChains() { + rfqAddr, err := cfg.GetRFQAddress(chainID) + if err != nil { + return nil, fmt.Errorf("could not get rfq address: %w", err) + } chainClient, err := 
omniClient.GetChainClient(ctx, chainID) if err != nil { return nil, fmt.Errorf("could not get chain client: %w", err) } - chainListener, err := listener.NewChainListener(chainClient, store, bridge, metricHandler) + contract, err := fastbridge.NewFastBridgeRef(common.HexToAddress(rfqAddr), chainClient) + if err != nil { + return nil, fmt.Errorf("could not create fast bridge contract: %w", err) + } + startBlock, err := contract.DeployBlock(&bind.CallOpts{Context: ctx}) + if err != nil { + return nil, fmt.Errorf("could not get deploy block: %w", err) + } + chainListener, err := listener.NewChainListener(chainClient, store, common.HexToAddress(rfqAddr), uint64(startBlock.Int64()), metricHandler) if err != nil { return nil, fmt.Errorf("could not get chain listener: %w", err) } @@ -82,7 +94,9 @@ func NewRelayer(ctx context.Context, metricHandler metrics.Handler, cfg relconfi return nil, fmt.Errorf("could not get signer: %w", err) } - im, err := inventory.NewInventoryManager(ctx, omniClient, metricHandler, cfg, sg.Address(), store) + sm := submitter.NewTransactionSubmitter(metricHandler, sg, omniClient, store.SubmitterDB(), &cfg.SubmitterConfig) + + im, err := inventory.NewInventoryManager(ctx, omniClient, metricHandler, cfg, sg.Address(), sm, store) if err != nil { return nil, fmt.Errorf("could not add imanager: %w", err) } @@ -95,8 +109,6 @@ func NewRelayer(ctx context.Context, metricHandler metrics.Handler, cfg relconfi return nil, fmt.Errorf("could not get quoter") } - sm := submitter.NewTransactionSubmitter(metricHandler, sg, omniClient, store.SubmitterDB(), &cfg.SubmitterConfig) - apiServer, err := relapi.NewRelayerAPI(ctx, cfg, metricHandler, omniClient, store, sm) if err != nil { return nil, fmt.Errorf("could not get api server: %w", err) @@ -131,7 +143,7 @@ const defaultPostInterval = 1 // 4. Start the submitter: This will submit any transactions that need to be submitted. 
// nolint: cyclop func (r *Relayer) Start(ctx context.Context) error { - err := r.inventory.ApproveAllTokens(ctx, r.submitter) + err := r.inventory.ApproveAllTokens(ctx) if err != nil { return fmt.Errorf("could not approve all tokens: %w", err) } @@ -195,6 +207,14 @@ func (r *Relayer) Start(ctx context.Context) error { return nil }) + g.Go(func() error { + err := r.inventory.Start(ctx) + if err != nil { + return fmt.Errorf("could not start inventory manager: %w", err) + } + return nil + }) + err = g.Wait() if err != nil { return fmt.Errorf("could not start: %w", err) diff --git a/services/rfq/relayer/service/statushandler.go b/services/rfq/relayer/service/statushandler.go index e9cb04a34c..de6e12a509 100644 --- a/services/rfq/relayer/service/statushandler.go +++ b/services/rfq/relayer/service/statushandler.go @@ -38,8 +38,8 @@ type QuoteRequestHandler struct { handlers map[reldb.QuoteRequestStatus]Handler // claimCache is the cache of claims used for figuring out when we should retry the claim method. claimCache *ttlcache.Cache[common.Hash, bool] - // RelayerAdress is the relayer RelayerAdress - RelayerAdress common.Address + // RelayerAddress is the relayer RelayerAddress + RelayerAddress common.Address // metrics is the metrics handler. 
metrics metrics.Handler } @@ -59,24 +59,23 @@ func (r *Relayer) requestToHandler(ctx context.Context, req reldb.QuoteRequest) } qr := &QuoteRequestHandler{ - Origin: *origin, - Dest: *dest, - db: r.db, - Inventory: r.inventory, - Quoter: r.quoter, - handlers: make(map[reldb.QuoteRequestStatus]Handler), - metrics: r.metrics, - RelayerAdress: r.signer.Address(), - claimCache: r.claimCache, + Origin: *origin, + Dest: *dest, + db: r.db, + Inventory: r.inventory, + Quoter: r.quoter, + handlers: make(map[reldb.QuoteRequestStatus]Handler), + metrics: r.metrics, + RelayerAddress: r.signer.Address(), + claimCache: r.claimCache, } - qr.handlers[reldb.Seen] = r.deadlineMiddleware(qr.handleSeen) - qr.handlers[reldb.CommittedPending] = r.deadlineMiddleware(qr.handleCommitPending) - qr.handlers[reldb.CommittedConfirmed] = r.deadlineMiddleware(qr.handleCommitConfirmed) + qr.handlers[reldb.Seen] = r.deadlineMiddleware(r.gasMiddleware(qr.handleSeen)) + qr.handlers[reldb.CommittedPending] = r.deadlineMiddleware(r.gasMiddleware(qr.handleCommitPending)) + qr.handlers[reldb.CommittedConfirmed] = r.deadlineMiddleware(r.gasMiddleware(qr.handleCommitConfirmed)) // no more need for deadline middleware now, we already relayed. 
- qr.handlers[reldb.RelayCompleted] = r.gasMiddleware(qr.handleRelayCompleted) + qr.handlers[reldb.RelayCompleted] = qr.handleRelayCompleted qr.handlers[reldb.ProvePosted] = qr.handleProofPosted - // TODO: we probably want a claim complete state once we've seen that event on chain // error handlers only qr.handlers[reldb.NotEnoughInventory] = r.deadlineMiddleware(qr.handleNotEnoughInventory) @@ -130,7 +129,15 @@ func (r *Relayer) chainIDToChain(ctx context.Context, chainID uint32) (*chain.Ch } //nolint: wrapcheck - return chain.NewChain(ctx, chainClient, common.HexToAddress(r.cfg.GetChains()[id].Bridge), r.chainListeners[id], r.submitter) + rfqAddr, err := r.cfg.GetRFQAddress(id) + if err != nil { + return nil, fmt.Errorf("could not get rfq address: %w", err) + } + chain, err := chain.NewChain(ctx, chainClient, common.HexToAddress(rfqAddr), r.chainListeners[id], r.submitter) + if err != nil { + return nil, fmt.Errorf("could not create chain: %w", err) + } + return chain, nil } // shouldCheckClaim checks if we should check the claim method. 
diff --git a/services/rfq/relayer/service/suite_test.go b/services/rfq/relayer/service/suite_test.go index 48d7514e82..6cdebefa06 100644 --- a/services/rfq/relayer/service/suite_test.go +++ b/services/rfq/relayer/service/suite_test.go @@ -69,10 +69,10 @@ func (r *RelayerTestSuite) SetupTest() { }, Chains: map[int]relconfig.ChainConfig{ int(r.originBackend.GetChainID()): { - Bridge: originContract.Address().String(), + RFQAddress: originContract.Address().String(), }, int(r.destBackend.GetChainID()): { - Bridge: destContract.Address().String(), + RFQAddress: destContract.Address().String(), }, }, OmniRPCURL: serverURL, diff --git a/services/rfq/testutil/deployers.go b/services/rfq/testutil/deployers.go index 92aeb37484..79ae1538e8 100644 --- a/services/rfq/testutil/deployers.go +++ b/services/rfq/testutil/deployers.go @@ -3,6 +3,9 @@ package testutil import ( "context" "fmt" + "math/big" + "testing" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -13,8 +16,6 @@ import ( "github.com/synapsecns/sanguine/ethergo/manager" "github.com/synapsecns/sanguine/services/rfq/contracts/fastbridge" "github.com/synapsecns/sanguine/services/rfq/contracts/testcontracts/fastbridgemock" - "math/big" - "testing" ) // DeployManager wraps DeployManager and allows typed contract handles to be returned. 
@@ -26,7 +27,6 @@ type DeployManager struct { func NewDeployManager(t *testing.T) *DeployManager { t.Helper() - // TODO: add contracts here parentManager := manager.NewDeployerManager(t, NewFastBridgeDeployer, NewMockERC20Deployer, NewMockFastBridgeDeployer, NewWETH9Deployer, NewUSDTDeployer, NewUSDCDeployer, NewDAIDeployer) return &DeployManager{parentManager} } diff --git a/services/rfq/testutil/typecast.go b/services/rfq/testutil/typecast.go index a18a6563e9..fd096d497b 100644 --- a/services/rfq/testutil/typecast.go +++ b/services/rfq/testutil/typecast.go @@ -2,6 +2,7 @@ package testutil import ( "context" + "github.com/synapsecns/sanguine/ethergo/backends" "github.com/synapsecns/sanguine/ethergo/contracts" "github.com/synapsecns/sanguine/ethergo/manager" diff --git a/services/stiprelayer/cmd/commands.go b/services/stiprelayer/cmd/commands.go index 993882d865..96493c08ec 100644 --- a/services/stiprelayer/cmd/commands.go +++ b/services/stiprelayer/cmd/commands.go @@ -1,4 +1,4 @@ -// Package cmd provides the command line interface for the RFQ API service. +// Package cmd provides the command line interface for the stip relayer service. package cmd import ( diff --git a/services/stiprelayer/main.go b/services/stiprelayer/main.go index 54226c5f44..7613c9cf82 100644 --- a/services/stiprelayer/main.go +++ b/services/stiprelayer/main.go @@ -8,7 +8,7 @@ import ( "github.com/synapsecns/sanguine/services/stiprelayer/metadata" ) -// main is the entry point for the RFQ API Server. +// main is the entry point for the stip relayer. func main() { cmd.Start(os.Args, metadata.BuildInfo()) }