Change keepers to use the default contract transmitter #11308
Merged

Commits (14):
2fa9e69 Switch keepers to use the default contract transmitter (ferglor)
d6ced2f Fix streams import (ferglor)
28b6cbc Clean up function calls (ferglor)
ba160f6 Remove pipeline runner dependency for automation (ferglor)
df408bf Use contractTransmitterOpts to specify a pluginGasLimit (ferglor)
ba9ac83 Make the pluginGasLimit a pointer (ferglor)
e0405d0 Clean up the pipeline transmitter (ferglor)
027f680 Attempt to listen for transmits (ferglor)
b2f5d6a Revert "Attempt to listen for transmits" (ferglor)
93e9973 Listen for performed events (ferglor)
c31b3e4 Goimports (ferglor)
0311896 Add wrapper function that lets us specify a count of performed (ferglor)
c12cab2 Update integration test (ferglor)
785faa6 Pass the configWatcher as a parameter to indicate that it's required (ferglor)
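Two of the commits (df408bf, ba9ac83) route a keeper-specific gas limit through the transmitter's options instead of a dedicated pipeline transmitter. Below is a minimal sketch of the idea only — `contractTransmitterOpts`, `pluginGasLimit`, and the default value here are illustrative names, not the actual chainlink API. The point of making the limit a pointer is that `nil` cleanly means "not set, use the default", which a plain zero value cannot express.

```go
package main

import "fmt"

// contractTransmitterOpts sketches an options struct consumed by a
// transmitter constructor. All names and values are illustrative.
type contractTransmitterOpts struct {
	pluginGasLimit *uint32 // nil means "fall back to the default gas limit"
}

const defaultGasLimit uint32 = 500_000 // illustrative default

// effectiveGasLimit resolves the limit the transmitter would actually use.
func effectiveGasLimit(opts contractTransmitterOpts) uint32 {
	if opts.pluginGasLimit != nil {
		return *opts.pluginGasLimit
	}
	return defaultGasLimit
}

func main() {
	keeperLimit := uint32(5_000_000)
	fmt.Println(effectiveGasLimit(contractTransmitterOpts{}))                             // 500000 (default)
	fmt.Println(effectiveGasLimit(contractTransmitterOpts{pluginGasLimit: &keeperLimit})) // 5000000 (explicit)
}
```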
Diff (changes from all commits):
```diff
@@ -54,7 +54,6 @@ import (
 	"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey"
 	"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper"
 	"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams"
-	"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
 	"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
 )
```
```diff
@@ -117,7 +116,7 @@ func TestIntegration_KeeperPluginConditionalUpkeep(t *testing.T) {
 	require.NoError(t, err)
 	registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr)

-	nodes, _ := setupNodes(t, nodeKeys, registry, backend, steve)
+	setupNodes(t, nodeKeys, registry, backend, steve)

 	<-time.After(time.Second * 5)
```
```diff
@@ -160,8 +159,6 @@ func TestIntegration_KeeperPluginConditionalUpkeep(t *testing.T) {
 	}
 	g.Eventually(receivedBytes, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(payload1))

-	checkPipelineRuns(t, nodes, 1)
-
 	// change payload
 	_, err = upkeepContract.SetBytesToSend(carrol, payload2)
 	require.NoError(t, err)
```
```diff
@@ -204,7 +201,7 @@ func TestIntegration_KeeperPluginLogUpkeep(t *testing.T) {
 	require.NoError(t, err)

 	registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr)
-	nodes, _ := setupNodes(t, nodeKeys, registry, backend, steve)
+	setupNodes(t, nodeKeys, registry, backend, steve)
 	upkeeps := 1

 	_, err = linkToken.Transfer(sergey, carrol.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeeps+1))))
```
```diff
@@ -228,35 +225,36 @@ func TestIntegration_KeeperPluginLogUpkeep(t *testing.T) {
 	g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue())
 	done()

-	runs := checkPipelineRuns(t, nodes, 1)
-
 	t.Run("recover logs", func(t *testing.T) {

 		addr, contract := addrs[0], contracts[0]
 		upkeepID := registerUpkeep(t, registry, addr, carrol, steve, backend)
 		backend.Commit()
 		t.Logf("Registered new upkeep %s for address %s", upkeepID.String(), addr.String())
 		// Emit 100 logs in a burst
-		emits := 100
+		recoverEmits := 100
 		i := 0
 		emitEvents(testutils.Context(t), t, 100, []*log_upkeep_counter_wrapper.LogUpkeepCounter{contract}, carrol, func() {
 			i++
-			if i%(emits/4) == 0 {
+			if i%(recoverEmits/4) == 0 {
 				backend.Commit()
 				time.Sleep(time.Millisecond * 250) // otherwise we get "invalid transaction nonce" errors
 			}
 		})
-		// Mine enough blocks to ensre these logs don't fall into log provider range
+
+		beforeDummyBlocks := backend.Blockchain().CurrentBlock().Number.Uint64()
+
+		// Mine enough blocks to ensure these logs don't fall into log provider range
 		dummyBlocks := 500
 		for i := 0; i < dummyBlocks; i++ {
 			backend.Commit()
 			time.Sleep(time.Millisecond * 10)
 		}
-		t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks)

-		expectedPostRecover := runs + emits
-		waitPipelineRuns(t, nodes, expectedPostRecover, testutils.WaitTimeout(t), cltest.DBPollingInterval)
+		t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks)

+		listener, done := listenPerformedN(t, backend, registry, ids, int64(beforeDummyBlocks), recoverEmits)
+		g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue())
+		done()
 	})
 }
```

Review comment on lines -257 to -258 (the removed waitPipelineRuns assertion): We should find some other way to assert wanted expectations. Removing this is weakening this test.
```diff
@@ -296,7 +294,7 @@ func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) {
 	registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr)

-	nodes, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner)
+	_, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner)

 	const upkeepCount = 10
 	const mercuryFailCount = upkeepCount * 3 * 2
```
```diff
@@ -374,39 +372,6 @@ func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) {
 	g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue())

 	done()
-
-	_ = checkPipelineRuns(t, nodes, 1*len(nodes)) // TODO: TBD
 }

-func waitPipelineRuns(t *testing.T, nodes []Node, n int, timeout, interval time.Duration) {
-	ctx, cancel := context.WithTimeout(testutils.Context(t), timeout)
-	defer cancel()
-	var allRuns []pipeline.Run
-	for len(allRuns) < n && ctx.Err() == nil {
-		allRuns = []pipeline.Run{}
-		for _, node := range nodes {
-			runs, err := node.App.PipelineORM().GetAllRuns()
-			require.NoError(t, err)
-			allRuns = append(allRuns, runs...)
-		}
-		time.Sleep(interval)
-	}
-	runs := len(allRuns)
-	t.Logf("found %d pipeline runs", runs)
-	require.GreaterOrEqual(t, runs, n)
-}
-
-func checkPipelineRuns(t *testing.T, nodes []Node, n int) int {
-	var allRuns []pipeline.Run
-	for _, node := range nodes {
-		runs, err2 := node.App.PipelineORM().GetAllRuns()
-		require.NoError(t, err2)
-		allRuns = append(allRuns, runs...)
-	}
-	runs := len(allRuns)
-	t.Logf("found %d pipeline runs", runs)
-	require.GreaterOrEqual(t, runs, n)
-	return runs
-}
-
 func emitEvents(ctx context.Context, t *testing.T, n int, contracts []*log_upkeep_counter_wrapper.LogUpkeepCounter, carrol *bind.TransactOpts, afterEmit func()) {
```
```diff
@@ -424,32 +389,32 @@ func mapListener(m *sync.Map, n int) func() bool {
 	return func() bool {
 		count := 0
 		m.Range(func(key, value interface{}) bool {
-			count++
+			count += value.(int)
 			return true
 		})
 		return count > n
 	}
 }

-func listenPerformed(t *testing.T, backend *backends.SimulatedBackend, registry *iregistry21.IKeeperRegistryMaster, ids []*big.Int, startBlock int64) (func() bool, func()) {
+func listenPerformedN(t *testing.T, backend *backends.SimulatedBackend, registry *iregistry21.IKeeperRegistryMaster, ids []*big.Int, startBlock int64, count int) (func() bool, func()) {
 	cache := &sync.Map{}
 	ctx, cancel := context.WithCancel(testutils.Context(t))
 	start := startBlock

 	go func() {
 		for ctx.Err() == nil {
-			bl := backend.Blockchain().CurrentBlock().Number.Uint64()
+			currentBlock := backend.Blockchain().CurrentBlock().Number.Uint64()

-			sc := make([]bool, len(ids))
-			for i := range sc {
-				sc[i] = true
+			success := make([]bool, len(ids))
+			for i := range success {
+				success[i] = true
 			}

 			iter, err := registry.FilterUpkeepPerformed(&bind.FilterOpts{
 				Start:   uint64(start),
-				End:     &bl,
+				End:     &currentBlock,
 				Context: ctx,
-			}, ids, sc)
+			}, ids, success)

 			if ctx.Err() != nil {
 				return
```
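With the cache now holding per-upkeep perform counts (ints) rather than booleans, mapListener sums the values and fires only once the total strictly exceeds n. A self-contained sketch of that predicate (the helper name `totalAbove` is mine, not from the PR):

```go
package main

import (
	"fmt"
	"sync"
)

// totalAbove mirrors mapListener: it sums the int values in a sync.Map and
// reports whether the total strictly exceeds n (the diff's `count > n`).
func totalAbove(m *sync.Map, n int) func() bool {
	return func() bool {
		count := 0
		m.Range(func(_, value interface{}) bool {
			count += value.(int) // values are per-upkeep perform counts
			return true
		})
		return count > n
	}
}

func main() {
	var m sync.Map
	m.Store("upkeep-1", 2)
	m.Store("upkeep-2", 3)
	fmt.Println(totalAbove(&m, 4)()) // true: 2+3 = 5 > 4
	fmt.Println(totalAbove(&m, 5)()) // false: 5 is not strictly > 5
}
```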
```diff
@@ -460,7 +425,15 @@ func listenPerformed(t *testing.T, backend *backends.SimulatedBackend, registry
 			for iter.Next() {
 				if iter.Event != nil {
 					t.Logf("[automation-ocr3 | EvmRegistry] upkeep performed event emitted for id %s", iter.Event.Id.String())
-					cache.Store(iter.Event.Id.String(), true)
+
+					//cache.Store(iter.Event.Id.String(), true)
+					count, ok := cache.Load(iter.Event.Id.String())
+					if !ok {
+						cache.Store(iter.Event.Id.String(), 1)
+						continue
+					}
+					countI := count.(int)
+					cache.Store(iter.Event.Id.String(), countI+1)
 				}
 			}
```
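The Load-then-Store pair above is not atomic in general, but it is safe here because only the filter goroutine writes to the cache. Under that same single-writer assumption, sync.Map's LoadOrStore expresses the insert-or-increment more compactly; a small standalone sketch (not from the PR):

```go
package main

import (
	"fmt"
	"sync"
)

// increment is an insert-or-increment over a sync.Map, equivalent to the
// Load/Store pair in the diff. Safe with a single writer; with concurrent
// writers the read-modify-write would race and could lose increments.
func increment(cache *sync.Map, id string) {
	v, _ := cache.LoadOrStore(id, 0) // existing count, or 0 for a new id
	cache.Store(id, v.(int)+1)
}

func main() {
	var cache sync.Map
	increment(&cache, "upkeep-1")
	increment(&cache, "upkeep-1")
	v, _ := cache.Load("upkeep-1")
	fmt.Println(v) // 2
}
```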
@@ -470,7 +443,11 @@ func listenPerformed(t *testing.T, backend *backends.SimulatedBackend, registry | |
} | ||
}() | ||
|
||
return mapListener(cache, 0), cancel | ||
return mapListener(cache, count), cancel | ||
} | ||
|
||
func listenPerformed(t *testing.T, backend *backends.SimulatedBackend, registry *iregistry21.IKeeperRegistryMaster, ids []*big.Int, startBlock int64) (func() bool, func()) { | ||
return listenPerformedN(t, backend, registry, ids, startBlock, 0) | ||
} | ||
|
||
func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IKeeperRegistryMaster, backend *backends.SimulatedBackend, usr *bind.TransactOpts) ([]Node, *SimulatedMercuryServer) { | ||
|
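listenPerformed keeps existing call sites working by delegating to listenPerformedN with a count of 0, i.e. "at least one perform observed". A usage sketch, assuming the same test fixtures as in the diff above (t, g, backend, registry, ids, startBlock, recoverEmits):

```go
// Wait for any perform at all (count 0 => predicate fires once total > 0).
listener, done := listenPerformed(t, backend, registry, ids, startBlock)
g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue())
done()

// Wait until strictly more than recoverEmits performs have been observed.
listenerN, doneN := listenPerformedN(t, backend, registry, ids, startBlock, recoverEmits)
g.Eventually(listenerN, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue())
doneN()
```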
Review discussion

Reviewer:
Have we explored any alternate assertions for the non-pipeline transmitter? Or do we feel they are covered by the existing assertions?
Author:
I wasn't able to see how the other services assert their transmits, so I just assumed the existing assertions would be enough, but I can look into this further.
Reviewer:
Verifying that the upkeeps are actually performed should, I think, give similar coverage, but I'm not sure that's done everywhere (e.g. the other place where this assertion was removed).
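For the tests that lost their checkPipelineRuns call without a replacement (the conditional-upkeep and retry tests), one option along the lines suggested here would be to reuse the event-based listener there as well. A hypothetical follow-up sketch, not part of this PR, assuming the same fixtures as the diff above:

```go
// Assert performs on-chain in the tests that lost their pipeline-run check,
// reusing the listenPerformed helper introduced in this PR.
listener, done := listenPerformed(t, backend, registry, ids, startBlock)
g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue())
done()
```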