fix: determine F3 participants relative to current network name #12597

Merged · 8 commits · Oct 17, 2024
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -12,6 +12,7 @@
- Fix a bug in the `lotus-shed indexes backfill-events` command that may result in either duplicate events being backfilled where there are existing events (such an operation *should* be idempotent) or events erroneously having duplicate `logIndex` values when queried via ETH APIs. ([filecoin-project/lotus#12567](https://github.com/filecoin-project/lotus/pull/12567))
- Event APIs (Eth events and actor events) should only return reverted events if client queries by specific block hash / tipset. Eth and actor event subscription APIs should always return reverted events to enable accurate observation of real-time changes. ([filecoin-project/lotus#12585](https://github.com/filecoin-project/lotus/pull/12585))
- Add logic to check if the miner's owner address is delegated (f4 address). If it is delegated, the `lotus-shed sectors termination-estimate` command now sends the termination state call using the worker ID. This fix resolves the issue where termination-estimate did not function correctly for miners with delegated owner addresses. ([filecoin-project/lotus#12569](https://github.com/filecoin-project/lotus/pull/12569))
+- Fix a bug in the F3 participation API where valid leases could be removed due to a dynamic manifest update. ([filecoin-project/lotus#12597](https://github.com/filecoin-project/lotus/pull/12597))

## Improvements

2 changes: 1 addition & 1 deletion chain/lf3/f3.go
@@ -155,7 +155,7 @@ func (fff *F3) runSigningLoop(ctx context.Context) {
clear(alreadyParticipated)
}

-participants := fff.leaser.getParticipantsByInstance(mb.Payload.Instance)
+participants := fff.leaser.getParticipantsByInstance(mb.NetworkName, mb.Payload.Instance)
for _, id := range participants {
if _, ok := alreadyParticipated[id]; ok {
continue
17 changes: 14 additions & 3 deletions chain/lf3/participation_lease.go
@@ -112,15 +112,26 @@ func (l *leaser) participate(ticket api.F3ParticipationTicket) (api.F3ParticipationLease, error) {
return newLease, nil
}

-func (l *leaser) getParticipantsByInstance(instance uint64) []uint64 {
+func (l *leaser) getParticipantsByInstance(network gpbft.NetworkName, instance uint64) []uint64 {
l.mutex.Lock()
defer l.mutex.Unlock()
+currentManifest, _ := l.status()
+currentNetwork := currentManifest.NetworkName
+if currentNetwork != network {
+log.Warnf("no participants for network: current network (%s) does not match requested network (%s) at instance %d", currentNetwork, network, instance)
+return nil
+}
var participants []uint64
for id, lease := range l.leases {
-if instance > lease.ToInstance() {
+if currentNetwork != lease.Network {
+// Lazily delete any lease that does not belong to the current network, likely
+// acquired from prior manifests.
+delete(l.leases, id)
+log.Warnf("lost F3 participation lease for miner %d at instance %d due to network mismatch: %s != %s", id, instance, currentNetwork, lease.Network)
+} else if instance > lease.ToInstance() {
// Lazily delete the expired leases.
delete(l.leases, id)
-log.Warnf("lost F3 participation lease for miner %d", id)
+log.Warnf("lost F3 participation lease for miner %d due to instance (%d) > lease to instance (%d)", id, instance, lease.ToInstance())
} else {
participants = append(participants, id)
}
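The core of the fix is the network check above. Below is a minimal, self-contained sketch of that filtering behaviour; the `lease` struct and `participantsFor` function are simplified stand-ins for illustration (plain strings instead of `gpbft.NetworkName`, and none of the real lease fields), not lotus's actual API.

```go
package main

import "fmt"

// lease is a simplified stand-in for lotus's participation lease type; the
// real one carries more fields and uses gpbft.NetworkName rather than string.
type lease struct {
	Network    string
	ToInstance uint64
}

// participantsFor mirrors the new getParticipantsByInstance logic: leases held
// under a different network name (e.g. acquired from a prior manifest) are
// lazily deleted, as are leases whose last valid instance has passed.
func participantsFor(leases map[uint64]lease, current string, instance uint64) []uint64 {
	var out []uint64
	for id, l := range leases {
		switch {
		case l.Network != current:
			delete(leases, id) // network mismatch: lease from an older manifest
		case instance > l.ToInstance:
			delete(leases, id) // expired lease
		default:
			out = append(out, id)
		}
	}
	return out
}

func main() {
	leases := map[uint64]lease{
		123: {Network: "filecoin/0", ToInstance: 20},
		456: {Network: "filecoin/1", ToInstance: 20},
	}
	// Only miner 456 participates; miner 123's stale lease is dropped on the spot.
	fmt.Println(participantsFor(leases, "filecoin/1", 11)) // [456]
}
```

Before this change, a dynamic manifest update would leave leases from the old network in the map, where the expiry check alone could evict still-valid leases or keep stale ones.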
6 changes: 3 additions & 3 deletions chain/lf3/participation_lease_test.go
@@ -42,18 +42,18 @@ func TestLeaser(t *testing.T) {
require.NoError(t, err)

// Both participants should still be valid.
-participants := subject.getParticipantsByInstance(11)
+participants := subject.getParticipantsByInstance(testManifest.NetworkName, 11)
require.Len(t, participants, 2)
require.Contains(t, participants, uint64(123))
require.Contains(t, participants, uint64(456))

// After instance 16, only participant 456 should be valid.
-participants = subject.getParticipantsByInstance(16)
+participants = subject.getParticipantsByInstance(testManifest.NetworkName, 16)
require.Len(t, participants, 1)
require.Contains(t, participants, uint64(456))

// After instance 17, no participant should hold a lease.
-participants = subject.getParticipantsByInstance(17)
+participants = subject.getParticipantsByInstance(testManifest.NetworkName, 17)
require.Empty(t, participants)
})
t.Run("expired ticket", func(t *testing.T) {
37 changes: 36 additions & 1 deletion itests/f3_test.go
@@ -2,6 +2,7 @@ package itests

import (
"context"
"sync"
"testing"
"time"

@@ -36,6 +37,7 @@ type testEnv struct {
m *manifest.Manifest
t *testing.T
testCtx context.Context
+debug bool
}

// Test that checks that F3 is enabled successfully,
@@ -194,6 +196,24 @@ func (e *testEnv) waitFor(f func(n *kit.TestFullNode) bool, timeout time.Duration) {
e.t.Helper()
require.Eventually(e.t, func() bool {
e.t.Helper()
+defer func() {
+if e.debug {
+var wg sync.WaitGroup
+printProgress := func(index int, n *kit.TestFullNode) {
+defer wg.Done()
+if progress, err := n.F3GetProgress(e.testCtx); err != nil {
+e.t.Logf("Node #%d progress: err: %v", index, err)
+} else {
+e.t.Logf("Node #%d progress: %v", index, progress)
+}
+}
+for i, n := range e.minerFullNodes {
+wg.Add(1)
+go printProgress(i, n)
+}
+wg.Wait()
+}
+}()
for _, n := range e.minerFullNodes {
if !f(n) {
return false
@@ -210,6 +230,12 @@ func (e *testEnv) waitFor(f func(n *kit.TestFullNode) bool, timeout time.Duration) {
// a miner. The last return value is the manifest sender for the network.
func setup(t *testing.T, blocktime time.Duration) *testEnv {
manif := lf3.NewManifest(BaseNetworkName+"/1", DefaultFinality, DefaultBootstrapEpoch, blocktime, cid.Undef)
+manif.Gpbft.Delta = 250 * time.Millisecond
+manif.Gpbft.DeltaBackOffExponent = 1.3
+manif.Gpbft.RebroadcastBackoffBase = manif.Gpbft.Delta * 2
+manif.Gpbft.RebroadcastBackoffMax = manif.Gpbft.RebroadcastBackoffBase * 2
+manif.Gpbft.RebroadcastBackoffExponent = manif.Gpbft.DeltaBackOffExponent

return setupWithStaticManifest(t, manif, false)
}
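For context on the numbers above: assuming gpbft applies a capped exponential backoff to rebroadcasts (an assumption about gpbft internals, not something this diff shows), the test values would yield a schedule roughly like the sketch below; `maxBackoff` and the loop are illustrative only.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Test values from setup() above.
	delta := 250 * time.Millisecond
	base := delta * 2       // RebroadcastBackoffBase = 500ms
	maxBackoff := base * 2  // RebroadcastBackoffMax = 1s
	exponent := 1.3         // RebroadcastBackoffExponent

	// Hypothetical capped exponential backoff; gpbft's real schedule may differ.
	backoff := base
	for attempt := 1; attempt <= 4; attempt++ {
		fmt.Printf("rebroadcast attempt %d after %v\n", attempt, backoff)
		backoff = time.Duration(float64(backoff) * exponent)
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
	// Prints: 500ms, 650ms, 845ms, then capped at 1s.
}
```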

@@ -275,14 +301,23 @@ func setupWithStaticManifest(t *testing.T, manif *manifest.Manifest, testBootstrap bool) *testEnv {
err = n.NetConnect(ctx, e.ms.PeerInfo())
require.NoError(t, err)
}

errgrp.Go(func() error {
defer func() {
require.NoError(t, manifestServerHost.Close())
}()
return e.ms.Run(ctx)
})

+// Update initial manifest params to shorten the timeouts and backoff for
+// testing, and assert it is consistently applied to all nodes.
+e.m.Gpbft.Delta = 250 * time.Millisecond
+e.m.Gpbft.DeltaBackOffExponent = 1.3
+e.m.Gpbft.RebroadcastBackoffBase = manif.Gpbft.Delta * 2
+e.m.Gpbft.RebroadcastBackoffMax = manif.Gpbft.RebroadcastBackoffBase * 2
+e.m.Gpbft.RebroadcastBackoffExponent = manif.Gpbft.DeltaBackOffExponent
e.ms.UpdateManifest(m)
e.waitTillManifestChange(m, 20*time.Second)

return e
}

Expand Down
Loading