diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c7e84f910a..52435293df0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,7 +44,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## v18.0.0 -Fixes mainnet bugs w/ migrated CL pools and incorrect accumulation sumtrees. +Fixes mainnet bugs w/ incorrect accumulation sumtrees, and CL handling for a balancer pool with 0 bonded shares. + +### Improvements + +* [#6144](https://github.com/osmosis-labs/osmosis/pull/6144) perf: Speedup compute time of Epoch +* [#6144](https://github.com/osmosis-labs/osmosis/pull/6144) misc: Move many Superfluid info logs to debug logs ### API breaks diff --git a/app/upgrades/v18/constants.go b/app/upgrades/v18/constants.go index e35c8c662a6..73c9b440825 100644 --- a/app/upgrades/v18/constants.go +++ b/app/upgrades/v18/constants.go @@ -1,4 +1,4 @@ -package v17 +package v18 import ( "github.com/osmosis-labs/osmosis/v17/app/upgrades" @@ -6,7 +6,7 @@ import ( store "github.com/cosmos/cosmos-sdk/store/types" ) -// UpgradeName defines the on-chain upgrade name for the Osmosis v17 upgrade. +// UpgradeName defines the on-chain upgrade name for the Osmosis v18 upgrade. 
const UpgradeName = "v18" var Upgrade = upgrades.Upgrade{ diff --git a/app/upgrades/v18/upgrades.go b/app/upgrades/v18/upgrades.go index 8021cd63c58..942b78c3384 100644 --- a/app/upgrades/v18/upgrades.go +++ b/app/upgrades/v18/upgrades.go @@ -1,17 +1,18 @@ -package v17 +package v18 import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + gammtypes "github.com/osmosis-labs/osmosis/v17/x/gamm/types" + "github.com/osmosis-labs/osmosis/v17/app/keepers" "github.com/osmosis-labs/osmosis/v17/app/upgrades" ) -const ( - mainnetChainID = "osmosis-1" -) +// OSMO / DAI CL pool ID +const firstCLPoolId = 1066 func CreateUpgradeHandler( mm *module.Manager, @@ -26,6 +27,15 @@ func CreateUpgradeHandler( if err != nil { return nil, err } + + for id := 1; id < firstCLPoolId; id++ { + resetSumtree(keepers, ctx, uint64(id)) + } return migrations, nil } } + +func resetSumtree(keepers *keepers.AppKeepers, ctx sdk.Context, id uint64) { + denom := gammtypes.GetPoolShareDenom(id) + keepers.LockupKeeper.RebuildAccumulationStoreForDenom(ctx, denom) +} diff --git a/app/upgrades/v18/upgrades_test.go b/app/upgrades/v18/upgrades_test.go new file mode 100644 index 00000000000..45e00a559e0 --- /dev/null +++ b/app/upgrades/v18/upgrades_test.go @@ -0,0 +1,311 @@ +package v18_test + +import ( + "fmt" + "sort" + "testing" + "time" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + abci "github.com/tendermint/tendermint/abci/types" + + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + "github.com/stretchr/testify/suite" + + "github.com/osmosis-labs/osmosis/v17/app/apptesting" + v17 "github.com/osmosis-labs/osmosis/v17/app/upgrades/v17" + + gammmigration "github.com/osmosis-labs/osmosis/v17/x/gamm/types/migration" + lockuptypes 
"github.com/osmosis-labs/osmosis/v17/x/lockup/types" + protorevtypes "github.com/osmosis-labs/osmosis/v17/x/protorev/types" + superfluidtypes "github.com/osmosis-labs/osmosis/v17/x/superfluid/types" +) + +type UpgradeTestSuite struct { + apptesting.KeeperTestHelper +} + +func (suite *UpgradeTestSuite) SetupTest() { + suite.Setup() +} + +func TestUpgradeTestSuite(t *testing.T) { + suite.Run(t, new(UpgradeTestSuite)) +} + +const ( + dummyUpgradeHeight = 5 + // this would be the amount in the lock that would stay locked during upgrades + shareStaysLocked = 10000 +) + +func assertEqual(suite *UpgradeTestSuite, pre, post interface{}) { + suite.Require().Equal(pre, post) +} + +func (s *UpgradeTestSuite) TestUpgrade() { + // set up pools first to match v17 state(including linked cl pools) + s.setupPoolsToMainnetState() + + // corrupt state to match mainnet state + s.setupCorruptedState() + + // with the corrupted state, distribution used to panic in the `AfterEpochEnd` hook, + // specifically from the one from incentives keeper. + // This method ensures that with the corrupted state, we have the same state where + // distribution would fail. 
+ s.ensurePreUpgradeDistributionPanics() + + migrationInfo, err := s.App.GAMMKeeper.GetAllMigrationInfo(s.Ctx) + s.Require().NoError(err) + + link := migrationInfo.BalancerToConcentratedPoolLinks[0] + s.Require().Equal(uint64(3), link.BalancerPoolId) + + clPoolId := link.ClPoolId + + pool, err := s.App.ConcentratedLiquidityKeeper.GetConcentratedPoolById(s.Ctx, clPoolId) + s.Require().NoError(err) + + // LP Fails before the upgrade + lpTokens := sdk.NewCoins(sdk.NewCoin(pool.GetToken0(), sdk.NewInt(1_000_000)), sdk.NewCoin(pool.GetToken1(), sdk.NewInt(1_000_000))) + s.FundAcc(s.TestAccs[0], lpTokens) + // require a panic + s.Require().Panics(func() { + _, err = s.App.ConcentratedLiquidityKeeper.CreateFullRangePosition(s.Ctx, clPoolId, s.TestAccs[0], lpTokens) + }) + + // upgrade software + s.imitateUpgrade() + s.App.BeginBlocker(s.Ctx, abci.RequestBeginBlock{}) + s.Ctx = s.Ctx.WithBlockTime(s.Ctx.BlockTime().Add(time.Hour * 24)) + + // after the accum values have been reset correctly after upgrade, we expect the accumulator store to be initialized with the correct value, + // which in our test case would be 10000 (the amount that was locked) + valueAfterClear := s.App.LockupKeeper.GetPeriodLocksAccumulation(s.Ctx, lockuptypes.QueryCondition{ + LockQueryType: lockuptypes.ByDuration, + Denom: "gamm/pool/3", + Duration: time.Hour * 24 * 14, + }) + valueAfterClear.Equal(sdk.NewInt(shareStaysLocked)) + + s.ensurePostUpgradeDistributionWorks() + + // Elapse time so that incentive distribution is triggered. 
+ s.Ctx = s.Ctx.WithBlockTime(s.Ctx.BlockTime().Add(time.Hour)) + + // Check that we can LP and swap into pool 3 with no issues + // LP + _, err = s.App.ConcentratedLiquidityKeeper.CreateFullRangePosition(s.Ctx, clPoolId, s.TestAccs[0], lpTokens) + s.Require().NoError(err) + + // Refetch CL Pool + updatedCLPool, err := s.App.ConcentratedLiquidityKeeper.GetPool(s.Ctx, clPoolId) + s.Require().NoError(err) + + // Swap + toSwap := sdk.NewCoin(pool.GetToken0(), sdk.NewInt(100)) + _, err = s.App.ConcentratedLiquidityKeeper.SwapExactAmountIn(s.Ctx, s.TestAccs[0], updatedCLPool, toSwap, pool.GetToken1(), sdk.NewInt(1), sdk.ZeroDec()) + s.Require().NoError(err) + +} + +func (suite *UpgradeTestSuite) imitateUpgrade() { + suite.Ctx = suite.Ctx.WithBlockHeight(dummyUpgradeHeight - 1) + plan := upgradetypes.Plan{Name: "v18", Height: dummyUpgradeHeight} + err := suite.App.UpgradeKeeper.ScheduleUpgrade(suite.Ctx, plan) + suite.Require().NoError(err) + _, exists := suite.App.UpgradeKeeper.GetUpgradePlan(suite.Ctx) + suite.Require().True(exists) + + suite.Ctx = suite.Ctx.WithBlockHeight(dummyUpgradeHeight) +} + +// first set up pool state to mainnet state +func (suite *UpgradeTestSuite) setupPoolsToMainnetState() { + var lastPoolID uint64 // To keep track of the last assigned pool ID + + // Sort AssetPairs based on LinkedClassicPool values. + // We sort both pairs because we use the test asset pairs to create initial state, + // then use the actual asset pairs to verify the result is correct. + sort.Sort(ByLinkedClassicPool(v17.AssetPairsForTestsOnly)) + + // Create earlier pools or dummy pools if needed + for _, assetPair := range v17.AssetPairsForTestsOnly { + poolID := assetPair.LinkedClassicPool + + // If LinkedClassicPool is specified, but it's smaller than the current pool ID, + // create dummy pools to fill the gap. 
+ for lastPoolID+1 < poolID { + poolCoins := sdk.NewCoins(sdk.NewCoin(assetPair.BaseAsset, sdk.NewInt(100000000000)), sdk.NewCoin(assetPair.QuoteAsset, sdk.NewInt(100000000000))) + suite.PrepareBalancerPoolWithCoins(poolCoins...) + lastPoolID++ + } + + // Now create the pool with the correct pool ID. + poolCoins := sdk.NewCoins(sdk.NewCoin(assetPair.BaseAsset, sdk.NewInt(100000000000)), sdk.NewCoin(assetPair.QuoteAsset, sdk.NewInt(100000000000))) + suite.PrepareBalancerPoolWithCoins(poolCoins...) + + // Enable the GAMM pool for superfluid if the record says so. + if assetPair.Superfluid { + poolShareDenom := fmt.Sprintf("gamm/pool/%d", assetPair.LinkedClassicPool) + superfluidAsset := superfluidtypes.SuperfluidAsset{ + Denom: poolShareDenom, + AssetType: superfluidtypes.SuperfluidAssetTypeLPShare, + } + suite.App.SuperfluidKeeper.SetSuperfluidAsset(suite.Ctx, superfluidAsset) + } + + // Update the lastPoolID to the current pool ID. + lastPoolID = poolID + } +} + +// setupCorruptedState aligns the testing environment with the mainnet state. +// By running this method, it will modify the lockup accumulator to be deleted which happened in the v4.0.0 upgrade. +// In this method, we join pool 3, then delete denom accum store in the lockup module to have the testing environment +// in the correct state. 
+func (s *UpgradeTestSuite) setupCorruptedState() { + pool3Denom := "gamm/pool/3" + + // join pool, create lock + addr, err := sdk.AccAddressFromBech32("osmo1urn0pnx8fl5kt89r5nzqd8htruq7skadc2xdk3") + s.Require().NoError(err) + keepers := &s.App.AppKeepers + err = keepers.BankKeeper.MintCoins(s.Ctx, protorevtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(v17.OSMO, sdk.NewInt(50000000000)))) + s.Require().NoError(err) + err = keepers.BankKeeper.SendCoinsFromModuleToAccount(s.Ctx, protorevtypes.ModuleName, addr, sdk.NewCoins(sdk.NewCoin(v17.OSMO, sdk.NewInt(50000000000)))) + s.Require().NoError(err) + aktGAMMPool, err := keepers.GAMMKeeper.GetPool(s.Ctx, 3) + s.Require().NoError(err) + sharesOut, err := keepers.GAMMKeeper.JoinSwapExactAmountIn(s.Ctx, addr, aktGAMMPool.GetId(), sdk.NewCoins(sdk.NewCoin(v17.OSMO, sdk.NewInt(50000000000))), sdk.ZeroInt()) + s.Require().NoError(err) + aktSharesDenom := fmt.Sprintf("gamm/pool/%d", aktGAMMPool.GetId()) + shareCoins := sdk.NewCoins(sdk.NewCoin(aktSharesDenom, sharesOut)) + lock, err := keepers.LockupKeeper.CreateLock(s.Ctx, addr, shareCoins, time.Hour*24*14) + s.Require().NoError(err) + + // also create a lock with the shares that would stay locked during the upgrade. + // doing this would help us assert if the accumulator has been reset to the correct value. 
+ shareCoinsStaysLocked := sdk.NewCoins(sdk.NewCoin(aktSharesDenom, sdk.NewInt(shareStaysLocked))) + s.FundAcc(addr, shareCoinsStaysLocked) + _, err = keepers.LockupKeeper.CreateLock(s.Ctx, addr, shareCoinsStaysLocked, time.Hour*24*14) + s.Require().NoError(err) + + // get value before clearing denom accum store, this should be in positive value + valueBeforeClear := keepers.LockupKeeper.GetPeriodLocksAccumulation(s.Ctx, lockuptypes.QueryCondition{ + LockQueryType: lockuptypes.ByDuration, + Denom: "gamm/pool/3", + Duration: time.Hour * 24 * 14, + }) + + // this should be a positive value + s.Require().True(!valueBeforeClear.IsNegative()) + + // Clear gamm/pool/3 denom accumulation store + s.clearDenomAccumulationStore(pool3Denom) + // Remove the lockup created for pool 3 above to get negative amount of accum value + err = keepers.LockupKeeper.ForceUnlock(s.Ctx, lock) + s.Require().NoError(err) + + valueAfterClear := keepers.LockupKeeper.GetPeriodLocksAccumulation(s.Ctx, lockuptypes.QueryCondition{ + LockQueryType: lockuptypes.ByDuration, + Denom: "gamm/pool/3", + Duration: time.Hour * 24 * 14, + }) + + s.Require().True(valueAfterClear.IsNegative()) + s.Require().True(shareCoins[0].Amount.Neg().Equal(valueAfterClear)) +} + +// We want to ensure that with the corrupted state of the lockup accumulator, +// `AfterEpochEnd` was panicking. +// We can do this check by creating a CL pool, then trying to distribute using that specific +// CL pool gauge. If our test setup was correct, this should panic. +func (suite *UpgradeTestSuite) ensurePreUpgradeDistributionPanics() { + epochInfo := suite.App.IncentivesKeeper.GetEpochInfo(suite.Ctx) + + // add pool 3 denom (AKT) to the authorized quote denom param. 
+ clParams := suite.App.ConcentratedLiquidityKeeper.GetParams(suite.Ctx) + authorizedQuoteDenom := append(clParams.AuthorizedQuoteDenoms, v17.AKTIBCDenom) + clParams.AuthorizedQuoteDenoms = authorizedQuoteDenom + suite.App.ConcentratedLiquidityKeeper.SetParams(suite.Ctx, clParams) + + // prepare CL pool with the same denom as pool 3, which is the pool we are testing with + clPool := suite.PrepareConcentratedPoolWithCoins(v17.OSMO, v17.AKTIBCDenom) + balancerToCLPoolLink := []gammmigration.BalancerToConcentratedPoolLink{ + { + BalancerPoolId: 3, + ClPoolId: clPool.GetId(), + }, + } + + // set migration record between the new CL pool and the old pool(pool number 3) + migrationInfo := gammmigration.MigrationRecords{ + BalancerToConcentratedPoolLinks: balancerToCLPoolLink, + } + suite.App.GAMMKeeper.SetMigrationRecords(suite.Ctx, migrationInfo) + + // add new coins to the CL pool gauge so that it would be distributed after epoch ends then trigger panic + coinsToAdd := sdk.NewCoins(sdk.NewCoin("uosmo", sdk.NewInt(1000))) + gagueId, err := suite.App.PoolIncentivesKeeper.GetPoolGaugeId(suite.Ctx, clPool.GetId(), epochInfo.Duration) + gauge, err := suite.App.IncentivesKeeper.GetGaugeByID(suite.Ctx, gagueId) + suite.Require().NoError(err) + + addr := sdk.AccAddress([]byte("addrx---------------")) + suite.FundAcc(addr, coinsToAdd) + err = suite.App.IncentivesKeeper.AddToGaugeRewards(suite.Ctx, addr, coinsToAdd, gauge.Id) + suite.Require().NoError(err) + + // add block time so that rewards get distributed + suite.Ctx = suite.Ctx.WithBlockTime(suite.Ctx.BlockTime().Add(time.Hour * 25)) + suite.BeginNewBlock(false) + suite.App.EpochsKeeper.BeforeEpochStart(suite.Ctx, epochInfo.GetIdentifier(), 1) + + suite.Require().Panics(func() { + suite.App.IncentivesKeeper.AfterEpochEnd(suite.Ctx, epochInfo.GetIdentifier(), 1) + }) + +} + +// clearDenomAccumulationStore clears denom accumulation store in the lockup keeper, +// this was cleared in v4.0.0 upgrade. 
+// Creating raw pools would re-initialize these pools, thus to properly imitate mainnet state, +// we need to manually delete this again. +func (s *UpgradeTestSuite) clearDenomAccumulationStore(denom string) { + // Get Prefix + capacity := len(lockuptypes.KeyPrefixLockAccumulation) + len(denom) + 1 + res := make([]byte, len(lockuptypes.KeyPrefixLockAccumulation), capacity) + copy(res, lockuptypes.KeyPrefixLockAccumulation) + res = append(res, []byte(denom+"/")...) + + lockupTypesStoreKeys := s.App.AppKeepers.GetKey(lockuptypes.StoreKey) + store := prefix.NewStore(s.Ctx.KVStore(lockupTypesStoreKeys), res) + iter := store.Iterator(nil, nil) + defer iter.Close() + for ; iter.Valid(); iter.Next() { + store.Delete(iter.Key()) + } +} + +func (s *UpgradeTestSuite) ensurePostUpgradeDistributionWorks() { + epochInfo := s.App.IncentivesKeeper.GetEpochInfo(s.Ctx) + + // add block time so that rewards get distributed + s.Ctx = s.Ctx.WithBlockTime(s.Ctx.BlockTime().Add(time.Hour * 25)) + s.BeginNewBlock(false) + s.App.EpochsKeeper.BeforeEpochStart(s.Ctx, epochInfo.GetIdentifier(), 1) + + s.Require().NotPanics(func() { + s.App.IncentivesKeeper.AfterEpochEnd(s.Ctx, epochInfo.GetIdentifier(), 1) + }) +} + +type ByLinkedClassicPool []v17.AssetPair + +func (a ByLinkedClassicPool) Len() int { return len(a) } +func (a ByLinkedClassicPool) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByLinkedClassicPool) Less(i, j int) bool { + return a[i].LinkedClassicPool < a[j].LinkedClassicPool +} diff --git a/x/concentrated-liquidity/incentives.go b/x/concentrated-liquidity/incentives.go index c5868b9d868..1147535ba3e 100644 --- a/x/concentrated-liquidity/incentives.go +++ b/x/concentrated-liquidity/incentives.go @@ -171,32 +171,24 @@ func (k Keeper) prepareBalancerPoolAsFullRange(ctx sdk.Context, clPoolId uint64, // relaxed in the future. 
// Note that we check denom compatibility later, and pool weights technically do not matter as they // are analogous to changing the spot price, which is handled by our lower bounding. - if len(balancerPoolLiquidity) != 2 { + // Note that due to low share ratio, the balancer token liquidity may be truncated to zero. + // Balancer liquidity may also upgrade in-full to CL. + if len(balancerPoolLiquidity) > 2 { return 0, sdk.ZeroDec(), types.ErrInvalidBalancerPoolLiquidityError{ClPoolId: clPoolId, BalancerPoolId: canonicalBalancerPoolId, BalancerPoolLiquidity: balancerPoolLiquidity} } - // We ensure that the asset ordering is correct when passing Balancer assets into the CL pool. - var asset0Amount, asset1Amount sdk.Int - if balancerPoolLiquidity[0].Denom == clPool.GetToken0() { - asset0Amount = balancerPoolLiquidity[0].Amount - asset1Amount = balancerPoolLiquidity[1].Amount + denom0 := clPool.GetToken0() + denom1 := clPool.GetToken1() - // Ensure second denom matches (bal1 -> CL1) - if balancerPoolLiquidity[1].Denom != clPool.GetToken1() { - return 0, sdk.ZeroDec(), types.ErrInvalidBalancerPoolLiquidityError{ClPoolId: clPoolId, BalancerPoolId: canonicalBalancerPoolId, BalancerPoolLiquidity: balancerPoolLiquidity} - } - } else if balancerPoolLiquidity[0].Denom == clPool.GetToken1() { - asset0Amount = balancerPoolLiquidity[1].Amount - asset1Amount = balancerPoolLiquidity[0].Amount - - // Ensure second denom matches (bal1 -> CL0) - if balancerPoolLiquidity[1].Denom != clPool.GetToken0() { - return 0, sdk.ZeroDec(), types.ErrInvalidBalancerPoolLiquidityError{ClPoolId: clPoolId, BalancerPoolId: canonicalBalancerPoolId, BalancerPoolLiquidity: balancerPoolLiquidity} - } - } else { + // This check's purpose is to confirm that denoms are the same. 
+ clCoins := totalBalancerPoolLiquidity.FilterDenoms([]string{denom0, denom1}) + if len(clCoins) != 2 { return 0, sdk.ZeroDec(), types.ErrInvalidBalancerPoolLiquidityError{ClPoolId: clPoolId, BalancerPoolId: canonicalBalancerPoolId, BalancerPoolLiquidity: balancerPoolLiquidity} } + asset0Amount := balancerPoolLiquidity.AmountOf(denom0) + asset1Amount := balancerPoolLiquidity.AmountOf(denom1) + // Calculate the amount of liquidity the Balancer amounts qualify in the CL pool. Note that since we use the CL spot price, this is // safe against prices drifting apart between the two pools (we take the lower bound on the qualifying liquidity in this case). // The `sqrtPriceLowerTick` and `sqrtPriceUpperTick` fields are set to the appropriate values for a full range position. @@ -437,7 +429,9 @@ func (k Keeper) updateGivenPoolUptimeAccumulatorsToNow(ctx sdk.Context, pool typ // Even though this exposes CL LPs to getting immediately diluted by a large Balancer position, this would // require a lot of capital to be tied up in a two week bond, which is a viable tradeoff given the relative // simplicity of this approach. - if balancerPoolId != 0 { + // It is possible that the balancer qualifying shares are zero if the bonded liquidity in the + // pool is extremely low. As a result, in that case we simply skip claiming. + if balancerPoolId != 0 && !qualifyingBalancerShares.IsZero() { _, err := k.claimAndResetFullRangeBalancerPool(ctx, poolId, balancerPoolId, uptimeAccums) if err != nil { return err diff --git a/x/incentives/keeper/distribute.go b/x/incentives/keeper/distribute.go index d99e4b28cda..de38f570f38 100644 --- a/x/incentives/keeper/distribute.go +++ b/x/incentives/keeper/distribute.go @@ -330,7 +330,7 @@ func (k Keeper) distributeInternal( // Note: reason why we do millisecond conversion is because floats are non-deterministic. 
emissionRate := sdk.NewDecFromInt(remainAmountPerEpoch).QuoTruncate(sdk.NewDec(currentEpoch.Duration.Milliseconds()).QuoInt(sdk.NewInt(1000))) - ctx.Logger().Debug("distributeInternal, CreateIncentiveRecord NoLock gauge", "module", types.ModuleName, "gaugeId", gauge.Id, "poolId", pool.GetId(), "remainCoinPerEpoch", remainCoinPerEpoch, "height", ctx.BlockHeight()) + ctx.Logger().Info("distributeInternal, CreateIncentiveRecord NoLock gauge", "module", types.ModuleName, "gaugeId", gauge.Id, "poolId", pool.GetId(), "remainCoinPerEpoch", remainCoinPerEpoch, "height", ctx.BlockHeight()) _, err := k.clk.CreateIncentive(ctx, pool.GetId(), k.ak.GetModuleAddress(types.ModuleName), @@ -343,6 +343,8 @@ func (k Keeper) distributeInternal( // Only default uptime is supported at launch. types.DefaultConcentratedUptime, ) + + ctx.Logger().Info(fmt.Sprintf("distributeInternal CL for pool id %d finished", pool.GetId())) if err != nil { return nil, err } @@ -358,6 +360,22 @@ func (k Keeper) distributeInternal( return nil, nil } + // In this case, remove redundant cases. + // Namely: gauge empty OR gauge coins undistributable. + if remainCoins.Empty() { + ctx.Logger().Debug(fmt.Sprintf("gauge debug, this gauge is empty, why is it being ran %d. Balancer code", gauge.Id)) + err := k.updateGaugePostDistribute(ctx, gauge, totalDistrCoins) + return totalDistrCoins, err + } + + // Remove some spam gauges, is state compatible. + // If they're to pool 1 they can't distr at this small of a quantity. 
+ if remainCoins.Len() == 1 && remainCoins[0].Amount.LTE(sdk.NewInt(10)) && gauge.DistributeTo.Denom == "gamm/pool/1" && remainCoins[0].Denom != "uosmo" { + ctx.Logger().Debug(fmt.Sprintf("gauge debug, this gauge is perceived spam, skipping %d", gauge.Id)) + err := k.updateGaugePostDistribute(ctx, gauge, totalDistrCoins) + return totalDistrCoins, err + } + for _, lock := range locks { distrCoins := sdk.Coins{} ctx.Logger().Debug("distributeInternal, distribute to lock", "module", types.ModuleName, "gaugeId", gauge.Id, "lockId", lock.ID, "remainCons", remainCoins, "height", ctx.BlockHeight()) diff --git a/x/incentives/keeper/hooks.go b/x/incentives/keeper/hooks.go index 7454824c295..9926cd83a68 100644 --- a/x/incentives/keeper/hooks.go +++ b/x/incentives/keeper/hooks.go @@ -47,11 +47,12 @@ func (k Keeper) AfterEpochEnd(ctx sdk.Context, epochIdentifier string, epochNumb } } - ctx.Logger().Info("AfterEpochEnd: distributing to gauges", "module", types.ModuleName, "numGauges", len(distrGauges), "height", ctx.BlockHeight()) + ctx.Logger().Info("x/incentives AfterEpochEnd: distributing to gauges", "module", types.ModuleName, "numGauges", len(distrGauges), "height", ctx.BlockHeight()) _, err := k.Distribute(ctx, distrGauges) if err != nil { return err } + ctx.Logger().Info("x/incentives AfterEpochEnd finished distribution") } return nil } diff --git a/x/lockup/keeper/lock.go b/x/lockup/keeper/lock.go index ca1818a688d..57e1531e5bd 100644 --- a/x/lockup/keeper/lock.go +++ b/x/lockup/keeper/lock.go @@ -295,6 +295,22 @@ func (k Keeper) clearKeysByPrefix(ctx sdk.Context, prefix []byte) { } } +func (k Keeper) RebuildAccumulationStoreForDenom(ctx sdk.Context, denom string) { + prefix := accumulationStorePrefix(denom) + k.clearKeysByPrefix(ctx, prefix) + locks := k.GetLocksDenom(ctx, denom) + mapDurationToAmount := make(map[time.Duration]sdk.Int) + for _, lock := range locks { + if v, ok := mapDurationToAmount[lock.Duration]; ok { + mapDurationToAmount[lock.Duration] = 
v.Add(lock.Coins.AmountOf(denom)) + } else { + mapDurationToAmount[lock.Duration] = lock.Coins.AmountOf(denom) + } + } + + k.writeDurationValuesToAccumTree(ctx, denom, mapDurationToAmount) +} + func (k Keeper) ClearAccumulationStores(ctx sdk.Context) { k.clearKeysByPrefix(ctx, types.KeyPrefixLockAccumulation) } @@ -599,25 +615,29 @@ func (k Keeper) InitializeAllLocks(ctx sdk.Context, locks []types.PeriodLock) er sort.Strings(denoms) for _, denom := range denoms { curDurationMap := accumulationStoreEntries[denom] - durations := make([]time.Duration, 0, len(curDurationMap)) - for duration := range curDurationMap { - durations = append(durations, duration) - } - sort.Slice(durations, func(i, j int) bool { return durations[i] < durations[j] }) - // now that we have a sorted list of durations for this denom, - // add them all to accumulation store - msg := fmt.Sprintf("Setting accumulation entries for locks for %s, there are %d distinct durations", - denom, len(durations)) - ctx.Logger().Info(msg) - for _, d := range durations { - amt := curDurationMap[d] - k.accumulationStore(ctx, denom).Increase(accumulationKey(d), amt) - } + k.writeDurationValuesToAccumTree(ctx, denom, curDurationMap) } return nil } +func (k Keeper) writeDurationValuesToAccumTree(ctx sdk.Context, denom string, durationValueMap map[time.Duration]sdk.Int) { + durations := make([]time.Duration, 0, len(durationValueMap)) + for duration := range durationValueMap { + durations = append(durations, duration) + } + sort.Slice(durations, func(i, j int) bool { return durations[i] < durations[j] }) + // now that we have a sorted list of durations for this denom, + // add them all to accumulation store + msg := fmt.Sprintf("Setting accumulation entries for locks for %s, there are %d distinct durations", + denom, len(durations)) + ctx.Logger().Info(msg) + for _, d := range durations { + amt := durationValueMap[d] + k.accumulationStore(ctx, denom).Increase(accumulationKey(d), amt) + } +} + func (k Keeper) 
InitializeAllSyntheticLocks(ctx sdk.Context, syntheticLocks []types.SyntheticLock) error { // index by coin.Denom, them duration -> amt // We accumulate the accumulation store entries separately, diff --git a/x/superfluid/keeper/stake.go b/x/superfluid/keeper/stake.go index d9c5beb8990..7575ac4113a 100644 --- a/x/superfluid/keeper/stake.go +++ b/x/superfluid/keeper/stake.go @@ -66,7 +66,8 @@ func (k Keeper) RefreshIntermediaryDelegationAmounts(ctx sdk.Context) { if !found { // continue if current delegation is 0, in case its really a dust delegation // that becomes worth something after refresh. - k.Logger(ctx).Info(fmt.Sprintf("Existing delegation not found for %s with %s during superfluid refresh."+ + // TODO: We have a correct explanation for this in some github issue, lets amend this correctly. + k.Logger(ctx).Debug(fmt.Sprintf("Existing delegation not found for %s with %s during superfluid refresh."+ " It may have been previously bonded, but now unbonded.", mAddr.String(), acc.ValAddr)) } else { currentAmount = validator.TokensFromShares(delegation.Shares).RoundInt() @@ -95,7 +96,7 @@ func (k Keeper) RefreshIntermediaryDelegationAmounts(ctx sdk.Context) { ctx.Logger().Error("Error in forceUndelegateAndBurnOsmoTokens, state update reverted", err) } } else { - ctx.Logger().Info("Intermediary account already has correct delegation amount?" + + ctx.Logger().Debug("Intermediary account already has correct delegation amount?" + " This with high probability implies the exact same spot price as the last epoch," + "and no delegation changes.") }