diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/DBChecker.scala b/eclair-core/src/main/scala/fr/acinq/eclair/DBChecker.scala index 30d68d5464..7a0fa396d6 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/DBChecker.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/DBChecker.scala @@ -27,13 +27,11 @@ object DBChecker extends Logging { * Tests if the channels data in the DB is valid (and throws an exception if not): * - it is compatible with the current version of eclair * - channel keys can be re-generated from the channel seed - * - * @param nodeParams node parameters */ def checkChannelsDB(nodeParams: NodeParams): Seq[PersistentChannelData] = { Try(nodeParams.db.channels.listLocalChannels()) match { case Success(channels) => - channels.foreach(data => if (!data.commitments.validateSeed(nodeParams.channelKeyManager)) throw InvalidChannelSeedException(data.channelId)) + channels.foreach(data => if (!data.metaCommitments.validateSeed(nodeParams.channelKeyManager)) throw InvalidChannelSeedException(data.channelId)) channels case Failure(_) => throw IncompatibleDBException } @@ -41,8 +39,6 @@ object DBChecker extends Logging { /** * Tests if the network database is readable. - * - * @param nodeParams */ def checkNetworkDB(nodeParams: NodeParams): Unit = Try(nodeParams.db.network.listChannels(), nodeParams.db.network.listNodes()) match { diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/channel/Commitments.scala b/eclair-core/src/main/scala/fr/acinq/eclair/channel/Commitments.scala index a00e3ba4a6..b731ab349f 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/channel/Commitments.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/channel/Commitments.scala @@ -16,23 +16,14 @@ package fr.acinq.eclair.channel -import akka.event.LoggingAdapter -import fr.acinq.bitcoin.scalacompat.Crypto.{PrivateKey, PublicKey, sha256} -import fr.acinq.bitcoin.scalacompat.{ByteVector32, ByteVector64, Satoshi, SatoshiLong, Script} -import fr.acinq.eclair.Features.DualFunding +import fr.acinq.bitcoin.scalacompat.Crypto.PublicKey +import fr.acinq.bitcoin.scalacompat.{ByteVector32, ByteVector64, Satoshi} import fr.acinq.eclair._ -import fr.acinq.eclair.blockchain.fee.{FeeratePerKw, OnChainFeeConf} -import fr.acinq.eclair.channel.Helpers.Closing -import fr.acinq.eclair.channel.Monitoring.Metrics -import fr.acinq.eclair.channel.fsm.Channel +import fr.acinq.eclair.crypto.ShaChain import fr.acinq.eclair.crypto.keymanager.ChannelKeyManager -import fr.acinq.eclair.crypto.{Generators, ShaChain} -import fr.acinq.eclair.payment.OutgoingPaymentPacket -import fr.acinq.eclair.transactions.DirectedHtlc._ import fr.acinq.eclair.transactions.Transactions._ import fr.acinq.eclair.transactions._ import fr.acinq.eclair.wire.protocol._ -import scodec.bits.ByteVector // @formatter:off case class LocalChanges(proposed: List[UpdateMessage], signed: List[UpdateMessage], acked: List[UpdateMessage]) { @@ -85,758 +76,45 @@ case class Commitments(channelId: ByteVector32, remoteFundingStatus: RemoteFundingStatus, remotePerCommitmentSecrets: ShaChain) extends AbstractCommitments { - import Commitments._ - - require(channelFeatures.paysDirectlyToWallet == localParams.walletStaticPaymentBasepoint.isDefined, s"localParams.walletStaticPaymentBasepoint must be defined only for commitments that pay directly to our wallet (channel features: $channelFeatures") - require(channelFeatures.hasFeature(DualFunding) == localParams.requestedChannelReserve_opt.isEmpty, "custom local channel reserve is incompatible with dual-funded 
channels") - require(channelFeatures.hasFeature(DualFunding) == remoteParams.requestedChannelReserve_opt.isEmpty, "custom remote channel reserve is incompatible with dual-funded channels") - def nextRemoteCommit_opt: Option[RemoteCommit] = remoteNextCommitInfo.swap.toOption.map(_.nextRemoteCommit) - /** - * - * @param scriptPubKey optional local script pubkey provided in CMD_CLOSE - * @return the actual local shutdown script that we should use - */ - def getLocalShutdownScript(scriptPubKey: Option[ByteVector]): Either[ChannelException, ByteVector] = { - // to check whether shutdown_any_segwit is active we check features in local and remote parameters, which are negotiated each time we connect to our peer. - val allowAnySegwit = Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.ShutdownAnySegwit) - (channelFeatures.hasFeature(Features.UpfrontShutdownScript), scriptPubKey) match { - case (true, Some(script)) if script != localParams.defaultFinalScriptPubKey => Left(InvalidFinalScript(channelId)) - case (false, Some(script)) if !Closing.MutualClose.isValidFinalScriptPubkey(script, allowAnySegwit) => Left(InvalidFinalScript(channelId)) - case (false, Some(script)) => Right(script) - case _ => Right(localParams.defaultFinalScriptPubKey) - } - } - - /** - * - * @param remoteScriptPubKey remote script included in a Shutdown message - * @return the actual remote script that we should use - */ - def getRemoteShutdownScript(remoteScriptPubKey: ByteVector): Either[ChannelException, ByteVector] = { - // to check whether shutdown_any_segwit is active we check features in local and remote parameters, which are negotiated each time we connect to our peer. - val allowAnySegwit = Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.ShutdownAnySegwit) - (channelFeatures.hasFeature(Features.UpfrontShutdownScript), remoteParams.shutdownScript) match { - case (false, _) if !Closing.MutualClose.isValidFinalScriptPubkey(remoteScriptPubKey, allowAnySegwit) => Left(InvalidFinalScript(channelId)) - case (false, _) => Right(remoteScriptPubKey) - case (true, None) if !Closing.MutualClose.isValidFinalScriptPubkey(remoteScriptPubKey, allowAnySegwit) => - // this is a special case: they set option_upfront_shutdown_script but did not provide a script in their open/accept message - Left(InvalidFinalScript(channelId)) - case (true, None) => Right(remoteScriptPubKey) - case (true, Some(script)) if script != remoteScriptPubKey => Left(InvalidFinalScript(channelId)) - case (true, Some(script)) => Right(script) - } - } - - def hasNoPendingHtlcs: Boolean = localCommit.spec.htlcs.isEmpty && remoteCommit.spec.htlcs.isEmpty && remoteNextCommitInfo.isRight - - def hasNoPendingHtlcsOrFeeUpdate: Boolean = - remoteNextCommitInfo.isRight && - localCommit.spec.htlcs.isEmpty && - remoteCommit.spec.htlcs.isEmpty && - (localChanges.signed ++ localChanges.acked ++ remoteChanges.signed ++ remoteChanges.acked).collectFirst { case _: UpdateFee => true }.isEmpty - - def hasPendingOrProposedHtlcs: Boolean = !hasNoPendingHtlcs || - localChanges.all.exists(_.isInstanceOf[UpdateAddHtlc]) || - remoteChanges.all.exists(_.isInstanceOf[UpdateAddHtlc]) - - def timedOutOutgoingHtlcs(currentHeight: BlockHeight): Set[UpdateAddHtlc] = { - def expired(add: UpdateAddHtlc): Boolean = currentHeight >= add.cltvExpiry.blockHeight - - localCommit.spec.htlcs.collect(outgoing).filter(expired) ++ - remoteCommit.spec.htlcs.collect(incoming).filter(expired) ++ - 
remoteNextCommitInfo.left.toSeq.flatMap(_.nextRemoteCommit.spec.htlcs.collect(incoming).filter(expired).toSet) - } - - /** - * Return the outgoing HTLC with the given id if it is: - * - signed by us in their commitment transaction (remote) - * - signed by them in our commitment transaction (local) - * - * NB: if we're in the middle of fulfilling or failing that HTLC, it will not be returned by this function. - */ - def getOutgoingHtlcCrossSigned(htlcId: Long): Option[UpdateAddHtlc] = for { - localSigned <- remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit).getOrElse(remoteCommit).spec.findIncomingHtlcById(htlcId) - remoteSigned <- localCommit.spec.findOutgoingHtlcById(htlcId) - } yield { - require(localSigned.add == remoteSigned.add) - localSigned.add - } - - /** - * Return the incoming HTLC with the given id if it is: - * - signed by us in their commitment transaction (remote) - * - signed by them in our commitment transaction (local) - * - * NB: if we're in the middle of fulfilling or failing that HTLC, it will not be returned by this function. - */ - def getIncomingHtlcCrossSigned(htlcId: Long): Option[UpdateAddHtlc] = for { - localSigned <- remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit).getOrElse(remoteCommit).spec.findOutgoingHtlcById(htlcId) - remoteSigned <- localCommit.spec.findIncomingHtlcById(htlcId) - } yield { - require(localSigned.add == remoteSigned.add) - localSigned.add - } - - /** - * HTLCs that are close to timing out upstream are potentially dangerous. If we received the preimage for those HTLCs, - * we need to get a remote signed updated commitment that removes those HTLCs. - * Otherwise when we get close to the upstream timeout, we risk an on-chain race condition between their HTLC timeout - * and our HTLC success in case of a force-close. - */ - def almostTimedOutIncomingHtlcs(currentHeight: BlockHeight, fulfillSafety: CltvExpiryDelta): Set[UpdateAddHtlc] = { - def nearlyExpired(add: UpdateAddHtlc): Boolean = currentHeight >= (add.cltvExpiry - fulfillSafety).blockHeight - - localCommit.spec.htlcs.collect(incoming).filter(nearlyExpired) - } - - /** - * Return a fully signed commit tx, that can be published as-is. - */ - def fullySignedLocalCommitTx(keyManager: ChannelKeyManager): CommitTx = { - val unsignedCommitTx = localCommit.commitTxAndRemoteSig.commitTx - val localSig = keyManager.sign(unsignedCommitTx, keyManager.fundingPublicKey(localParams.fundingKeyPath), TxOwner.Local, commitmentFormat) - val remoteSig = localCommit.commitTxAndRemoteSig.remoteSig - val commitTx = Transactions.addSigs(unsignedCommitTx, keyManager.fundingPublicKey(localParams.fundingKeyPath).publicKey, remoteParams.fundingPubKey, localSig, remoteSig) - // We verify the remote signature when receiving their commit_sig, so this check should always pass. - require(Transactions.checkSpendable(commitTx).isSuccess, "commit signatures are invalid") - commitTx - } - - val commitInput: InputInfo = localCommit.commitTxAndRemoteSig.commitTx.input - - val fundingTxId: ByteVector32 = commitInput.outPoint.txid - - val commitmentFormat: CommitmentFormat = channelFeatures.commitmentFormat - - val channelType: SupportedChannelType = channelFeatures.channelType - - val localNodeId: PublicKey = localParams.nodeId - - val remoteNodeId: PublicKey = remoteParams.nodeId - - val announceChannel: Boolean = channelFlags.announceChannel - - val capacity: Satoshi = commitInput.txOut.amount - - // We can safely cast to millisatoshis since we verify that it's less than a valid millisatoshi amount. 
- val maxHtlcAmount: MilliSatoshi = remoteParams.maxHtlcValueInFlightMsat.toBigInt.min(localParams.maxHtlcValueInFlightMsat.toLong).toLong.msat - - /** Channel reserve that applies to our funds. */ - val localChannelReserve: Satoshi = if (channelFeatures.hasFeature(Features.DualFunding)) { - (capacity / 100).max(remoteParams.dustLimit) - } else { - remoteParams.requestedChannelReserve_opt.get // this is guarded by a require() in Commitments - } - - /** Channel reserve that applies to our peer's funds. */ - val remoteChannelReserve: Satoshi = if (channelFeatures.hasFeature(Features.DualFunding)) { - (capacity / 100).max(localParams.dustLimit) - } else { - localParams.requestedChannelReserve_opt.get // this is guarded by a require() in Commitments - } - - // NB: when computing availableBalanceForSend and availableBalanceForReceive, the initiator keeps an extra buffer on - // top of its usual channel reserve to avoid getting channels stuck in case the on-chain feerate increases (see - // https://github.com/lightningnetwork/lightning-rfc/issues/728 for details). - // - // This extra buffer (which we call "funder fee buffer") is calculated as follows: - // 1) Simulate a x2 feerate increase and compute the corresponding commit tx fee (note that it may trim some HTLCs) - // 2) Add the cost of adding a new untrimmed HTLC at that increased feerate. This ensures that we'll be able to - // actually use the channel to add new HTLCs if the feerate doubles. - // - // If for example the current feerate is 1000 sat/kw, the dust limit 546 sat, and we have 3 pending outgoing HTLCs for - // respectively 1250 sat, 2000 sat and 2500 sat. - // commit tx fee = commitWeight * feerate + 3 * htlcOutputWeight * feerate = 724 * 1000 + 3 * 172 * 1000 = 1240 sat - // To calculate the funder fee buffer, we first double the feerate and calculate the corresponding commit tx fee. - // By doubling the feerate, the first HTLC becomes trimmed so the result is: 724 * 2000 + 2 * 172 * 2000 = 2136 sat - // We then add the additional fee for a potential new untrimmed HTLC: 172 * 2000 = 344 sat - // The funder fee buffer is 2136 + 344 = 2480 sat - // - // If there are many pending HTLCs that are only slightly above the trim threshold, the funder fee buffer may be - // smaller than the current commit tx fee because those HTLCs will be trimmed and the commit tx weight will decrease. - // For example if we have 10 outgoing HTLCs of 1250 sat: - // - commit tx fee = 724 * 1000 + 10 * 172 * 1000 = 2444 sat - // - commit tx fee at twice the feerate = 724 * 2000 = 1448 sat (all HTLCs have been trimmed) - // - cost of an additional untrimmed HTLC = 172 * 2000 = 344 sat - // - funder fee buffer = 1448 + 344 = 1792 sat - // In that case the current commit tx fee is higher than the funder fee buffer and will dominate the balance restrictions. - - lazy val availableBalanceForSend: MilliSatoshi = { - // we need to base the next current commitment on the last sig we sent, even if we didn't yet receive their revocation - val remoteCommit1 = remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit).getOrElse(remoteCommit) - val reduced = CommitmentSpec.reduce(remoteCommit1.spec, remoteChanges.acked, localChanges.proposed) - val balanceNoFees = (reduced.toRemote - localChannelReserve).max(0 msat) - if (localParams.isInitiator) { - // The initiator always pays the on-chain fees, so we must subtract that from the amount we can send. 
- val commitFees = commitTxTotalCostMsat(remoteParams.dustLimit, reduced, commitmentFormat) - // the initiator needs to keep a "funder fee buffer" (see explanation above) - val funderFeeBuffer = commitTxTotalCostMsat(remoteParams.dustLimit, reduced.copy(commitTxFeerate = reduced.commitTxFeerate * 2), commitmentFormat) + htlcOutputFee(reduced.commitTxFeerate * 2, commitmentFormat) - val amountToReserve = commitFees.max(funderFeeBuffer) - if (balanceNoFees - amountToReserve < offeredHtlcTrimThreshold(remoteParams.dustLimit, reduced, commitmentFormat)) { - // htlc will be trimmed - (balanceNoFees - amountToReserve).max(0 msat) - } else { - // htlc will have an output in the commitment tx, so there will be additional fees. - val commitFees1 = commitFees + htlcOutputFee(reduced.commitTxFeerate, commitmentFormat) - // we take the additional fees for that htlc output into account in the fee buffer at a x2 feerate increase - val funderFeeBuffer1 = funderFeeBuffer + htlcOutputFee(reduced.commitTxFeerate * 2, commitmentFormat) - val amountToReserve1 = commitFees1.max(funderFeeBuffer1) - (balanceNoFees - amountToReserve1).max(0 msat) - } - } else { - // The non-initiator doesn't pay on-chain fees. - balanceNoFees - } - } - - lazy val availableBalanceForReceive: MilliSatoshi = { - val reduced = CommitmentSpec.reduce(localCommit.spec, localChanges.acked, remoteChanges.proposed) - val balanceNoFees = (reduced.toRemote - remoteChannelReserve).max(0 msat) - if (localParams.isInitiator) { - // The non-initiator doesn't pay on-chain fees so we don't take those into account when receiving. - balanceNoFees - } else { - // The initiator always pays the on-chain fees, so we must subtract that from the amount we can receive. - val commitFees = commitTxTotalCostMsat(localParams.dustLimit, reduced, commitmentFormat) - // we expected the initiator to keep a "funder fee buffer" (see explanation above) - val funderFeeBuffer = commitTxTotalCostMsat(localParams.dustLimit, reduced.copy(commitTxFeerate = reduced.commitTxFeerate * 2), commitmentFormat) + htlcOutputFee(reduced.commitTxFeerate * 2, commitmentFormat) - val amountToReserve = commitFees.max(funderFeeBuffer) - if (balanceNoFees - amountToReserve < receivedHtlcTrimThreshold(localParams.dustLimit, reduced, commitmentFormat)) { - // htlc will be trimmed - (balanceNoFees - amountToReserve).max(0 msat) - } else { - // htlc will have an output in the commitment tx, so there will be additional fees. - val commitFees1 = commitFees + htlcOutputFee(reduced.commitTxFeerate, commitmentFormat) - // we take the additional fees for that htlc output into account in the fee buffer at a x2 feerate increase - val funderFeeBuffer1 = funderFeeBuffer + htlcOutputFee(reduced.commitTxFeerate * 2, commitmentFormat) - val amountToReserve1 = commitFees1.max(funderFeeBuffer1) - (balanceNoFees - amountToReserve1).max(0 msat) - } - } - } - - /** - * Add a change to our proposed change list. - * - * @param proposal proposed change to add. - * @return an updated commitment instance. 
- */ - private def addLocalProposal(proposal: UpdateMessage): Commitments = - copy(localChanges = localChanges.copy(proposed = localChanges.proposed :+ proposal)) - - private def addRemoteProposal(proposal: UpdateMessage): Commitments = - copy(remoteChanges = remoteChanges.copy(proposed = remoteChanges.proposed :+ proposal)) - - /** - * @param cmd add HTLC command - * @return either Left(failure, error message) where failure is a failure message (see BOLT #4 and the Failure Message class) or Right(new commitments, updateAddHtlc) - */ - def sendAdd(cmd: CMD_ADD_HTLC, currentHeight: BlockHeight, feeConf: OnChainFeeConf): Either[ChannelException, (Commitments, UpdateAddHtlc)] = { - // we must ensure we're not relaying htlcs that are already expired, otherwise the downstream channel will instantly close - // NB: we add a 3 blocks safety to reduce the probability of running into this when our bitcoin node is slightly outdated - val minExpiry = CltvExpiry(currentHeight + 3) - if (cmd.cltvExpiry < minExpiry) { - return Left(ExpiryTooSmall(channelId, minimum = minExpiry, actual = cmd.cltvExpiry, blockHeight = currentHeight)) - } - // we don't want to use too high a refund timeout, because our funds will be locked during that time if the payment is never fulfilled - val maxExpiry = Channel.MAX_CLTV_EXPIRY_DELTA.toCltvExpiry(currentHeight) - if (cmd.cltvExpiry >= maxExpiry) { - return Left(ExpiryTooBig(channelId, maximum = maxExpiry, actual = cmd.cltvExpiry, blockHeight = currentHeight)) - } - - // even if remote advertises support for 0 msat htlc, we limit ourselves to values strictly positive, hence the max(1 msat) - val htlcMinimum = remoteParams.htlcMinimum.max(1 msat) - if (cmd.amount < htlcMinimum) { - return Left(HtlcValueTooSmall(channelId, minimum = htlcMinimum, actual = cmd.amount)) - } - - // we allowed mismatches between our feerates and our remote's as long as commitments didn't contain any HTLC at risk - // we need to verify that we're not disagreeing on feerates anymore before offering new HTLCs - // NB: there may be a pending update_fee that hasn't been applied yet that needs to be taken into account - val localFeeratePerKw = feeConf.getCommitmentFeerate(remoteNodeId, channelType, capacity, None) - val remoteFeeratePerKw = localCommit.spec.commitTxFeerate +: remoteChanges.all.collect { case f: UpdateFee => f.feeratePerKw } - remoteFeeratePerKw.find(feerate => feeConf.feerateToleranceFor(remoteNodeId).isFeeDiffTooHigh(channelType, localFeeratePerKw, feerate)) match { - case Some(feerate) => return Left(FeerateTooDifferent(channelId, localFeeratePerKw = localFeeratePerKw, remoteFeeratePerKw = feerate)) - case None => - } - - // let's compute the current commitment *as seen by them* with this change taken into account - val add = UpdateAddHtlc(channelId, localNextHtlcId, cmd.amount, cmd.paymentHash, cmd.cltvExpiry, cmd.onion, cmd.nextBlindingKey_opt) - // we increment the local htlc index and add an entry to the origins map - val commitments1 = addLocalProposal(add).copy(localNextHtlcId = localNextHtlcId + 1, originChannels = originChannels + (add.id -> cmd.origin)) - // we need to base the next current commitment on the last sig we sent, even if we didn't yet receive their revocation - val remoteCommit1 = commitments1.remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit).getOrElse(commitments1.remoteCommit) - val reduced = CommitmentSpec.reduce(remoteCommit1.spec, commitments1.remoteChanges.acked, commitments1.localChanges.proposed) - // the HTLC we are about to create is 
outgoing, but from their point of view it is incoming - val outgoingHtlcs = reduced.htlcs.collect(incoming) - - // note that the initiator pays the fee, so if sender != initiator, both sides will have to afford this payment - val fees = commitTxTotalCost(commitments1.remoteParams.dustLimit, reduced, commitmentFormat) - // the initiator needs to keep an extra buffer to be able to handle a x2 feerate increase and an additional htlc to avoid - // getting the channel stuck (see https://github.com/lightningnetwork/lightning-rfc/issues/728). - val funderFeeBuffer = commitTxTotalCostMsat(commitments1.remoteParams.dustLimit, reduced.copy(commitTxFeerate = reduced.commitTxFeerate * 2), commitmentFormat) + htlcOutputFee(reduced.commitTxFeerate * 2, commitmentFormat) - // NB: increasing the feerate can actually remove htlcs from the commit tx (if they fall below the trim threshold) - // which may result in a lower commit tx fee; this is why we take the max of the two. - val missingForSender = reduced.toRemote - commitments1.localChannelReserve - (if (commitments1.localParams.isInitiator) fees.max(funderFeeBuffer.truncateToSatoshi) else 0.sat) - val missingForReceiver = reduced.toLocal - commitments1.remoteChannelReserve - (if (commitments1.localParams.isInitiator) 0.sat else fees) - if (missingForSender < 0.msat) { - return Left(InsufficientFunds(channelId, amount = cmd.amount, missing = -missingForSender.truncateToSatoshi, reserve = commitments1.localChannelReserve, fees = if (commitments1.localParams.isInitiator) fees else 0.sat)) - } else if (missingForReceiver < 0.msat) { - if (localParams.isInitiator) { - // receiver is not the channel initiator; it is ok if it can't maintain its channel_reserve for now, as long as its balance is increasing, which is the case if it is receiving a payment - } else { - return Left(RemoteCannotAffordFeesForNewHtlc(channelId, amount = cmd.amount, missing = -missingForReceiver.truncateToSatoshi, reserve = commitments1.remoteChannelReserve, fees = fees)) - } - } - - // We apply local *and* remote restrictions, to ensure both peers are happy with the resulting number of HTLCs. - // NB: we need the `toSeq` because otherwise duplicate amountMsat would be removed (since outgoingHtlcs is a Set). - val htlcValueInFlight = outgoingHtlcs.toSeq.map(_.amountMsat).sum - val allowedHtlcValueInFlight = commitments1.maxHtlcAmount - if (allowedHtlcValueInFlight < htlcValueInFlight) { - return Left(HtlcValueTooHighInFlight(channelId, maximum = allowedHtlcValueInFlight, actual = htlcValueInFlight)) - } - if (Seq(commitments1.localParams.maxAcceptedHtlcs, commitments1.remoteParams.maxAcceptedHtlcs).min < outgoingHtlcs.size) { - return Left(TooManyAcceptedHtlcs(channelId, maximum = Seq(commitments1.localParams.maxAcceptedHtlcs, commitments1.remoteParams.maxAcceptedHtlcs).min)) - } - - // If sending this htlc would overflow our dust exposure, we reject it. 
- val maxDustExposure = feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.maxExposure - val localReduced = DustExposure.reduceForDustExposure(localCommit.spec, commitments1.localChanges.all, remoteChanges.all) - val localDustExposureAfterAdd = DustExposure.computeExposure(localReduced, localParams.dustLimit, commitmentFormat) - if (localDustExposureAfterAdd > maxDustExposure) { - return Left(LocalDustHtlcExposureTooHigh(channelId, maxDustExposure, localDustExposureAfterAdd)) - } - val remoteReduced = DustExposure.reduceForDustExposure(remoteCommit1.spec, remoteChanges.all, commitments1.localChanges.all) - val remoteDustExposureAfterAdd = DustExposure.computeExposure(remoteReduced, remoteParams.dustLimit, commitmentFormat) - if (remoteDustExposureAfterAdd > maxDustExposure) { - return Left(RemoteDustHtlcExposureTooHigh(channelId, maxDustExposure, remoteDustExposureAfterAdd)) - } - - Right(commitments1, add) - } - - def receiveAdd(add: UpdateAddHtlc, feeConf: OnChainFeeConf): Either[ChannelException, Commitments] = { - if (add.id != remoteNextHtlcId) { - return Left(UnexpectedHtlcId(channelId, expected = remoteNextHtlcId, actual = add.id)) - } - - // we used to not enforce a strictly positive minimum, hence the max(1 msat) - val htlcMinimum = localParams.htlcMinimum.max(1 msat) - if (add.amountMsat < htlcMinimum) { - return Left(HtlcValueTooSmall(channelId, minimum = htlcMinimum, actual = add.amountMsat)) - } - - // we allowed mismatches between our feerates and our remote's as long as commitments didn't contain any HTLC at risk - // we need to verify that we're not disagreeing on feerates anymore before accepting new HTLCs - // NB: there may be a pending update_fee that hasn't been applied yet that needs to be taken into account - val localFeeratePerKw = feeConf.getCommitmentFeerate(remoteNodeId, channelType, capacity, None) - val remoteFeeratePerKw = localCommit.spec.commitTxFeerate +: remoteChanges.all.collect { case f: UpdateFee => f.feeratePerKw } - remoteFeeratePerKw.find(feerate => feeConf.feerateToleranceFor(remoteNodeId).isFeeDiffTooHigh(channelType, localFeeratePerKw, feerate)) match { - case Some(feerate) => return Left(FeerateTooDifferent(channelId, localFeeratePerKw = localFeeratePerKw, remoteFeeratePerKw = feerate)) - case None => - } - - // let's compute the current commitment *as seen by us* including this change - val commitments1 = addRemoteProposal(add).copy(remoteNextHtlcId = remoteNextHtlcId + 1) - val reduced = CommitmentSpec.reduce(commitments1.localCommit.spec, commitments1.localChanges.acked, commitments1.remoteChanges.proposed) - val incomingHtlcs = reduced.htlcs.collect(incoming) - - // note that the initiator pays the fee, so if sender != initiator, both sides will have to afford this payment - val fees = commitTxTotalCost(commitments1.remoteParams.dustLimit, reduced, commitmentFormat) - // NB: we don't enforce the funderFeeReserve (see sendAdd) because it would confuse a remote initiator that doesn't have this mitigation in place - // We could enforce it once we're confident a large portion of the network implements it. 
- val missingForSender = reduced.toRemote - commitments1.remoteChannelReserve - (if (commitments1.localParams.isInitiator) 0.sat else fees) - val missingForReceiver = reduced.toLocal - commitments1.localChannelReserve - (if (commitments1.localParams.isInitiator) fees else 0.sat) - if (missingForSender < 0.sat) { - return Left(InsufficientFunds(channelId, amount = add.amountMsat, missing = -missingForSender.truncateToSatoshi, reserve = commitments1.remoteChannelReserve, fees = if (commitments1.localParams.isInitiator) 0.sat else fees)) - } else if (missingForReceiver < 0.sat) { - if (localParams.isInitiator) { - return Left(CannotAffordFees(channelId, missing = -missingForReceiver.truncateToSatoshi, reserve = commitments1.localChannelReserve, fees = fees)) - } else { - // receiver is not the channel initiator; it is ok if it can't maintain its channel_reserve for now, as long as its balance is increasing, which is the case if it is receiving a payment - } - } - - // NB: we need the `toSeq` because otherwise duplicate amountMsat would be removed (since incomingHtlcs is a Set). - val htlcValueInFlight = incomingHtlcs.toSeq.map(_.amountMsat).sum - if (commitments1.localParams.maxHtlcValueInFlightMsat < htlcValueInFlight) { - return Left(HtlcValueTooHighInFlight(channelId, maximum = commitments1.localParams.maxHtlcValueInFlightMsat, actual = htlcValueInFlight)) - } - - if (incomingHtlcs.size > commitments1.localParams.maxAcceptedHtlcs) { - return Left(TooManyAcceptedHtlcs(channelId, maximum = commitments1.localParams.maxAcceptedHtlcs)) - } - - Right(commitments1) - } - - def sendFulfill(cmd: CMD_FULFILL_HTLC): Either[ChannelException, (Commitments, UpdateFulfillHtlc)] = - getIncomingHtlcCrossSigned(cmd.id) match { - case Some(htlc) if alreadyProposed(localChanges.proposed, htlc.id) => - // we have already sent a fail/fulfill for this htlc - Left(UnknownHtlcId(channelId, cmd.id)) - case Some(htlc) if htlc.paymentHash == sha256(cmd.r) => - val fulfill = UpdateFulfillHtlc(channelId, cmd.id, cmd.r) - val commitments1 = addLocalProposal(fulfill) - payment.Monitoring.Metrics.recordIncomingPaymentDistribution(remoteParams.nodeId, htlc.amountMsat) - Right((commitments1, fulfill)) - case Some(_) => Left(InvalidHtlcPreimage(channelId, cmd.id)) - case None => Left(UnknownHtlcId(channelId, cmd.id)) - } - - def receiveFulfill(fulfill: UpdateFulfillHtlc): Either[ChannelException, (Commitments, Origin, UpdateAddHtlc)] = - getOutgoingHtlcCrossSigned(fulfill.id) match { - case Some(htlc) if htlc.paymentHash == sha256(fulfill.paymentPreimage) => originChannels.get(fulfill.id) match { - case Some(origin) => - payment.Monitoring.Metrics.recordOutgoingPaymentDistribution(remoteParams.nodeId, htlc.amountMsat) - Right(addRemoteProposal(fulfill), origin, htlc) - case None => Left(UnknownHtlcId(channelId, fulfill.id)) - } - case Some(_) => Left(InvalidHtlcPreimage(channelId, fulfill.id)) - case None => Left(UnknownHtlcId(channelId, fulfill.id)) - } - - def sendFail(cmd: CMD_FAIL_HTLC, nodeSecret: PrivateKey): Either[ChannelException, (Commitments, HtlcFailureMessage)] = - getIncomingHtlcCrossSigned(cmd.id) match { - case Some(htlc) if alreadyProposed(localChanges.proposed, htlc.id) => - // we have already sent a fail/fulfill for this htlc - Left(UnknownHtlcId(channelId, cmd.id)) - case Some(htlc) => - // we need the shared secret to build the error packet - OutgoingPaymentPacket.buildHtlcFailure(nodeSecret, cmd, htlc).map(fail => (addLocalProposal(fail), fail)) - case None => Left(UnknownHtlcId(channelId, cmd.id)) - } 
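
> Editor's aside: the `sendFulfill` handler deleted just above (this refactoring moves HTLC handling out of `Commitments.scala`) only builds an `UpdateFulfillHtlc` when `htlc.paymentHash == sha256(cmd.r)`, i.e. when the preimage `r` hashes to the hash the HTLC was locked to, and returns `InvalidHtlcPreimage` otherwise. A minimal self-contained sketch of that relationship, using JDK crypto in place of eclair's `sha256` helper; `PreimageCheck` and its values are illustrative, not part of the codebase:

```scala
import java.security.MessageDigest

object PreimageCheck {
  // stand-in for fr.acinq.bitcoin's sha256 helper (same 32-byte SHA-256 digest)
  private def sha256(bytes: Array[Byte]): Array[Byte] =
    MessageDigest.getInstance("SHA-256").digest(bytes)

  def main(args: Array[String]): Unit = {
    val preimage = Array.fill[Byte](32)(0x42)  // the secret `r` carried by CMD_FULFILL_HTLC
    val paymentHash = sha256(preimage)         // the hash the HTLC was locked to
    // the guard sendFulfill applies before building UpdateFulfillHtlc:
    assert(java.util.Arrays.equals(sha256(preimage), paymentHash))
    // a fulfill with a wrong preimage must be rejected (InvalidHtlcPreimage):
    assert(!java.util.Arrays.equals(sha256(Array.fill[Byte](32)(0x43)), paymentHash))
  }
}
```

> Checking the preimage before proposing the fulfill matters because once `update_fulfill_htlc` is sent, the preimage is irrevocably revealed to the peer.
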
- - def sendFailMalformed(cmd: CMD_FAIL_MALFORMED_HTLC): Either[ChannelException, (Commitments, UpdateFailMalformedHtlc)] = { - // BADONION bit must be set in failure_code - if ((cmd.failureCode & FailureMessageCodecs.BADONION) == 0) { - Left(InvalidFailureCode(channelId)) - } else { - getIncomingHtlcCrossSigned(cmd.id) match { - case Some(htlc) if alreadyProposed(localChanges.proposed, htlc.id) => - // we have already sent a fail/fulfill for this htlc - Left(UnknownHtlcId(channelId, cmd.id)) - case Some(_) => - val fail = UpdateFailMalformedHtlc(channelId, cmd.id, cmd.onionHash, cmd.failureCode) - val commitments1 = addLocalProposal(fail) - Right((commitments1, fail)) - case None => Left(UnknownHtlcId(channelId, cmd.id)) - } - } - } - - def receiveFail(fail: UpdateFailHtlc): Either[ChannelException, (Commitments, Origin, UpdateAddHtlc)] = - getOutgoingHtlcCrossSigned(fail.id) match { - case Some(htlc) => originChannels.get(fail.id) match { - case Some(origin) => Right(addRemoteProposal(fail), origin, htlc) - case None => Left(UnknownHtlcId(channelId, fail.id)) - } - case None => Left(UnknownHtlcId(channelId, fail.id)) - } - - def receiveFailMalformed(fail: UpdateFailMalformedHtlc): Either[ChannelException, (Commitments, Origin, UpdateAddHtlc)] = { - // A receiving node MUST fail the channel if the BADONION bit in failure_code is not set for update_fail_malformed_htlc. - if ((fail.failureCode & FailureMessageCodecs.BADONION) == 0) { - Left(InvalidFailureCode(channelId)) - } else { - getOutgoingHtlcCrossSigned(fail.id) match { - case Some(htlc) => originChannels.get(fail.id) match { - case Some(origin) => Right(addRemoteProposal(fail), origin, htlc) - case None => Left(UnknownHtlcId(channelId, fail.id)) - } - case None => Left(UnknownHtlcId(channelId, fail.id)) - } - } - } - - def sendFee(cmd: CMD_UPDATE_FEE, feeConf: OnChainFeeConf): Either[ChannelException, (Commitments, UpdateFee)] = { - if (!localParams.isInitiator) { - Left(NonInitiatorCannotSendUpdateFee(channelId)) - } else { - // let's compute the current commitment *as seen by them* with this change taken into account - val fee = UpdateFee(channelId, cmd.feeratePerKw) - // update_fee replace each other, so we can remove previous ones - val commitments1 = copy(localChanges = localChanges.copy(proposed = localChanges.proposed.filterNot(_.isInstanceOf[UpdateFee]) :+ fee)) - val reduced = CommitmentSpec.reduce(commitments1.remoteCommit.spec, commitments1.remoteChanges.acked, commitments1.localChanges.proposed) - - // a node cannot spend pending incoming htlcs, and need to keep funds above the reserve required by the counterparty, after paying the fee - // we look from remote's point of view, so if local is initiator remote doesn't pay the fees - val fees = commitTxTotalCost(commitments1.remoteParams.dustLimit, reduced, commitmentFormat) - val missing = reduced.toRemote.truncateToSatoshi - commitments1.localChannelReserve - fees - if (missing < 0.sat) { - return Left(CannotAffordFees(channelId, missing = -missing, reserve = commitments1.localChannelReserve, fees = fees)) - } - - // if we would overflow our dust exposure with the new feerate, we avoid sending this fee update - if (feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.closeOnUpdateFeeOverflow) { - val maxDustExposure = feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.maxExposure - // this is the commitment as it would be if our update_fee was immediately signed by both parties (it is only an - // estimate because there can be concurrent updates) - val 
localReduced = DustExposure.reduceForDustExposure(localCommit.spec, commitments1.localChanges.all, remoteChanges.all) - val localDustExposureAfterFeeUpdate = DustExposure.computeExposure(localReduced, cmd.feeratePerKw, localParams.dustLimit, commitmentFormat) - if (localDustExposureAfterFeeUpdate > maxDustExposure) { - return Left(LocalDustHtlcExposureTooHigh(channelId, maxDustExposure, localDustExposureAfterFeeUpdate)) - } - val remoteReduced = DustExposure.reduceForDustExposure(remoteCommit.spec, remoteChanges.all, commitments1.localChanges.all) - val remoteDustExposureAfterFeeUpdate = DustExposure.computeExposure(remoteReduced, cmd.feeratePerKw, remoteParams.dustLimit, commitmentFormat) - if (remoteDustExposureAfterFeeUpdate > maxDustExposure) { - return Left(RemoteDustHtlcExposureTooHigh(channelId, maxDustExposure, remoteDustExposureAfterFeeUpdate)) - } - } - - Right(commitments1, fee) - } - } - - def receiveFee(fee: UpdateFee, feeConf: OnChainFeeConf)(implicit log: LoggingAdapter): Either[ChannelException, Commitments] = { - if (localParams.isInitiator) { - Left(NonInitiatorCannotSendUpdateFee(channelId)) - } else if (fee.feeratePerKw < FeeratePerKw.MinimumFeeratePerKw) { - Left(FeerateTooSmall(channelId, remoteFeeratePerKw = fee.feeratePerKw)) - } else { - Metrics.RemoteFeeratePerKw.withoutTags().record(fee.feeratePerKw.toLong) - val localFeeratePerKw = feeConf.getCommitmentFeerate(remoteNodeId, channelType, capacity, None) - log.info("remote feeratePerKw={}, local feeratePerKw={}, ratio={}", fee.feeratePerKw, localFeeratePerKw, fee.feeratePerKw.toLong.toDouble / localFeeratePerKw.toLong) - if (feeConf.feerateToleranceFor(remoteNodeId).isFeeDiffTooHigh(channelType, localFeeratePerKw, fee.feeratePerKw) && hasPendingOrProposedHtlcs) { - Left(FeerateTooDifferent(channelId, localFeeratePerKw = localFeeratePerKw, remoteFeeratePerKw = fee.feeratePerKw)) - } else { - // NB: we check that the initiator can afford this new fee even if spec allows to do it at next signature - // It is easier to do it here because under certain (race) conditions spec allows a lower-than-normal fee to be paid, - // and it would be tricky to check if the conditions are met at signing - // (it also means that we need to check the fee of the initial commitment tx somewhere) - - // let's compute the current commitment *as seen by us* including this change - // update_fee replace each other, so we can remove previous ones - val commitments1 = copy(remoteChanges = remoteChanges.copy(proposed = remoteChanges.proposed.filterNot(_.isInstanceOf[UpdateFee]) :+ fee)) - val reduced = CommitmentSpec.reduce(commitments1.localCommit.spec, commitments1.localChanges.acked, commitments1.remoteChanges.proposed) - - // a node cannot spend pending incoming htlcs, and need to keep funds above the reserve required by the counterparty, after paying the fee - val fees = commitTxTotalCost(commitments1.localParams.dustLimit, reduced, commitmentFormat) - val missing = reduced.toRemote.truncateToSatoshi - commitments1.remoteChannelReserve - fees - if (missing < 0.sat) { - return Left(CannotAffordFees(channelId, missing = -missing, reserve = commitments1.remoteChannelReserve, fees = fees)) - } - - // if we would overflow our dust exposure with the new feerate, we reject this fee update - if (feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.closeOnUpdateFeeOverflow) { - val maxDustExposure = feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.maxExposure - val localReduced = DustExposure.reduceForDustExposure(localCommit.spec, 
localChanges.all, commitments1.remoteChanges.all) - val localDustExposureAfterFeeUpdate = DustExposure.computeExposure(localReduced, fee.feeratePerKw, localParams.dustLimit, commitmentFormat) - if (localDustExposureAfterFeeUpdate > maxDustExposure) { - return Left(LocalDustHtlcExposureTooHigh(channelId, maxDustExposure, localDustExposureAfterFeeUpdate)) - } - // this is the commitment as it would be if their update_fee was immediately signed by both parties (it is only an - // estimate because there can be concurrent updates) - val remoteReduced = DustExposure.reduceForDustExposure(remoteCommit.spec, commitments1.remoteChanges.all, localChanges.all) - val remoteDustExposureAfterFeeUpdate = DustExposure.computeExposure(remoteReduced, fee.feeratePerKw, remoteParams.dustLimit, commitmentFormat) - if (remoteDustExposureAfterFeeUpdate > maxDustExposure) { - return Left(RemoteDustHtlcExposureTooHigh(channelId, maxDustExposure, remoteDustExposureAfterFeeUpdate)) - } - } - - Right(commitments1) - } - } - } - - def localHasUnsignedOutgoingHtlcs: Boolean = localChanges.proposed.collectFirst { case u: UpdateAddHtlc => u }.isDefined - - def remoteHasUnsignedOutgoingHtlcs: Boolean = remoteChanges.proposed.collectFirst { case u: UpdateAddHtlc => u }.isDefined - - def localHasUnsignedOutgoingUpdateFee: Boolean = localChanges.proposed.collectFirst { case u: UpdateFee => u }.isDefined + def params: Params = Params(channelId, channelConfig, channelFeatures, localParams, remoteParams, channelFlags) - def remoteHasUnsignedOutgoingUpdateFee: Boolean = remoteChanges.proposed.collectFirst { case u: UpdateFee => u }.isDefined + def common: Common = Common(localChanges, remoteChanges, localNextHtlcId, remoteNextHtlcId, localCommit.index, remoteCommit.index, originChannels, remoteNextCommitInfo.swap.map(waitingForRevocation => WaitForRev(waitingForRevocation.sent, waitingForRevocation.sentAfterLocalCommitIndex)).swap, remotePerCommitmentSecrets) - def localHasChanges: Boolean = remoteChanges.acked.nonEmpty || localChanges.proposed.nonEmpty + def commitment: Commitment = Commitment(localFundingStatus, remoteFundingStatus, localCommit, remoteCommit, remoteNextCommitInfo.swap.map(_.nextRemoteCommit).toOption) - def remoteHasChanges: Boolean = localChanges.acked.nonEmpty || remoteChanges.proposed.nonEmpty + def commitInput: InputInfo = commitment.commitInput - def sendCommit(keyManager: ChannelKeyManager)(implicit log: LoggingAdapter): Either[ChannelException, (Commitments, CommitSig)] = { - remoteNextCommitInfo match { - case Right(_) if !localHasChanges => - Left(CannotSignWithoutChanges(channelId)) - case Right(remoteNextPerCommitmentPoint) => - // remote commitment will includes all local changes + remote acked changes - val spec = CommitmentSpec.reduce(remoteCommit.spec, remoteChanges.acked, localChanges.proposed) - val (remoteCommitTx, htlcTxs) = makeRemoteTxs(keyManager, channelConfig, channelFeatures, remoteCommit.index + 1, localParams, remoteParams, commitInput, remoteNextPerCommitmentPoint, spec) - val sig = keyManager.sign(remoteCommitTx, keyManager.fundingPublicKey(localParams.fundingKeyPath), TxOwner.Remote, commitmentFormat) + def fundingTxId: ByteVector32 = commitment.fundingTxId - val sortedHtlcTxs: Seq[TransactionWithInputInfo] = htlcTxs.sortBy(_.input.outPoint.index) - val channelKeyPath = keyManager.keyPath(localParams, channelConfig) - val htlcSigs = sortedHtlcTxs.map(keyManager.sign(_, keyManager.htlcPoint(channelKeyPath), remoteNextPerCommitmentPoint, TxOwner.Remote, commitmentFormat)) + def 
commitmentFormat: CommitmentFormat = params.commitmentFormat - // NB: IN/OUT htlcs are inverted because this is the remote commit - log.info(s"built remote commit number=${remoteCommit.index + 1} toLocalMsat=${spec.toLocal.toLong} toRemoteMsat=${spec.toRemote.toLong} htlc_in={} htlc_out={} feeratePerKw=${spec.commitTxFeerate} txid=${remoteCommitTx.tx.txid} tx={}", spec.htlcs.collect(outgoing).map(_.id).mkString(","), spec.htlcs.collect(incoming).map(_.id).mkString(","), remoteCommitTx.tx) - Metrics.recordHtlcsInFlight(spec, remoteCommit.spec) + def channelType: SupportedChannelType = params.channelType - val commitSig = CommitSig( - channelId = channelId, - signature = sig, - htlcSignatures = htlcSigs.toList) - val commitments1 = copy( - remoteNextCommitInfo = Left(WaitingForRevocation(RemoteCommit(remoteCommit.index + 1, spec, remoteCommitTx.tx.txid, remoteNextPerCommitmentPoint), commitSig, localCommit.index)), - localChanges = localChanges.copy(proposed = Nil, signed = localChanges.proposed), - remoteChanges = remoteChanges.copy(acked = Nil, signed = remoteChanges.acked)) - Right(commitments1, commitSig) - case Left(_) => - Left(CannotSignBeforeRevocation(channelId)) - } - } + def localNodeId: PublicKey = params.localNodeId - def receiveCommit(commit: CommitSig, keyManager: ChannelKeyManager)(implicit log: LoggingAdapter): Either[ChannelException, (Commitments, RevokeAndAck)] = { - // they sent us a signature for *their* view of *our* next commit tx - // so in terms of rev.hashes and indexes we have: - // ourCommit.index -> our current revocation hash, which is about to become our old revocation hash - // ourCommit.index + 1 -> our next revocation hash, used by *them* to build the sig we've just received, and which - // is about to become our current revocation hash - // ourCommit.index + 2 -> which is about to become our next revocation hash - // we will reply to this sig with our old revocation hash preimage (at index) and our next revocation hash (at index + 1) - // and will increment our index + def remoteNodeId: PublicKey = params.remoteNodeId - // lnd sometimes sends a new signature without any changes, which is a (harmless) spec violation - if (!remoteHasChanges) { - // throw CannotSignWithoutChanges(channelId) - log.warning("received a commit sig with no changes (probably coming from lnd)") - } + def announceChannel: Boolean = params.announceChannel - val spec = CommitmentSpec.reduce(localCommit.spec, localChanges.acked, remoteChanges.proposed) - val channelKeyPath = keyManager.keyPath(localParams, channelConfig) - val localPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, localCommit.index + 1) - val (localCommitTx, htlcTxs) = makeLocalTxs(keyManager, channelConfig, channelFeatures, localCommit.index + 1, localParams, remoteParams, commitInput, localPerCommitmentPoint, spec) + def capacity: Satoshi = commitment.capacity - log.info(s"built local commit number=${localCommit.index + 1} toLocalMsat=${spec.toLocal.toLong} toRemoteMsat=${spec.toRemote.toLong} htlc_in={} htlc_out={} feeratePerKw=${spec.commitTxFeerate} txid=${localCommitTx.tx.txid} tx={}", spec.htlcs.collect(incoming).map(_.id).mkString(","), spec.htlcs.collect(outgoing).map(_.id).mkString(","), localCommitTx.tx) + def maxHtlcAmount: MilliSatoshi = params.maxHtlcAmount - if (!Transactions.checkSig(localCommitTx, commit.signature, remoteParams.fundingPubKey, TxOwner.Remote, commitmentFormat)) { - return Left(InvalidCommitmentSignature(channelId, localCommitTx.tx.txid)) - } + def localChannelReserve: 
Satoshi = commitment.localChannelReserve(params) - val sortedHtlcTxs: Seq[HtlcTx] = htlcTxs.sortBy(_.input.outPoint.index) - if (commit.htlcSignatures.size != sortedHtlcTxs.size) { - return Left(HtlcSigCountMismatch(channelId, sortedHtlcTxs.size, commit.htlcSignatures.size)) - } + def remoteChannelReserve: Satoshi = commitment.remoteChannelReserve(params) - val remoteHtlcPubkey = Generators.derivePubKey(remoteParams.htlcBasepoint, localPerCommitmentPoint) - val htlcTxsAndRemoteSigs = sortedHtlcTxs.zip(commit.htlcSignatures).toList.map { - case (htlcTx: HtlcTx, remoteSig) => - if (!Transactions.checkSig(htlcTx, remoteSig, remoteHtlcPubkey, TxOwner.Remote, commitmentFormat)) { - return Left(InvalidHtlcSignature(channelId, htlcTx.tx.txid)) - } - HtlcTxAndRemoteSig(htlcTx, remoteSig) - } + def availableBalanceForSend: MilliSatoshi = commitment.availableBalanceForSend(params, common) - // we will send our revocation preimage + our next revocation hash - val localPerCommitmentSecret = keyManager.commitmentSecret(channelKeyPath, localCommit.index) - val localNextPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, localCommit.index + 2) - val revocation = RevokeAndAck( - channelId = channelId, - perCommitmentSecret = localPerCommitmentSecret, - nextPerCommitmentPoint = localNextPerCommitmentPoint - ) + def availableBalanceForReceive: MilliSatoshi = commitment.availableBalanceForReceive(params, common) - // update our commitment data - val localCommit1 = LocalCommit( - index = localCommit.index + 1, - spec, - commitTxAndRemoteSig = CommitTxAndRemoteSig(localCommitTx, commit.signature), - htlcTxsAndRemoteSigs = htlcTxsAndRemoteSigs) - val ourChanges1 = localChanges.copy(acked = Nil) - val theirChanges1 = remoteChanges.copy(proposed = Nil, acked = remoteChanges.acked ++ remoteChanges.proposed) - val commitments1 = copy(localCommit = localCommit1, localChanges = ourChanges1, remoteChanges = theirChanges1) + def getOutgoingHtlcCrossSigned(htlcId: Long): Option[UpdateAddHtlc] = commitment.getOutgoingHtlcCrossSigned(htlcId) - Right(commitments1, revocation) - } + def getIncomingHtlcCrossSigned(htlcId: Long): Option[UpdateAddHtlc] = commitment.getIncomingHtlcCrossSigned(htlcId) - def receiveRevocation(revocation: RevokeAndAck, maxDustExposure: Satoshi): Either[ChannelException, (Commitments, Seq[PostRevocationAction])] = { - // we receive a revocation because we just sent them a sig for their next commit tx - remoteNextCommitInfo match { - case Left(_) if revocation.perCommitmentSecret.publicKey != remoteCommit.remotePerCommitmentPoint => - Left(InvalidRevocation(channelId)) - case Left(WaitingForRevocation(theirNextCommit, _, _)) => - val receivedHtlcs = remoteChanges.signed.collect { - // we forward adds downstream only when they have been committed by both sides - // it always happen when we receive a revocation, because they send the add, then they sign it, then we sign it - case add: UpdateAddHtlc => add - } - val failedHtlcs = remoteChanges.signed.collect { - // same for fails: we need to make sure that they are in neither commitment before propagating the fail upstream - case fail: UpdateFailHtlc => - val origin = originChannels(fail.id) - val add = remoteCommit.spec.findIncomingHtlcById(fail.id).map(_.add).get - RES_ADD_SETTLED(origin, add, HtlcResult.RemoteFail(fail)) - // same as above - case fail: UpdateFailMalformedHtlc => - val origin = originChannels(fail.id) - val add = remoteCommit.spec.findIncomingHtlcById(fail.id).map(_.add).get - RES_ADD_SETTLED(origin, add, 
HtlcResult.RemoteFailMalformed(fail)) - } - val (acceptedHtlcs, rejectedHtlcs) = { - // the received htlcs have already been added to commitments (they've been signed by our peer), and may already - // overflow our dust exposure (we cannot prevent them from adding htlcs): we artificially remove them before - // deciding which we'll keep and relay and which we'll fail without relaying. - val localSpecWithoutNewHtlcs = localCommit.spec.copy(htlcs = localCommit.spec.htlcs.filter { - case IncomingHtlc(add) if receivedHtlcs.contains(add) => false - case _ => true - }) - val remoteSpecWithoutNewHtlcs = theirNextCommit.spec.copy(htlcs = theirNextCommit.spec.htlcs.filter { - case OutgoingHtlc(add) if receivedHtlcs.contains(add) => false - case _ => true - }) - val localReduced = DustExposure.reduceForDustExposure(localSpecWithoutNewHtlcs, localChanges.all, remoteChanges.acked) - val localCommitDustExposure = DustExposure.computeExposure(localReduced, localParams.dustLimit, commitmentFormat) - val remoteReduced = DustExposure.reduceForDustExposure(remoteSpecWithoutNewHtlcs, remoteChanges.acked, localChanges.all) - val remoteCommitDustExposure = DustExposure.computeExposure(remoteReduced, remoteParams.dustLimit, commitmentFormat) - // we sort incoming htlcs by decreasing amount: we want to prioritize higher amounts. - val sortedReceivedHtlcs = receivedHtlcs.sortBy(_.amountMsat).reverse - DustExposure.filterBeforeForward( - maxDustExposure, - localReduced, - localParams.dustLimit, - localCommitDustExposure, - remoteReduced, - remoteParams.dustLimit, - remoteCommitDustExposure, - sortedReceivedHtlcs, - commitmentFormat) - } - val actions = acceptedHtlcs.map(add => PostRevocationAction.RelayHtlc(add)) ++ - rejectedHtlcs.map(add => PostRevocationAction.RejectHtlc(add)) ++ - failedHtlcs.map(res => PostRevocationAction.RelayFailure(res)) - // the outgoing following htlcs have been completed (fulfilled or failed) when we received this revocation - // they have been removed from both local and remote commitment - // (since fulfill/fail are sent by remote, they are (1) signed by them, (2) revoked by us, (3) signed by us, (4) revoked by them - val completedOutgoingHtlcs = remoteCommit.spec.htlcs.collect(incoming).map(_.id) -- theirNextCommit.spec.htlcs.collect(incoming).map(_.id) - // we remove the newly completed htlcs from the origin map - val originChannels1 = originChannels -- completedOutgoingHtlcs - val commitments1 = copy( - localChanges = localChanges.copy(signed = Nil, acked = localChanges.acked ++ localChanges.signed), - remoteChanges = remoteChanges.copy(signed = Nil), - remoteCommit = theirNextCommit, - remoteNextCommitInfo = Right(revocation.nextPerCommitmentPoint), - remotePerCommitmentSecrets = remotePerCommitmentSecrets.addHash(revocation.perCommitmentSecret.value, 0xFFFFFFFFFFFFL - remoteCommit.index), - originChannels = originChannels1) - Right(commitments1, actions) - case Right(_) => - Left(UnexpectedRevocation(channelId)) - } - } - - def changes2String: String = { - s"""commitments: - | localChanges: - | proposed: ${localChanges.proposed.map(msg2String(_)).mkString(" ")} - | signed: ${localChanges.signed.map(msg2String(_)).mkString(" ")} - | acked: ${localChanges.acked.map(msg2String(_)).mkString(" ")} - | remoteChanges: - | proposed: ${remoteChanges.proposed.map(msg2String(_)).mkString(" ")} - | acked: ${remoteChanges.acked.map(msg2String(_)).mkString(" ")} - | signed: ${remoteChanges.signed.map(msg2String(_)).mkString(" ")} - | nextHtlcId: - | local: $localNextHtlcId - | remote: 
$remoteNextHtlcId""".stripMargin - } + def fullySignedLocalCommitTx(keyManager: ChannelKeyManager): CommitTx = commitment.fullySignedLocalCommitTx(params, keyManager) def specs2String: String = { s"""specs: @@ -857,19 +135,6 @@ case class Commitments(channelId: ByteVector32, |${remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit.spec.htlcs.map(h => s" ${h.direction} ${h.add.id} ${h.add.cltvExpiry}").mkString("\n")).getOrElse("N/A")}""".stripMargin } - def validateSeed(keyManager: ChannelKeyManager): Boolean = { - val localFundingKey = keyManager.fundingPublicKey(localParams.fundingKeyPath).publicKey - val remoteFundingKey = remoteParams.fundingPubKey - val fundingScript = Script.write(Scripts.multiSig2of2(localFundingKey, remoteFundingKey)) - commitInput.redeemScript == fundingScript - } - - def params: Params = Params(channelId, channelConfig, channelFeatures, localParams, remoteParams, channelFlags) - - def common: Common = Common(localChanges, remoteChanges, localNextHtlcId, remoteNextHtlcId, localCommit.index, remoteCommit.index, originChannels, remoteNextCommitInfo.swap.map(waitingForRevocation => WaitForRev(waitingForRevocation.sent, waitingForRevocation.sentAfterLocalCommitIndex)).swap, remotePerCommitmentSecrets) - - def commitment: Commitment = Commitment(localFundingStatus, remoteFundingStatus, localCommit, remoteCommit, remoteNextCommitInfo.swap.map(_.nextRemoteCommit).toOption) - } object Commitments { @@ -895,86 +160,4 @@ object Commitments { remotePerCommitmentSecrets = common.remotePerCommitmentSecrets ) - def alreadyProposed(changes: List[UpdateMessage], id: Long): Boolean = changes.exists { - case u: UpdateFulfillHtlc => id == u.id - case u: UpdateFailHtlc => id == u.id - case u: UpdateFailMalformedHtlc => id == u.id - case _ => false - } - - // @formatter:off - sealed trait PostRevocationAction - object PostRevocationAction { - case class RelayHtlc(incomingHtlc: UpdateAddHtlc) extends PostRevocationAction - case class RejectHtlc(incomingHtlc: UpdateAddHtlc) extends PostRevocationAction - case class RelayFailure(result: RES_ADD_SETTLED[Origin, HtlcResult]) extends PostRevocationAction - } - // @formatter:on - - def makeLocalTxs(keyManager: ChannelKeyManager, - channelConfig: ChannelConfig, - channelFeatures: ChannelFeatures, - commitTxNumber: Long, - localParams: LocalParams, - remoteParams: RemoteParams, - commitmentInput: InputInfo, - localPerCommitmentPoint: PublicKey, - spec: CommitmentSpec): (CommitTx, Seq[HtlcTx]) = { - val channelKeyPath = keyManager.keyPath(localParams, channelConfig) - val localFundingPubkey = keyManager.fundingPublicKey(localParams.fundingKeyPath).publicKey - val localDelayedPaymentPubkey = Generators.derivePubKey(keyManager.delayedPaymentPoint(channelKeyPath).publicKey, localPerCommitmentPoint) - val localHtlcPubkey = Generators.derivePubKey(keyManager.htlcPoint(channelKeyPath).publicKey, localPerCommitmentPoint) - val remotePaymentPubkey = if (channelFeatures.hasFeature(Features.StaticRemoteKey)) { - remoteParams.paymentBasepoint - } else { - Generators.derivePubKey(remoteParams.paymentBasepoint, localPerCommitmentPoint) - } - val remoteHtlcPubkey = Generators.derivePubKey(remoteParams.htlcBasepoint, localPerCommitmentPoint) - val localRevocationPubkey = Generators.revocationPubKey(remoteParams.revocationBasepoint, localPerCommitmentPoint) - val localPaymentBasepoint = localParams.walletStaticPaymentBasepoint.getOrElse(keyManager.paymentPoint(channelKeyPath).publicKey) - val outputs = makeCommitTxOutputs(localParams.isInitiator, 
localParams.dustLimit, localRevocationPubkey, remoteParams.toSelfDelay, localDelayedPaymentPubkey, remotePaymentPubkey, localHtlcPubkey, remoteHtlcPubkey, localFundingPubkey, remoteParams.fundingPubKey, spec, channelFeatures.commitmentFormat) - val commitTx = Transactions.makeCommitTx(commitmentInput, commitTxNumber, localPaymentBasepoint, remoteParams.paymentBasepoint, localParams.isInitiator, outputs) - val htlcTxs = Transactions.makeHtlcTxs(commitTx.tx, localParams.dustLimit, localRevocationPubkey, remoteParams.toSelfDelay, localDelayedPaymentPubkey, spec.htlcTxFeerate(channelFeatures.commitmentFormat), outputs, channelFeatures.commitmentFormat) - (commitTx, htlcTxs) - } - - def makeRemoteTxs(keyManager: ChannelKeyManager, - channelConfig: ChannelConfig, - channelFeatures: ChannelFeatures, - commitTxNumber: Long, - localParams: LocalParams, - remoteParams: RemoteParams, - commitmentInput: InputInfo, - remotePerCommitmentPoint: PublicKey, - spec: CommitmentSpec): (CommitTx, Seq[HtlcTx]) = { - val channelKeyPath = keyManager.keyPath(localParams, channelConfig) - val localFundingPubkey = keyManager.fundingPublicKey(localParams.fundingKeyPath).publicKey - val localPaymentBasepoint = localParams.walletStaticPaymentBasepoint.getOrElse(keyManager.paymentPoint(channelKeyPath).publicKey) - val localPaymentPubkey = if (channelFeatures.hasFeature(Features.StaticRemoteKey)) { - localPaymentBasepoint - } else { - Generators.derivePubKey(localPaymentBasepoint, remotePerCommitmentPoint) - } - val localHtlcPubkey = Generators.derivePubKey(keyManager.htlcPoint(channelKeyPath).publicKey, remotePerCommitmentPoint) - val remoteDelayedPaymentPubkey = Generators.derivePubKey(remoteParams.delayedPaymentBasepoint, remotePerCommitmentPoint) - val remoteHtlcPubkey = Generators.derivePubKey(remoteParams.htlcBasepoint, remotePerCommitmentPoint) - val remoteRevocationPubkey = Generators.revocationPubKey(keyManager.revocationPoint(channelKeyPath).publicKey, remotePerCommitmentPoint) - val outputs = makeCommitTxOutputs(!localParams.isInitiator, remoteParams.dustLimit, remoteRevocationPubkey, localParams.toSelfDelay, remoteDelayedPaymentPubkey, localPaymentPubkey, remoteHtlcPubkey, localHtlcPubkey, remoteParams.fundingPubKey, localFundingPubkey, spec, channelFeatures.commitmentFormat) - val commitTx = Transactions.makeCommitTx(commitmentInput, commitTxNumber, remoteParams.paymentBasepoint, localPaymentBasepoint, !localParams.isInitiator, outputs) - val htlcTxs = Transactions.makeHtlcTxs(commitTx.tx, remoteParams.dustLimit, remoteRevocationPubkey, localParams.toSelfDelay, remoteDelayedPaymentPubkey, spec.htlcTxFeerate(channelFeatures.commitmentFormat), outputs, channelFeatures.commitmentFormat) - (commitTx, htlcTxs) - } - - def msg2String(msg: LightningMessage): String = msg match { - case u: UpdateAddHtlc => s"add-${u.id}" - case u: UpdateFulfillHtlc => s"ful-${u.id}" - case u: UpdateFailHtlc => s"fail-${u.id}" - case _: UpdateFee => s"fee" - case _: CommitSig => s"sig" - case _: RevokeAndAck => s"rev" - case _: Error => s"err" - case _: ChannelReady => s"channel_ready" - case _ => "???" 
- } - } diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/channel/Helpers.scala b/eclair-core/src/main/scala/fr/acinq/eclair/channel/Helpers.scala index a51544df01..24b8d9de80 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/channel/Helpers.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/channel/Helpers.scala @@ -454,8 +454,8 @@ object Helpers { val channelKeyPath = keyManager.keyPath(localParams, channelConfig) val commitmentInput = makeFundingInputInfo(fundingTxHash, fundingTxOutputIndex, fundingAmount, fundingPubKey.publicKey, remoteParams.fundingPubKey) val localPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, commitmentIndex) - val (localCommitTx, _) = Commitments.makeLocalTxs(keyManager, channelConfig, channelFeatures, commitmentIndex, localParams, remoteParams, commitmentInput, localPerCommitmentPoint, localSpec) - val (remoteCommitTx, _) = Commitments.makeRemoteTxs(keyManager, channelConfig, channelFeatures, commitmentIndex, localParams, remoteParams, commitmentInput, remotePerCommitmentPoint, remoteSpec) + val (localCommitTx, _) = Commitment.makeLocalTxs(keyManager, channelConfig, channelFeatures, commitmentIndex, localParams, remoteParams, commitmentInput, localPerCommitmentPoint, localSpec) + val (remoteCommitTx, _) = Commitment.makeRemoteTxs(keyManager, channelConfig, channelFeatures, commitmentIndex, localParams, remoteParams, commitmentInput, remotePerCommitmentPoint, remoteSpec) Right(localSpec, localCommitTx, remoteSpec, remoteCommitTx) } @@ -502,13 +502,13 @@ object Helpers { case Some(revocation) => SyncResult.Success(retransmit = revocation +: signedUpdates :+ commitSig) } - case Left(waitingForRevocation) if remoteChannelReestablish.nextLocalCommitmentNumber == (common.nextRemoteCommitIndex + 1) => + case Left(_) if remoteChannelReestablish.nextLocalCommitmentNumber == (common.nextRemoteCommitIndex + 1) => // we just sent a new commit_sig, they have received it but we haven't received their revocation SyncResult.Success(retransmit = retransmitRevocation_opt.toSeq) - case Left(waitingForRevocation) if remoteChannelReestablish.nextLocalCommitmentNumber < common.nextRemoteCommitIndex => + case Left(_) if remoteChannelReestablish.nextLocalCommitmentNumber < common.nextRemoteCommitIndex => // they are behind SyncResult.RemoteLate - case Left(waitingForRevocation) => + case Left(_) => // we are behind SyncResult.LocalLateUnproven( ourRemoteCommitmentNumber = common.nextRemoteCommitIndex, @@ -982,7 +982,7 @@ object Helpers { * Claim our htlc outputs only */ def claimHtlcOutputs(keyManager: ChannelKeyManager, commitments: Commitments, remoteCommit: RemoteCommit, feeEstimator: FeeEstimator)(implicit log: LoggingAdapter): Map[OutPoint, Option[ClaimHtlcTx]] = { - val (remoteCommitTx, _) = Commitments.makeRemoteTxs(keyManager, commitments.channelConfig, commitments.channelFeatures, remoteCommit.index, commitments.localParams, commitments.remoteParams, commitments.commitInput, remoteCommit.remotePerCommitmentPoint, remoteCommit.spec) + val (remoteCommitTx, _) = Commitment.makeRemoteTxs(keyManager, commitments.channelConfig, commitments.channelFeatures, remoteCommit.index, commitments.localParams, commitments.remoteParams, commitments.commitInput, remoteCommit.remotePerCommitmentPoint, remoteCommit.spec) require(remoteCommitTx.tx.txid == remoteCommit.txid, "txid mismatch, cannot recompute the current remote commit tx") val channelKeyPath = keyManager.keyPath(commitments.localParams, commitments.channelConfig) val localFundingPubkey = 
keyManager.fundingPublicKey(commitments.localParams.fundingKeyPath).publicKey diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/channel/MetaCommitments.scala b/eclair-core/src/main/scala/fr/acinq/eclair/channel/MetaCommitments.scala index 270fd2e05e..f376d92c2d 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/channel/MetaCommitments.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/channel/MetaCommitments.scala @@ -3,15 +3,19 @@ package fr.acinq.eclair.channel import akka.event.LoggingAdapter import com.softwaremill.quicklens.ModifyPimp import fr.acinq.bitcoin.scalacompat.Crypto.{PrivateKey, PublicKey} -import fr.acinq.bitcoin.scalacompat.{ByteVector32, Satoshi} -import fr.acinq.eclair.BlockHeight -import fr.acinq.eclair.blockchain.fee.OnChainFeeConf -import fr.acinq.eclair.channel.Commitments.{PostRevocationAction, msg2String} -import fr.acinq.eclair.crypto.ShaChain +import fr.acinq.bitcoin.scalacompat.{ByteVector32, Crypto, Satoshi, SatoshiLong, Script} +import fr.acinq.eclair.blockchain.fee.{FeeratePerKw, OnChainFeeConf} +import fr.acinq.eclair.channel.Helpers.Closing +import fr.acinq.eclair.channel.Monitoring.Metrics +import fr.acinq.eclair.channel.fsm.Channel import fr.acinq.eclair.crypto.keymanager.ChannelKeyManager -import fr.acinq.eclair.transactions.Transactions.InputInfo +import fr.acinq.eclair.crypto.{Generators, ShaChain} +import fr.acinq.eclair.payment.OutgoingPaymentPacket +import fr.acinq.eclair.transactions.Transactions.{CommitmentFormat, InputInfo, TransactionWithInputInfo} +import fr.acinq.eclair.transactions._ import fr.acinq.eclair.wire.protocol.CommitSigTlv.{AlternativeCommitSig, AlternativeCommitSigsTlv} import fr.acinq.eclair.wire.protocol._ +import fr.acinq.eclair.{BlockHeight, CltvExpiry, CltvExpiryDelta, Features, MilliSatoshi, MilliSatoshiLong, payment} import scodec.bits.ByteVector /** Static parameters shared by all commitments. */ @@ -20,6 +24,21 @@ case class Params(channelId: ByteVector32, channelFeatures: ChannelFeatures, localParams: LocalParams, remoteParams: RemoteParams, channelFlags: ChannelFlags) { + + require(channelFeatures.paysDirectlyToWallet == localParams.walletStaticPaymentBasepoint.isDefined, s"localParams.walletStaticPaymentBasepoint must be defined only for commitments that pay directly to our wallet (channel features: $channelFeatures)") + require(channelFeatures.hasFeature(Features.DualFunding) == localParams.requestedChannelReserve_opt.isEmpty, "custom local channel reserve is incompatible with dual-funded channels") + require(channelFeatures.hasFeature(Features.DualFunding) == remoteParams.requestedChannelReserve_opt.isEmpty, "custom remote channel reserve is incompatible with dual-funded channels") + + val commitmentFormat: CommitmentFormat = channelFeatures.commitmentFormat + val channelType: SupportedChannelType = channelFeatures.channelType + val announceChannel: Boolean = channelFlags.announceChannel + + val localNodeId: PublicKey = localParams.nodeId + val remoteNodeId: PublicKey = remoteParams.nodeId + + // We can safely cast to millisatoshis since we verify that it's less than a valid millisatoshi amount.
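// Aside (not part of the diff): a standalone illustration of the cast in the val below.
// max_htlc_value_in_flight_msat is a u64 on the wire, so the remote value may exceed what
// a signed Long can hold; taking the min with our own Long-sized limit first makes the
// final .toLong safe. The values here are hypothetical.
object MaxHtlcAmountSketch extends App {
  val remoteLimit = BigInt("18446744073709551615") // u64 max, as a peer could advertise
  val localLimit = 1000000000L                     // our own limit, fits in a Long
  val maxHtlcAmountMsat = remoteLimit.min(BigInt(localLimit)).toLong // bounded by localLimit
  println(maxHtlcAmountMsat) // 1000000000
}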
+ val maxHtlcAmount: MilliSatoshi = remoteParams.maxHtlcValueInFlightMsat.toBigInt.min(localParams.maxHtlcValueInFlightMsat.toLong).toLong.msat + /** * We update local/global features at reconnection */ @@ -27,6 +46,41 @@ case class Params(channelId: ByteVector32, localParams = localParams.copy(initFeatures = localInit.features), remoteParams = remoteParams.copy(initFeatures = remoteInit.features) ) + + /** + * @param scriptPubKey optional local script pubkey provided in CMD_CLOSE + * @return the actual local shutdown script that we should use + */ + def getLocalShutdownScript(scriptPubKey: Option[ByteVector]): Either[ChannelException, ByteVector] = { + // to check whether shutdown_any_segwit is active we check features in local and remote parameters, which are negotiated each time we connect to our peer. + val allowAnySegwit = Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.ShutdownAnySegwit) + (channelFeatures.hasFeature(Features.UpfrontShutdownScript), scriptPubKey) match { + case (true, Some(script)) if script != localParams.defaultFinalScriptPubKey => Left(InvalidFinalScript(channelId)) + case (false, Some(script)) if !Closing.MutualClose.isValidFinalScriptPubkey(script, allowAnySegwit) => Left(InvalidFinalScript(channelId)) + case (false, Some(script)) => Right(script) + case _ => Right(localParams.defaultFinalScriptPubKey) + } + } + + /** + * @param remoteScriptPubKey remote script included in a Shutdown message + * @return the actual remote script that we should use + */ + def getRemoteShutdownScript(remoteScriptPubKey: ByteVector): Either[ChannelException, ByteVector] = { + // to check whether shutdown_any_segwit is active we check features in local and remote parameters, which are negotiated each time we connect to our peer. 
+ val allowAnySegwit = Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.ShutdownAnySegwit) + (channelFeatures.hasFeature(Features.UpfrontShutdownScript), remoteParams.shutdownScript) match { + case (false, _) if !Closing.MutualClose.isValidFinalScriptPubkey(remoteScriptPubKey, allowAnySegwit) => Left(InvalidFinalScript(channelId)) + case (false, _) => Right(remoteScriptPubKey) + case (true, None) if !Closing.MutualClose.isValidFinalScriptPubkey(remoteScriptPubKey, allowAnySegwit) => + // this is a special case: they set option_upfront_shutdown_script but did not provide a script in their open/accept message + Left(InvalidFinalScript(channelId)) + case (true, None) => Right(remoteScriptPubKey) + case (true, Some(script)) if script != remoteScriptPubKey => Left(InvalidFinalScript(channelId)) + case (true, Some(script)) => Right(script) + } + } + } case class WaitForRev(sent: CommitSig, sentAfterLocalCommitIndex: Long) @@ -38,8 +92,22 @@ case class Common(localChanges: LocalChanges, remoteChanges: RemoteChanges, originChannels: Map[Long, Origin], // for outgoing htlcs relayed through us, details about the corresponding incoming htlcs remoteNextCommitInfo: Either[WaitForRev, PublicKey], // this one is tricky, it must be kept in sync with Commitment.nextRemoteCommit_opt remotePerCommitmentSecrets: ShaChain) { + + import Common._ + val nextRemoteCommitIndex = remoteCommitIndex + 1 + val localHasChanges: Boolean = remoteChanges.acked.nonEmpty || localChanges.proposed.nonEmpty + val remoteHasChanges: Boolean = localChanges.acked.nonEmpty || remoteChanges.proposed.nonEmpty + val localHasUnsignedOutgoingHtlcs: Boolean = localChanges.proposed.collectFirst { case u: UpdateAddHtlc => u }.isDefined + val remoteHasUnsignedOutgoingHtlcs: Boolean = remoteChanges.proposed.collectFirst { case u: UpdateAddHtlc => u }.isDefined + val localHasUnsignedOutgoingUpdateFee: Boolean = localChanges.proposed.collectFirst { case u: UpdateFee => u }.isDefined + val remoteHasUnsignedOutgoingUpdateFee: Boolean = remoteChanges.proposed.collectFirst { case u: UpdateFee => u }.isDefined + + def addLocalProposal(proposal: UpdateMessage): Common = copy(localChanges = localChanges.copy(proposed = localChanges.proposed :+ proposal)) + + def addRemoteProposal(proposal: UpdateMessage): Common = copy(remoteChanges = remoteChanges.copy(proposed = remoteChanges.proposed :+ proposal)) + /** * When reconnecting, we drop all unsigned changes. */ @@ -57,12 +125,262 @@ case class Common(localChanges: LocalChanges, remoteChanges: RemoteChanges, } } +object Common { + def alreadyProposed(changes: List[UpdateMessage], id: Long): Boolean = changes.exists { + case u: UpdateFulfillHtlc => id == u.id + case u: UpdateFailHtlc => id == u.id + case u: UpdateFailMalformedHtlc => id == u.id + case _ => false + } + + def msg2String(msg: LightningMessage): String = msg match { + case u: UpdateAddHtlc => s"add-${u.id}" + case u: UpdateFulfillHtlc => s"ful-${u.id}" + case u: UpdateFailHtlc => s"fail-${u.id}" + case _: UpdateFee => s"fee" + case _: CommitSig => s"sig" + case _: RevokeAndAck => s"rev" + case _: Error => s"err" + case _: ChannelReady => s"channel_ready" + case _ => "???" + } +} + /** A minimal commitment for a given funding tx. 
*/ case class Commitment(localFundingStatus: LocalFundingStatus, remoteFundingStatus: RemoteFundingStatus, localCommit: LocalCommit, remoteCommit: RemoteCommit, nextRemoteCommit_opt: Option[RemoteCommit]) { val commitInput: InputInfo = localCommit.commitTxAndRemoteSig.commitTx.input val fundingTxId: ByteVector32 = commitInput.outPoint.txid + val capacity: Satoshi = commitInput.txOut.amount + + /** Channel reserve that applies to our funds. */ + def localChannelReserve(params: Params): Satoshi = if (params.channelFeatures.hasFeature(Features.DualFunding)) { + (capacity / 100).max(params.remoteParams.dustLimit) + } else { + params.remoteParams.requestedChannelReserve_opt.get // this is guarded by a require() in Params + } + + /** Channel reserve that applies to our peer's funds. */ + def remoteChannelReserve(params: Params): Satoshi = if (params.channelFeatures.hasFeature(Features.DualFunding)) { + (capacity / 100).max(params.localParams.dustLimit) + } else { + params.localParams.requestedChannelReserve_opt.get // this is guarded by a require() in Params + } + + // NB: when computing availableBalanceForSend and availableBalanceForReceive, the initiator keeps an extra buffer on + // top of its usual channel reserve to avoid getting channels stuck in case the on-chain feerate increases (see + // https://github.com/lightningnetwork/lightning-rfc/issues/728 for details). + // + // This extra buffer (which we call "funder fee buffer") is calculated as follows: + // 1) Simulate a x2 feerate increase and compute the corresponding commit tx fee (note that it may trim some HTLCs) + // 2) Add the cost of adding a new untrimmed HTLC at that increased feerate. This ensures that we'll be able to + // actually use the channel to add new HTLCs if the feerate doubles. + // + // If for example the current feerate is 1000 sat/kw, the dust limit 546 sat, and we have 3 pending outgoing HTLCs for + // respectively 1250 sat, 2000 sat and 2500 sat. + // commit tx fee = commitWeight * feerate + 3 * htlcOutputWeight * feerate = 724 * 1000 + 3 * 172 * 1000 = 1240 sat + // To calculate the funder fee buffer, we first double the feerate and calculate the corresponding commit tx fee. + // By doubling the feerate, the first HTLC becomes trimmed so the result is: 724 * 2000 + 2 * 172 * 2000 = 2136 sat + // We then add the additional fee for a potential new untrimmed HTLC: 172 * 2000 = 344 sat + // The funder fee buffer is 2136 + 344 = 2480 sat + // + // If there are many pending HTLCs that are only slightly above the trim threshold, the funder fee buffer may be + // smaller than the current commit tx fee because those HTLCs will be trimmed and the commit tx weight will decrease. + // For example if we have 10 outgoing HTLCs of 1250 sat: + // - commit tx fee = 724 * 1000 + 10 * 172 * 1000 = 2444 sat + // - commit tx fee at twice the feerate = 724 * 2000 = 1448 sat (all HTLCs have been trimmed) + // - cost of an additional untrimmed HTLC = 172 * 2000 = 344 sat + // - funder fee buffer = 1448 + 344 = 1792 sat + // In that case the current commit tx fee is higher than the funder fee buffer and will dominate the balance restrictions. 
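// Aside (not part of the diff): a standalone sketch reproducing the arithmetic of the
// "funder fee buffer" comment above. The weights (724 for the commit tx, 172 per untrimmed
// HTLC output) are the ones quoted in the example; helper names are illustrative, not
// eclair APIs.
object FunderFeeBufferSketch extends App {
  val commitWeight = 724L
  val htlcOutputWeight = 172L

  // fee in sat for a commit tx carrying `untrimmed` HTLC outputs at `feerate` (sat/kw)
  def commitTxFee(feerate: Long, untrimmed: Int): Long =
    (commitWeight + untrimmed * htlcOutputWeight) * feerate / 1000

  // first scenario above: 3 HTLCs of 1250/2000/2500 sat, feerate 1000 sat/kw, dust 546 sat
  val currentFee = commitTxFee(feerate = 1000, untrimmed = 3)          // 1240 sat
  // at twice the feerate the 1250 sat HTLC is trimmed, leaving 2 HTLC outputs
  val feeAtDoubledFeerate = commitTxFee(feerate = 2000, untrimmed = 2) // 2136 sat
  val extraHtlcFee = htlcOutputWeight * 2000 / 1000                    // 344 sat
  val funderFeeBuffer = feeAtDoubledFeerate + extraHtlcFee             // 2480 sat
  // as in availableBalanceForSend below, we reserve the max of the two amounts
  println(currentFee.max(funderFeeBuffer)) // 2480
}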
+ + def availableBalanceForSend(params: Params, common: Common): MilliSatoshi = { + import params._ + // we need to base the next current commitment on the last sig we sent, even if we didn't yet receive their revocation + val remoteCommit1 = nextRemoteCommit_opt.getOrElse(remoteCommit) + val reduced = CommitmentSpec.reduce(remoteCommit1.spec, common.remoteChanges.acked, common.localChanges.proposed) + val balanceNoFees = (reduced.toRemote - localChannelReserve(params)).max(0 msat) + if (localParams.isInitiator) { + // The initiator always pays the on-chain fees, so we must subtract that from the amount we can send. + val commitFees = Transactions.commitTxTotalCostMsat(remoteParams.dustLimit, reduced, commitmentFormat) + // the initiator needs to keep a "funder fee buffer" (see explanation above) + val funderFeeBuffer = Transactions.commitTxTotalCostMsat(remoteParams.dustLimit, reduced.copy(commitTxFeerate = reduced.commitTxFeerate * 2), commitmentFormat) + Transactions.htlcOutputFee(reduced.commitTxFeerate * 2, commitmentFormat) + val amountToReserve = commitFees.max(funderFeeBuffer) + if (balanceNoFees - amountToReserve < Transactions.offeredHtlcTrimThreshold(remoteParams.dustLimit, reduced, commitmentFormat)) { + // htlc will be trimmed + (balanceNoFees - amountToReserve).max(0 msat) + } else { + // htlc will have an output in the commitment tx, so there will be additional fees. + val commitFees1 = commitFees + Transactions.htlcOutputFee(reduced.commitTxFeerate, commitmentFormat) + // we take the additional fees for that htlc output into account in the fee buffer at a x2 feerate increase + val funderFeeBuffer1 = funderFeeBuffer + Transactions.htlcOutputFee(reduced.commitTxFeerate * 2, commitmentFormat) + val amountToReserve1 = commitFees1.max(funderFeeBuffer1) + (balanceNoFees - amountToReserve1).max(0 msat) + } + } else { + // The non-initiator doesn't pay on-chain fees. + balanceNoFees + } + } + + def availableBalanceForReceive(params: Params, common: Common): MilliSatoshi = { + import params._ + val reduced = CommitmentSpec.reduce(localCommit.spec, common.localChanges.acked, common.remoteChanges.proposed) + val balanceNoFees = (reduced.toRemote - remoteChannelReserve(params)).max(0 msat) + if (localParams.isInitiator) { + // The non-initiator doesn't pay on-chain fees so we don't take those into account when receiving. + balanceNoFees + } else { + // The initiator always pays the on-chain fees, so we must subtract that from the amount we can receive. + val commitFees = Transactions.commitTxTotalCostMsat(localParams.dustLimit, reduced, commitmentFormat) + // we expect the initiator to keep a "funder fee buffer" (see explanation above) + val funderFeeBuffer = Transactions.commitTxTotalCostMsat(localParams.dustLimit, reduced.copy(commitTxFeerate = reduced.commitTxFeerate * 2), commitmentFormat) + Transactions.htlcOutputFee(reduced.commitTxFeerate * 2, commitmentFormat) + val amountToReserve = commitFees.max(funderFeeBuffer) + if (balanceNoFees - amountToReserve < Transactions.receivedHtlcTrimThreshold(localParams.dustLimit, reduced, commitmentFormat)) { + // htlc will be trimmed + (balanceNoFees - amountToReserve).max(0 msat) + } else { + // htlc will have an output in the commitment tx, so there will be additional fees.
+ val commitFees1 = commitFees + Transactions.htlcOutputFee(reduced.commitTxFeerate, commitmentFormat) + // we take the additional fees for that htlc output into account in the fee buffer at a x2 feerate increase + val funderFeeBuffer1 = funderFeeBuffer + Transactions.htlcOutputFee(reduced.commitTxFeerate * 2, commitmentFormat) + val amountToReserve1 = commitFees1.max(funderFeeBuffer1) + (balanceNoFees - amountToReserve1).max(0 msat) + } + } + } + + def hasNoPendingHtlcs: Boolean = localCommit.spec.htlcs.isEmpty && remoteCommit.spec.htlcs.isEmpty && nextRemoteCommit_opt.isEmpty + + def hasNoPendingHtlcsOrFeeUpdate(common: Common): Boolean = + nextRemoteCommit_opt.isEmpty && + localCommit.spec.htlcs.isEmpty && + remoteCommit.spec.htlcs.isEmpty && + (common.localChanges.signed ++ common.localChanges.acked ++ common.remoteChanges.signed ++ common.remoteChanges.acked).collectFirst { case _: UpdateFee => true }.isEmpty + + def hasPendingOrProposedHtlcs(common: Common): Boolean = !hasNoPendingHtlcs || + common.localChanges.all.exists(_.isInstanceOf[UpdateAddHtlc]) || + common.remoteChanges.all.exists(_.isInstanceOf[UpdateAddHtlc]) + + def timedOutOutgoingHtlcs(currentHeight: BlockHeight): Set[UpdateAddHtlc] = { + def expired(add: UpdateAddHtlc): Boolean = currentHeight >= add.cltvExpiry.blockHeight + + localCommit.spec.htlcs.collect(DirectedHtlc.outgoing).filter(expired) ++ + remoteCommit.spec.htlcs.collect(DirectedHtlc.incoming).filter(expired) ++ + nextRemoteCommit_opt.toSeq.flatMap(_.spec.htlcs.collect(DirectedHtlc.incoming).filter(expired).toSet) + } + + /** + * Return the outgoing HTLC with the given id if it is: + * - signed by us in their commitment transaction (remote) + * - signed by them in our commitment transaction (local) + * + * NB: if we're in the middle of fulfilling or failing that HTLC, it will not be returned by this function. + */ + def getOutgoingHtlcCrossSigned(htlcId: Long): Option[UpdateAddHtlc] = for { + localSigned <- nextRemoteCommit_opt.getOrElse(remoteCommit).spec.findIncomingHtlcById(htlcId) + remoteSigned <- localCommit.spec.findOutgoingHtlcById(htlcId) + } yield { + require(localSigned.add == remoteSigned.add) + localSigned.add + } + + /** + * Return the incoming HTLC with the given id if it is: + * - signed by us in their commitment transaction (remote) + * - signed by them in our commitment transaction (local) + * + * NB: if we're in the middle of fulfilling or failing that HTLC, it will not be returned by this function. + */ + def getIncomingHtlcCrossSigned(htlcId: Long): Option[UpdateAddHtlc] = for { + localSigned <- nextRemoteCommit_opt.getOrElse(remoteCommit).spec.findOutgoingHtlcById(htlcId) + remoteSigned <- localCommit.spec.findIncomingHtlcById(htlcId) + } yield { + require(localSigned.add == remoteSigned.add) + localSigned.add + } + + /** + * HTLCs that are close to timing out upstream are potentially dangerous. If we received the preimage for those HTLCs, + * we need to get a remote signed updated commitment that removes those HTLCs. + * Otherwise when we get close to the upstream timeout, we risk an on-chain race condition between their HTLC timeout + * and our HTLC success in case of a force-close. 
+ */ + def almostTimedOutIncomingHtlcs(currentHeight: BlockHeight, fulfillSafety: CltvExpiryDelta): Set[UpdateAddHtlc] = { + def nearlyExpired(add: UpdateAddHtlc): Boolean = currentHeight >= (add.cltvExpiry - fulfillSafety).blockHeight + + localCommit.spec.htlcs.collect(DirectedHtlc.incoming).filter(nearlyExpired) + } + + /** + * Return a fully signed commit tx, that can be published as-is. + */ + def fullySignedLocalCommitTx(params: Params, keyManager: ChannelKeyManager): Transactions.CommitTx = { + val unsignedCommitTx = localCommit.commitTxAndRemoteSig.commitTx + val localSig = keyManager.sign(unsignedCommitTx, keyManager.fundingPublicKey(params.localParams.fundingKeyPath), Transactions.TxOwner.Local, params.commitmentFormat) + val remoteSig = localCommit.commitTxAndRemoteSig.remoteSig + val commitTx = Transactions.addSigs(unsignedCommitTx, keyManager.fundingPublicKey(params.localParams.fundingKeyPath).publicKey, params.remoteParams.fundingPubKey, localSig, remoteSig) + // We verify the remote signature when receiving their commit_sig, so this check should always pass. + require(Transactions.checkSpendable(commitTx).isSuccess, "commit signatures are invalid") + commitTx + } + +} + +object Commitment { + def makeLocalTxs(keyManager: ChannelKeyManager, + channelConfig: ChannelConfig, + channelFeatures: ChannelFeatures, + commitTxNumber: Long, + localParams: LocalParams, + remoteParams: RemoteParams, + commitmentInput: InputInfo, + localPerCommitmentPoint: PublicKey, + spec: CommitmentSpec): (Transactions.CommitTx, Seq[Transactions.HtlcTx]) = { + val channelKeyPath = keyManager.keyPath(localParams, channelConfig) + val localFundingPubkey = keyManager.fundingPublicKey(localParams.fundingKeyPath).publicKey + val localDelayedPaymentPubkey = Generators.derivePubKey(keyManager.delayedPaymentPoint(channelKeyPath).publicKey, localPerCommitmentPoint) + val localHtlcPubkey = Generators.derivePubKey(keyManager.htlcPoint(channelKeyPath).publicKey, localPerCommitmentPoint) + val remotePaymentPubkey = if (channelFeatures.hasFeature(Features.StaticRemoteKey)) { + remoteParams.paymentBasepoint + } else { + Generators.derivePubKey(remoteParams.paymentBasepoint, localPerCommitmentPoint) + } + val remoteHtlcPubkey = Generators.derivePubKey(remoteParams.htlcBasepoint, localPerCommitmentPoint) + val localRevocationPubkey = Generators.revocationPubKey(remoteParams.revocationBasepoint, localPerCommitmentPoint) + val localPaymentBasepoint = localParams.walletStaticPaymentBasepoint.getOrElse(keyManager.paymentPoint(channelKeyPath).publicKey) + val outputs = Transactions.makeCommitTxOutputs(localParams.isInitiator, localParams.dustLimit, localRevocationPubkey, remoteParams.toSelfDelay, localDelayedPaymentPubkey, remotePaymentPubkey, localHtlcPubkey, remoteHtlcPubkey, localFundingPubkey, remoteParams.fundingPubKey, spec, channelFeatures.commitmentFormat) + val commitTx = Transactions.makeCommitTx(commitmentInput, commitTxNumber, localPaymentBasepoint, remoteParams.paymentBasepoint, localParams.isInitiator, outputs) + val htlcTxs = Transactions.makeHtlcTxs(commitTx.tx, localParams.dustLimit, localRevocationPubkey, remoteParams.toSelfDelay, localDelayedPaymentPubkey, spec.htlcTxFeerate(channelFeatures.commitmentFormat), outputs, channelFeatures.commitmentFormat) + (commitTx, htlcTxs) + } + + def makeRemoteTxs(keyManager: ChannelKeyManager, + channelConfig: ChannelConfig, + channelFeatures: ChannelFeatures, + commitTxNumber: Long, + localParams: LocalParams, + remoteParams: RemoteParams, + commitmentInput: InputInfo, 
+ remotePerCommitmentPoint: PublicKey, + spec: CommitmentSpec): (Transactions.CommitTx, Seq[Transactions.HtlcTx]) = { + val channelKeyPath = keyManager.keyPath(localParams, channelConfig) + val localFundingPubkey = keyManager.fundingPublicKey(localParams.fundingKeyPath).publicKey + val localPaymentBasepoint = localParams.walletStaticPaymentBasepoint.getOrElse(keyManager.paymentPoint(channelKeyPath).publicKey) + val localPaymentPubkey = if (channelFeatures.hasFeature(Features.StaticRemoteKey)) { + localPaymentBasepoint + } else { + Generators.derivePubKey(localPaymentBasepoint, remotePerCommitmentPoint) + } + val localHtlcPubkey = Generators.derivePubKey(keyManager.htlcPoint(channelKeyPath).publicKey, remotePerCommitmentPoint) + val remoteDelayedPaymentPubkey = Generators.derivePubKey(remoteParams.delayedPaymentBasepoint, remotePerCommitmentPoint) + val remoteHtlcPubkey = Generators.derivePubKey(remoteParams.htlcBasepoint, remotePerCommitmentPoint) + val remoteRevocationPubkey = Generators.revocationPubKey(keyManager.revocationPoint(channelKeyPath).publicKey, remotePerCommitmentPoint) + val outputs = Transactions.makeCommitTxOutputs(!localParams.isInitiator, remoteParams.dustLimit, remoteRevocationPubkey, localParams.toSelfDelay, remoteDelayedPaymentPubkey, localPaymentPubkey, remoteHtlcPubkey, localHtlcPubkey, remoteParams.fundingPubKey, localFundingPubkey, spec, channelFeatures.commitmentFormat) + val commitTx = Transactions.makeCommitTx(commitmentInput, commitTxNumber, remoteParams.paymentBasepoint, localPaymentBasepoint, !localParams.isInitiator, outputs) + val htlcTxs = Transactions.makeHtlcTxs(commitTx.tx, remoteParams.dustLimit, remoteRevocationPubkey, localParams.toSelfDelay, remoteDelayedPaymentPubkey, spec.htlcTxFeerate(channelFeatures.commitmentFormat), outputs, channelFeatures.commitmentFormat) + (commitTx, htlcTxs) + } } /** @@ -74,125 +392,577 @@ case class MetaCommitments(params: Params, commitments: List[Commitment], remoteChannelData_opt: Option[ByteVector] = None) { + import MetaCommitments._ + require(commitments.nonEmpty, "there must be at least one commitment") + val channelId: ByteVector32 = params.channelId + val localNodeId: PublicKey = params.localNodeId + val remoteNodeId: PublicKey = params.remoteNodeId + val all: List[Commitments] = commitments.map(Commitments(params, common, _)) /** current valid commitments, according to our view of the blockchain */ val main: Commitments = all.head - private def sequence[T](collection: List[Either[ChannelException, T]]): Either[ChannelException, List[T]] = - collection.foldRight[Either[ChannelException, List[T]]](Right(Nil)) { - case (Right(success), Right(res)) => Right(success +: res) - case (Right(_), Left(res)) => Left(res) - case (Left(failure), _) => Left(failure) - } + lazy val availableBalanceForSend: MilliSatoshi = commitments.map(_.availableBalanceForSend(params, common)).min + lazy val availableBalanceForReceive: MilliSatoshi = commitments.map(_.availableBalanceForReceive(params, common)).min + + def hasNoPendingHtlcs: Boolean = commitments.head.hasNoPendingHtlcs + + def hasNoPendingHtlcsOrFeeUpdate: Boolean = commitments.head.hasNoPendingHtlcsOrFeeUpdate(common) - // NB: in the below, some common values are duplicated among all commitments, we only keep the first occurrence + def hasPendingOrProposedHtlcs: Boolean = commitments.head.hasPendingOrProposedHtlcs(common) + def timedOutOutgoingHtlcs(currentHeight: BlockHeight): Set[UpdateAddHtlc] = commitments.head.timedOutOutgoingHtlcs(currentHeight) + + def
almostTimedOutIncomingHtlcs(currentHeight: BlockHeight, fulfillSafety: CltvExpiryDelta): Set[UpdateAddHtlc] = commitments.head.almostTimedOutIncomingHtlcs(currentHeight, fulfillSafety) + + def getOutgoingHtlcCrossSigned(htlcId: Long): Option[UpdateAddHtlc] = commitments.head.getOutgoingHtlcCrossSigned(htlcId) + + def getIncomingHtlcCrossSigned(htlcId: Long): Option[UpdateAddHtlc] = commitments.head.getIncomingHtlcCrossSigned(htlcId) + + /** + * @param cmd add HTLC command + * @return either Left(failure, error message) where failure is a failure message (see BOLT #4 and the Failure Message class) or Right(new commitments, updateAddHtlc) + */ def sendAdd(cmd: CMD_ADD_HTLC, currentHeight: BlockHeight, feeConf: OnChainFeeConf): Either[ChannelException, (MetaCommitments, UpdateAddHtlc)] = { - sequence(all.map(_.sendAdd(cmd, currentHeight, feeConf))) - .map { res: List[(Commitments, UpdateAddHtlc)] => (res.head._1.common, res.map(_._1.commitment), res.head._2) } - .map { case (common, commitments, add) => (this.copy(common = common, commitments = commitments), add) } + // we must ensure we're not relaying htlcs that are already expired, otherwise the downstream channel will instantly close + // NB: we add a 3 blocks safety to reduce the probability of running into this when our bitcoin node is slightly outdated + val minExpiry = CltvExpiry(currentHeight + 3) + if (cmd.cltvExpiry < minExpiry) { + return Left(ExpiryTooSmall(channelId, minimum = minExpiry, actual = cmd.cltvExpiry, blockHeight = currentHeight)) + } + // we don't want to use too high a refund timeout, because our funds will be locked during that time if the payment is never fulfilled + val maxExpiry = Channel.MAX_CLTV_EXPIRY_DELTA.toCltvExpiry(currentHeight) + if (cmd.cltvExpiry >= maxExpiry) { + return Left(ExpiryTooBig(channelId, maximum = maxExpiry, actual = cmd.cltvExpiry, blockHeight = currentHeight)) + } + + // even if remote advertises support for 0 msat htlc, we limit ourselves to values strictly positive, hence the max(1 msat) + val htlcMinimum = params.remoteParams.htlcMinimum.max(1 msat) + if (cmd.amount < htlcMinimum) { + return Left(HtlcValueTooSmall(params.channelId, minimum = htlcMinimum, actual = cmd.amount)) + } + + val add = UpdateAddHtlc(channelId, common.localNextHtlcId, cmd.amount, cmd.paymentHash, cmd.cltvExpiry, cmd.onion, cmd.nextBlindingKey_opt) + // we increment the local htlc index and add an entry to the origins map + val common1 = common.addLocalProposal(add).copy(localNextHtlcId = common.localNextHtlcId + 1, originChannels = common.originChannels + (add.id -> cmd.origin)) + + // let's compute the current commitments *as seen by them* with this change taken into account + commitments.foreach(commitment => { + // we allowed mismatches between our feerates and our remote's as long as commitments didn't contain any HTLC at risk + // we need to verify that we're not disagreeing on feerates anymore before offering new HTLCs + // NB: there may be a pending update_fee that hasn't been applied yet that needs to be taken into account + val localFeeratePerKw = feeConf.getCommitmentFeerate(remoteNodeId, params.channelType, commitment.capacity, None) + val remoteFeeratePerKw = commitment.localCommit.spec.commitTxFeerate +: common1.remoteChanges.all.collect { case f: UpdateFee => f.feeratePerKw } + remoteFeeratePerKw.find(feerate => feeConf.feerateToleranceFor(remoteNodeId).isFeeDiffTooHigh(params.channelType, localFeeratePerKw, feerate)) match { + case Some(feerate) => return Left(FeerateTooDifferent(channelId, 
localFeeratePerKw = localFeeratePerKw, remoteFeeratePerKw = feerate)) + case None => + } + + // we need to base the next current commitment on the last sig we sent, even if we didn't yet receive their revocation + val remoteCommit1 = commitment.nextRemoteCommit_opt.getOrElse(commitment.remoteCommit) + val reduced = CommitmentSpec.reduce(remoteCommit1.spec, common1.remoteChanges.acked, common1.localChanges.proposed) + // the HTLC we are about to create is outgoing, but from their point of view it is incoming + val outgoingHtlcs = reduced.htlcs.collect(DirectedHtlc.incoming) + + // note that the initiator pays the fee, so if sender != initiator, both sides will have to afford this payment + val fees = Transactions.commitTxTotalCost(params.remoteParams.dustLimit, reduced, params.commitmentFormat) + // the initiator needs to keep an extra buffer to be able to handle a x2 feerate increase and an additional htlc to avoid + // getting the channel stuck (see https://github.com/lightningnetwork/lightning-rfc/issues/728). + val funderFeeBuffer = Transactions.commitTxTotalCostMsat(params.remoteParams.dustLimit, reduced.copy(commitTxFeerate = reduced.commitTxFeerate * 2), params.commitmentFormat) + Transactions.htlcOutputFee(reduced.commitTxFeerate * 2, params.commitmentFormat) + // NB: increasing the feerate can actually remove htlcs from the commit tx (if they fall below the trim threshold) + // which may result in a lower commit tx fee; this is why we take the max of the two. + val missingForSender = reduced.toRemote - commitment.localChannelReserve(params) - (if (params.localParams.isInitiator) fees.max(funderFeeBuffer.truncateToSatoshi) else 0.sat) + val missingForReceiver = reduced.toLocal - commitment.remoteChannelReserve(params) - (if (params.localParams.isInitiator) 0.sat else fees) + if (missingForSender < 0.msat) { + return Left(InsufficientFunds(channelId, amount = cmd.amount, missing = -missingForSender.truncateToSatoshi, reserve = commitment.localChannelReserve(params), fees = if (params.localParams.isInitiator) fees else 0.sat)) + } else if (missingForReceiver < 0.msat) { + if (params.localParams.isInitiator) { + // receiver is not the channel initiator; it is ok if it can't maintain its channel_reserve for now, as long as its balance is increasing, which is the case if it is receiving a payment + } else { + return Left(RemoteCannotAffordFeesForNewHtlc(channelId, amount = cmd.amount, missing = -missingForReceiver.truncateToSatoshi, reserve = commitment.remoteChannelReserve(params), fees = fees)) + } + } + + // We apply local *and* remote restrictions, to ensure both peers are happy with the resulting number of HTLCs. + // NB: we need the `toSeq` because otherwise duplicate amountMsat would be removed (since outgoingHtlcs is a Set). + val htlcValueInFlight = outgoingHtlcs.toSeq.map(_.amountMsat).sum + val allowedHtlcValueInFlight = params.maxHtlcAmount + if (allowedHtlcValueInFlight < htlcValueInFlight) { + return Left(HtlcValueTooHighInFlight(channelId, maximum = allowedHtlcValueInFlight, actual = htlcValueInFlight)) + } + if (Seq(params.localParams.maxAcceptedHtlcs, params.remoteParams.maxAcceptedHtlcs).min < outgoingHtlcs.size) { + return Left(TooManyAcceptedHtlcs(channelId, maximum = Seq(params.localParams.maxAcceptedHtlcs, params.remoteParams.maxAcceptedHtlcs).min)) + } + + // If sending this htlc would overflow our dust exposure, we reject it. 
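// Aside (not part of the diff): a simplified sketch of the dust-exposure notion used in the
// checks below. Exposure is the total value of HTLCs that would be trimmed (no output in the
// commit tx) and therefore burned to fees on a force-close. The HTLC-timeout weight (663) is
// the BOLT 3 figure for non-anchor commitments; the helpers are illustrative, not eclair's
// DustExposure API.
object DustExposureSketch extends App {
  // an offered HTLC is trimmed when it cannot pay for its own HTLC-timeout tx on top of dust
  def trimThreshold(dustLimitSat: Long, feerateSatPerKw: Long): Long =
    dustLimitSat + 663 * feerateSatPerKw / 1000

  def dustExposure(htlcAmountsSat: Seq[Long], dustLimitSat: Long, feerateSatPerKw: Long): Long =
    htlcAmountsSat.filter(_ < trimThreshold(dustLimitSat, feerateSatPerKw)).sum

  // dust limit 546 sat, feerate 10000 sat/kw => threshold 7176 sat: three 5000 sat HTLCs
  // put 15000 sat at risk, which a 10000 sat maxExposure would reject
  println(dustExposure(Seq(5000L, 5000L, 5000L, 20000L), 546L, 10000L)) // 15000
}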
+ val maxDustExposure = feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.maxExposure + val localReduced = DustExposure.reduceForDustExposure(commitment.localCommit.spec, common1.localChanges.all, common1.remoteChanges.all) + val localDustExposureAfterAdd = DustExposure.computeExposure(localReduced, params.localParams.dustLimit, params.commitmentFormat) + if (localDustExposureAfterAdd > maxDustExposure) { + return Left(LocalDustHtlcExposureTooHigh(channelId, maxDustExposure, localDustExposureAfterAdd)) + } + val remoteReduced = DustExposure.reduceForDustExposure(remoteCommit1.spec, common1.remoteChanges.all, common1.localChanges.all) + val remoteDustExposureAfterAdd = DustExposure.computeExposure(remoteReduced, params.remoteParams.dustLimit, params.commitmentFormat) + if (remoteDustExposureAfterAdd > maxDustExposure) { + return Left(RemoteDustHtlcExposureTooHigh(channelId, maxDustExposure, remoteDustExposureAfterAdd)) + } + }) + + Right(copy(common = common1), add) } def receiveAdd(add: UpdateAddHtlc, feeConf: OnChainFeeConf): Either[ChannelException, MetaCommitments] = { - sequence(all.map(_.receiveAdd(add, feeConf))) - .map { res: List[Commitments] => (res.head.common, res.map(_.commitment)) } - .map { case (common, commitments) => this.copy(common = common, commitments = commitments) } - } + if (add.id != common.remoteNextHtlcId) { + return Left(UnexpectedHtlcId(channelId, expected = common.remoteNextHtlcId, actual = add.id)) + } - def sendFulfill(cmd: CMD_FULFILL_HTLC): Either[ChannelException, (MetaCommitments, UpdateFulfillHtlc)] = { - sequence(all.map(_.sendFulfill(cmd))) - .map { res: List[(Commitments, UpdateFulfillHtlc)] => (res.head._1.common, res.map(_._1.commitment), res.head._2) } - .map { case (common, commitments, fulfill) => (this.copy(common = common, commitments = commitments), fulfill) } - } + // we used to not enforce a strictly positive minimum, hence the max(1 msat) + val htlcMinimum = params.localParams.htlcMinimum.max(1 msat) + if (add.amountMsat < htlcMinimum) { + return Left(HtlcValueTooSmall(channelId, minimum = htlcMinimum, actual = add.amountMsat)) + } - def receiveFulfill(fulfill: UpdateFulfillHtlc): Either[ChannelException, (MetaCommitments, Origin, UpdateAddHtlc)] = { - sequence(all.map(_.receiveFulfill(fulfill))) - .map { res: List[(Commitments, Origin, UpdateAddHtlc)] => (res.head._1.common, res.map(_._1.commitment), res.head._2, res.head._3) } - .map { case (common, commitments, origin, add) => (this.copy(common = common, commitments = commitments), origin, add) } - } + val common1 = common.addRemoteProposal(add).copy(remoteNextHtlcId = common.remoteNextHtlcId + 1) + + // let's compute the current commitment *as seen by us* including this change + commitments.foreach(commitment => { + // we allowed mismatches between our feerates and our remote's as long as commitments didn't contain any HTLC at risk + // we need to verify that we're not disagreeing on feerates anymore before accepting new HTLCs + // NB: there may be a pending update_fee that hasn't been applied yet that needs to be taken into account + val localFeeratePerKw = feeConf.getCommitmentFeerate(remoteNodeId, params.channelType, commitment.capacity, None) + val remoteFeeratePerKw = commitment.localCommit.spec.commitTxFeerate +: common1.remoteChanges.all.collect { case f: UpdateFee => f.feeratePerKw } + remoteFeeratePerKw.find(feerate => feeConf.feerateToleranceFor(remoteNodeId).isFeeDiffTooHigh(params.channelType, localFeeratePerKw, feerate)) match { + case Some(feerate) => return 
Left(FeerateTooDifferent(channelId, localFeeratePerKw = localFeeratePerKw, remoteFeeratePerKw = feerate)) + case None => + } + + val reduced = CommitmentSpec.reduce(commitment.localCommit.spec, common1.localChanges.acked, common1.remoteChanges.proposed) + val incomingHtlcs = reduced.htlcs.collect(DirectedHtlc.incoming) + + // note that the initiator pays the fee, so if sender != initiator, both sides will have to afford this payment + val fees = Transactions.commitTxTotalCost(params.remoteParams.dustLimit, reduced, params.commitmentFormat) + // NB: we don't enforce the funderFeeReserve (see sendAdd) because it would confuse a remote initiator that doesn't have this mitigation in place + // We could enforce it once we're confident a large portion of the network implements it. + val missingForSender = reduced.toRemote - commitment.remoteChannelReserve(params) - (if (params.localParams.isInitiator) 0.sat else fees) + val missingForReceiver = reduced.toLocal - commitment.localChannelReserve(params) - (if (params.localParams.isInitiator) fees else 0.sat) + if (missingForSender < 0.sat) { + return Left(InsufficientFunds(channelId, amount = add.amountMsat, missing = -missingForSender.truncateToSatoshi, reserve = commitment.remoteChannelReserve(params), fees = if (params.localParams.isInitiator) 0.sat else fees)) + } else if (missingForReceiver < 0.sat) { + if (params.localParams.isInitiator) { + return Left(CannotAffordFees(channelId, missing = -missingForReceiver.truncateToSatoshi, reserve = commitment.localChannelReserve(params), fees = fees)) + } else { + // receiver is not the channel initiator; it is ok if it can't maintain its channel_reserve for now, as long as its balance is increasing, which is the case if it is receiving a payment + } + } + + // NB: we need the `toSeq` because otherwise duplicate amountMsat would be removed (since incomingHtlcs is a Set). 
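// Aside (not part of the diff): the Set pitfall the comment above guards against, in
// isolation. Two distinct HTLCs carrying the same amount must both be counted.
object SetDedupSketch extends App {
  val htlcs = Set("htlc-1" -> 1000L, "htlc-2" -> 1000L) // two HTLCs, same amount
  println(htlcs.map(_._2).sum)                          // 1000: mapping a Set deduped the amounts
  println(htlcs.toSeq.map(_._2).sum)                    // 2000: going through a Seq counts both
}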
+ val htlcValueInFlight = incomingHtlcs.toSeq.map(_.amountMsat).sum + if (params.localParams.maxHtlcValueInFlightMsat < htlcValueInFlight) { + return Left(HtlcValueTooHighInFlight(channelId, maximum = params.localParams.maxHtlcValueInFlightMsat, actual = htlcValueInFlight)) + } + + if (incomingHtlcs.size > params.localParams.maxAcceptedHtlcs) { + return Left(TooManyAcceptedHtlcs(channelId, maximum = params.localParams.maxAcceptedHtlcs)) + } + }) - def sendFail(cmd: CMD_FAIL_HTLC, nodeSecret: PrivateKey): Either[ChannelException, (MetaCommitments, HtlcFailureMessage)] = { - sequence(all.map(_.sendFail(cmd, nodeSecret))) - .map { res: List[(Commitments, HtlcFailureMessage)] => (res.head._1.common, res.map(_._1.commitment), res.head._2) } - .map { case (common, commitments, fail) => (this.copy(common = common, commitments = commitments), fail) } + Right(copy(common = common1)) } + def sendFulfill(cmd: CMD_FULFILL_HTLC): Either[ChannelException, (MetaCommitments, UpdateFulfillHtlc)] = + getIncomingHtlcCrossSigned(cmd.id) match { + case Some(htlc) if Common.alreadyProposed(common.localChanges.proposed, htlc.id) => + // we have already sent a fail/fulfill for this htlc + Left(UnknownHtlcId(channelId, cmd.id)) + case Some(htlc) if htlc.paymentHash == Crypto.sha256(cmd.r) => + val fulfill = UpdateFulfillHtlc(channelId, cmd.id, cmd.r) + val common1 = common.addLocalProposal(fulfill) + payment.Monitoring.Metrics.recordIncomingPaymentDistribution(params.remoteNodeId, htlc.amountMsat) + Right((copy(common = common1), fulfill)) + case Some(_) => Left(InvalidHtlcPreimage(channelId, cmd.id)) + case None => Left(UnknownHtlcId(channelId, cmd.id)) + } + + def receiveFulfill(fulfill: UpdateFulfillHtlc): Either[ChannelException, (MetaCommitments, Origin, UpdateAddHtlc)] = + getOutgoingHtlcCrossSigned(fulfill.id) match { + case Some(htlc) if htlc.paymentHash == Crypto.sha256(fulfill.paymentPreimage) => common.originChannels.get(fulfill.id) match { + case Some(origin) => + payment.Monitoring.Metrics.recordOutgoingPaymentDistribution(params.remoteNodeId, htlc.amountMsat) + val common1 = common.addRemoteProposal(fulfill) + Right(copy(common = common1), origin, htlc) + case None => Left(UnknownHtlcId(channelId, fulfill.id)) + } + case Some(_) => Left(InvalidHtlcPreimage(channelId, fulfill.id)) + case None => Left(UnknownHtlcId(channelId, fulfill.id)) + } + + def sendFail(cmd: CMD_FAIL_HTLC, nodeSecret: PrivateKey): Either[ChannelException, (MetaCommitments, HtlcFailureMessage)] = + getIncomingHtlcCrossSigned(cmd.id) match { + case Some(htlc) if Common.alreadyProposed(common.localChanges.proposed, htlc.id) => + // we have already sent a fail/fulfill for this htlc + Left(UnknownHtlcId(channelId, cmd.id)) + case Some(htlc) => + // we need the shared secret to build the error packet + OutgoingPaymentPacket.buildHtlcFailure(nodeSecret, cmd, htlc).map(fail => (copy(common = common.addLocalProposal(fail)), fail)) + case None => Left(UnknownHtlcId(channelId, cmd.id)) + } + def sendFailMalformed(cmd: CMD_FAIL_MALFORMED_HTLC): Either[ChannelException, (MetaCommitments, UpdateFailMalformedHtlc)] = { - sequence(all.map(_.sendFailMalformed(cmd))) - .map { res: List[(Commitments, UpdateFailMalformedHtlc)] => (res.head._1.common, res.map(_._1.commitment), res.head._2) } - .map { case (common, commitments, fail) => (this.copy(common = common, commitments = commitments), fail) } + // BADONION bit must be set in failure_code + if ((cmd.failureCode & FailureMessageCodecs.BADONION) == 0) { + Left(InvalidFailureCode(channelId)) + } 
else { + getIncomingHtlcCrossSigned(cmd.id) match { + case Some(htlc) if Common.alreadyProposed(common.localChanges.proposed, htlc.id) => + // we have already sent a fail/fulfill for this htlc + Left(UnknownHtlcId(channelId, cmd.id)) + case Some(_) => + val fail = UpdateFailMalformedHtlc(channelId, cmd.id, cmd.onionHash, cmd.failureCode) + val common1 = common.addLocalProposal(fail) + Right((copy(common = common1), fail)) + case None => Left(UnknownHtlcId(channelId, cmd.id)) + } + } } - def receiveFail(fail: UpdateFailHtlc): Either[ChannelException, (MetaCommitments, Origin, UpdateAddHtlc)] = { - sequence(all.map(_.receiveFail(fail))) - .map { res: List[(Commitments, Origin, UpdateAddHtlc)] => (res.head._1.common, res.map(_._1.commitment), res.head._2, res.head._3) } - .map { case (common, commitments, origin, fail) => (this.copy(common = common, commitments = commitments), origin, fail) } - } + def receiveFail(fail: UpdateFailHtlc): Either[ChannelException, (MetaCommitments, Origin, UpdateAddHtlc)] = + getOutgoingHtlcCrossSigned(fail.id) match { + case Some(htlc) => common.originChannels.get(fail.id) match { + case Some(origin) => Right(copy(common = common.addRemoteProposal(fail)), origin, htlc) + case None => Left(UnknownHtlcId(channelId, fail.id)) + } + case None => Left(UnknownHtlcId(channelId, fail.id)) + } def receiveFailMalformed(fail: UpdateFailMalformedHtlc): Either[ChannelException, (MetaCommitments, Origin, UpdateAddHtlc)] = { - sequence(all.map(_.receiveFailMalformed(fail))) - .map { res: List[(Commitments, Origin, UpdateAddHtlc)] => (res.head._1.common, res.map(_._1.commitment), res.head._2, res.head._3) } - .map { case (common, commitments, origin, fail) => (this.copy(common = common, commitments = commitments), origin, fail) } + // A receiving node MUST fail the channel if the BADONION bit in failure_code is not set for update_fail_malformed_htlc. 
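// Aside (not part of the diff): BADONION is the 0x8000 bit of the failure code (BOLT 4),
// so the check below is a plain bit test. A minimal illustration:
object BadOnionBitSketch extends App {
  val BADONION = 0x8000
  val invalidOnionVersion = BADONION | 0x4000 | 4 // PERM | 4: a valid malformed-HTLC code
  println((invalidOnionVersion & BADONION) != 0)  // true: bit set, message is acceptable
  println((0x4000 & BADONION) != 0)               // false: bit missing, channel must be failed
}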
+ if ((fail.failureCode & FailureMessageCodecs.BADONION) == 0) { + Left(InvalidFailureCode(channelId)) + } else { + getOutgoingHtlcCrossSigned(fail.id) match { + case Some(htlc) => common.originChannels.get(fail.id) match { + case Some(origin) => Right(copy(common = common.addRemoteProposal(fail)), origin, htlc) + case None => Left(UnknownHtlcId(channelId, fail.id)) + } + case None => Left(UnknownHtlcId(channelId, fail.id)) + } + } } def sendFee(cmd: CMD_UPDATE_FEE, feeConf: OnChainFeeConf): Either[ChannelException, (MetaCommitments, UpdateFee)] = { - sequence(all.map(_.sendFee(cmd, feeConf))) - .map { res: List[(Commitments, UpdateFee)] => (res.head._1.common, res.map(_._1.commitment), res.head._2) } - .map { case (common, commitments, fee) => (this.copy(common = common, commitments = commitments), fee) } + if (!params.localParams.isInitiator) { + Left(NonInitiatorCannotSendUpdateFee(channelId)) + } else { + // let's compute the current commitment *as seen by them* with this change taken into account + val fee = UpdateFee(channelId, cmd.feeratePerKw) + // update_fee messages replace each other, so we can remove previous ones + val common1 = common.copy(localChanges = common.localChanges.copy(proposed = common.localChanges.proposed.filterNot(_.isInstanceOf[UpdateFee]) :+ fee)) + commitments.foreach(commitment => { + val reduced = CommitmentSpec.reduce(commitment.remoteCommit.spec, common1.remoteChanges.acked, common1.localChanges.proposed) + // a node cannot spend pending incoming htlcs, and needs to keep funds above the reserve required by the counterparty, after paying the fee + // we look from remote's point of view, so if local is the initiator, remote doesn't pay the fees + val fees = Transactions.commitTxTotalCost(params.remoteParams.dustLimit, reduced, params.commitmentFormat) + val missing = reduced.toRemote.truncateToSatoshi - commitment.localChannelReserve(params) - fees + if (missing < 0.sat) { + return Left(CannotAffordFees(channelId, missing = -missing, reserve = commitment.localChannelReserve(params), fees = fees)) + } + // if we would overflow our dust exposure with the new feerate, we avoid sending this fee update + if (feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.closeOnUpdateFeeOverflow) { + val maxDustExposure = feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.maxExposure + // this is the commitment as it would be if our update_fee was immediately signed by both parties (it is only an + // estimate because there can be concurrent updates) + val localReduced = DustExposure.reduceForDustExposure(commitment.localCommit.spec, common1.localChanges.all, common1.remoteChanges.all) + val localDustExposureAfterFeeUpdate = DustExposure.computeExposure(localReduced, cmd.feeratePerKw, params.localParams.dustLimit, params.commitmentFormat) + if (localDustExposureAfterFeeUpdate > maxDustExposure) { + return Left(LocalDustHtlcExposureTooHigh(channelId, maxDustExposure, localDustExposureAfterFeeUpdate)) + } + val remoteReduced = DustExposure.reduceForDustExposure(commitment.remoteCommit.spec, common1.remoteChanges.all, common1.localChanges.all) + val remoteDustExposureAfterFeeUpdate = DustExposure.computeExposure(remoteReduced, cmd.feeratePerKw, params.remoteParams.dustLimit, params.commitmentFormat) + if (remoteDustExposureAfterFeeUpdate > maxDustExposure) { + return Left(RemoteDustHtlcExposureTooHigh(channelId, maxDustExposure, remoteDustExposureAfterFeeUpdate)) + } + } + }) + Right(copy(common = common1), fee) + } } def receiveFee(fee: UpdateFee, feeConf: OnChainFeeConf)(implicit
log: LoggingAdapter): Either[ChannelException, MetaCommitments] = { - sequence(all.map(_.receiveFee(fee, feeConf))) - .map { res: List[Commitments] => (res.head.common, res.map(_.commitment)) } - .map { case (common, commitments) => this.copy(common = common, commitments = commitments) } + if (params.localParams.isInitiator) { + Left(NonInitiatorCannotSendUpdateFee(channelId)) + } else if (fee.feeratePerKw < FeeratePerKw.MinimumFeeratePerKw) { + Left(FeerateTooSmall(channelId, remoteFeeratePerKw = fee.feeratePerKw)) + } else { + Metrics.RemoteFeeratePerKw.withoutTags().record(fee.feeratePerKw.toLong) + // let's compute the current commitment *as seen by us* including this change + // update_fee messages replace each other, so we can remove previous ones + val common1 = common.copy(remoteChanges = common.remoteChanges.copy(proposed = common.remoteChanges.proposed.filterNot(_.isInstanceOf[UpdateFee]) :+ fee)) + commitments.foreach(commitment => { + val localFeeratePerKw = feeConf.getCommitmentFeerate(remoteNodeId, params.channelType, commitment.capacity, None) + log.info("remote feeratePerKw={}, local feeratePerKw={}, ratio={}", fee.feeratePerKw, localFeeratePerKw, fee.feeratePerKw.toLong.toDouble / localFeeratePerKw.toLong) + if (feeConf.feerateToleranceFor(remoteNodeId).isFeeDiffTooHigh(params.channelType, localFeeratePerKw, fee.feeratePerKw) && commitment.hasPendingOrProposedHtlcs(common)) { + return Left(FeerateTooDifferent(channelId, localFeeratePerKw = localFeeratePerKw, remoteFeeratePerKw = fee.feeratePerKw)) + } else { + // NB: we check that the initiator can afford this new fee even if the spec allows doing it at the next signature + // It is easier to do it here because under certain (race) conditions the spec allows a lower-than-normal fee to be paid, + // and it would be tricky to check if the conditions are met at signing + // (it also means that we need to check the fee of the initial commitment tx somewhere) + val reduced = CommitmentSpec.reduce(commitment.localCommit.spec, common1.localChanges.acked, common1.remoteChanges.proposed) + // a node cannot spend pending incoming htlcs, and needs to keep funds above the reserve required by the counterparty, after paying the fee + val fees = Transactions.commitTxTotalCost(params.localParams.dustLimit, reduced, params.commitmentFormat) + val missing = reduced.toRemote.truncateToSatoshi - commitment.remoteChannelReserve(params) - fees + if (missing < 0.sat) { + return Left(CannotAffordFees(channelId, missing = -missing, reserve = commitment.remoteChannelReserve(params), fees = fees)) + } + // if we would overflow our dust exposure with the new feerate, we reject this fee update + if (feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.closeOnUpdateFeeOverflow) { + val maxDustExposure = feeConf.feerateToleranceFor(remoteNodeId).dustTolerance.maxExposure + val localReduced = DustExposure.reduceForDustExposure(commitment.localCommit.spec, common1.localChanges.all, common1.remoteChanges.all) + val localDustExposureAfterFeeUpdate = DustExposure.computeExposure(localReduced, fee.feeratePerKw, params.localParams.dustLimit, params.commitmentFormat) + if (localDustExposureAfterFeeUpdate > maxDustExposure) { + return Left(LocalDustHtlcExposureTooHigh(channelId, maxDustExposure, localDustExposureAfterFeeUpdate)) + } + // this is the commitment as it would be if their update_fee was immediately signed by both parties (it is only an + // estimate because there can be concurrent updates) + val remoteReduced =
DustExposure.reduceForDustExposure(commitment.remoteCommit.spec, common1.remoteChanges.all, common1.localChanges.all) + val remoteDustExposureAfterFeeUpdate = DustExposure.computeExposure(remoteReduced, fee.feeratePerKw, params.remoteParams.dustLimit, params.commitmentFormat) + if (remoteDustExposureAfterFeeUpdate > maxDustExposure) { + return Left(RemoteDustHtlcExposureTooHigh(channelId, maxDustExposure, remoteDustExposureAfterFeeUpdate)) + } + } + } + }) + Right(copy(common = common1)) + } } - /** We need to send signatures for each commitments. */ def sendCommit(keyManager: ChannelKeyManager)(implicit log: LoggingAdapter): Either[ChannelException, (MetaCommitments, CommitSig)] = { - sequence(all.map(_.sendCommit(keyManager))) - .map { res: List[(Commitments, CommitSig)] => - val tlv = AlternativeCommitSigsTlv(res.foldLeft(List.empty[AlternativeCommitSig]) { - case (sigs, (commitments, commitSig)) => AlternativeCommitSig(commitments.fundingTxId, commitSig.signature, commitSig.htlcSignatures) +: sigs - }) - // we set all commit_sigs as tlv of the first commit_sig (the first sigs will be duplicated) - val commitSig = res.head._2.modify(_.tlvStream.records).usingIf(tlv.commitSigs.size > 1)(tlv +: _.toList) - val common = res.head._1.common - val commitments = res.map(_._1.commitment) - (this.copy(common = common, commitments = commitments), commitSig) - } - } - - def receiveCommit(sig: CommitSig, keyManager: ChannelKeyManager)(implicit log: LoggingAdapter): Either[ChannelException, (MetaCommitments, RevokeAndAck)] = { - val sigs: Map[ByteVector32, CommitSig] = sig.alternativeCommitSigs match { - case Nil => Map(all.head.fundingTxId -> sig) // no alternative sigs: we use the commit_sig message as-is, we assume it is for the first commitments - case alternativeCommitSig => + common.remoteNextCommitInfo match { + case Right(_) if !common.localHasChanges => + Left(CannotSignWithoutChanges(channelId)) + case Right(remoteNextPerCommitmentPoint) => + val (commitments1, commitSigs) = commitments.map(c => { + // remote commitment will include all local changes + remote acked changes + val spec = CommitmentSpec.reduce(c.remoteCommit.spec, common.remoteChanges.acked, common.localChanges.proposed) + val (remoteCommitTx, htlcTxs) = Commitment.makeRemoteTxs(keyManager, params.channelConfig, params.channelFeatures, c.remoteCommit.index + 1, params.localParams, params.remoteParams, c.commitInput, remoteNextPerCommitmentPoint, spec) + val sig = keyManager.sign(remoteCommitTx, keyManager.fundingPublicKey(params.localParams.fundingKeyPath), Transactions.TxOwner.Remote, params.commitmentFormat) + + val sortedHtlcTxs: Seq[TransactionWithInputInfo] = htlcTxs.sortBy(_.input.outPoint.index) + val channelKeyPath = keyManager.keyPath(params.localParams, params.channelConfig) + val htlcSigs = sortedHtlcTxs.map(keyManager.sign(_, keyManager.htlcPoint(channelKeyPath), remoteNextPerCommitmentPoint, Transactions.TxOwner.Remote, params.commitmentFormat)) + + // NB: IN/OUT htlcs are inverted because this is the remote commit + log.info(s"built remote commit number=${c.remoteCommit.index + 1} toLocalMsat=${spec.toLocal.toLong} toRemoteMsat=${spec.toRemote.toLong} htlc_in={} htlc_out={} feeratePerKw=${spec.commitTxFeerate} txid=${remoteCommitTx.tx.txid} tx={}", spec.htlcs.collect(DirectedHtlc.outgoing).map(_.id).mkString(","), spec.htlcs.collect(DirectedHtlc.incoming).map(_.id).mkString(","), remoteCommitTx.tx) + Metrics.recordHtlcsInFlight(spec, c.remoteCommit.spec) + + val commitment1 = c.copy(nextRemoteCommit_opt =
+
+          // NB: IN/OUT htlcs are inverted because this is the remote commit
+          log.info(s"built remote commit number=${c.remoteCommit.index + 1} toLocalMsat=${spec.toLocal.toLong} toRemoteMsat=${spec.toRemote.toLong} htlc_in={} htlc_out={} feeratePerKw=${spec.commitTxFeerate} txid=${remoteCommitTx.tx.txid} tx={}", spec.htlcs.collect(DirectedHtlc.outgoing).map(_.id).mkString(","), spec.htlcs.collect(DirectedHtlc.incoming).map(_.id).mkString(","), remoteCommitTx.tx)
+          Metrics.recordHtlcsInFlight(spec, c.remoteCommit.spec)
+
+          val commitment1 = c.copy(nextRemoteCommit_opt = Some(RemoteCommit(c.remoteCommit.index + 1, spec, remoteCommitTx.tx.txid, remoteNextPerCommitmentPoint)))
+          (commitment1, AlternativeCommitSig(commitment1.fundingTxId, sig, htlcSigs.toList))
+        }).unzip
+        val commitSig = if (commitments1.size > 1) {
+          // we set all commit_sigs as tlv of the first commit_sig (the first sigs will be duplicated)
+          CommitSig(channelId, commitSigs.head.signature, commitSigs.head.htlcSignatures, TlvStream(AlternativeCommitSigsTlv(commitSigs)))
+        } else {
+          CommitSig(channelId, commitSigs.head.signature, commitSigs.head.htlcSignatures)
+        }
+        val metaCommitments1 = copy(
+          common = common.copy(
+            localChanges = common.localChanges.copy(proposed = Nil, signed = common.localChanges.proposed),
+            remoteChanges = common.remoteChanges.copy(acked = Nil, signed = common.remoteChanges.acked),
+            remoteNextCommitInfo = Left(WaitForRev(commitSig, common.localCommitIndex)),
+          ),
+          commitments = commitments1,
+        )
+        Right(metaCommitments1, commitSig)
+      case Left(_) =>
+        Left(CannotSignBeforeRevocation(channelId))
+    }
+  }
+
+  def receiveCommit(commit: CommitSig, keyManager: ChannelKeyManager)(implicit log: LoggingAdapter): Either[ChannelException, (MetaCommitments, RevokeAndAck)] = {
+    // they sent us a signature for *their* view of *our* next commit tx
+    // so in terms of rev.hashes and indexes we have:
+    // ourCommit.index -> our current revocation hash, which is about to become our old revocation hash
+    // ourCommit.index + 1 -> our next revocation hash, used by *them* to build the sig we've just received, and which
+    // is about to become our current revocation hash
+    // ourCommit.index + 2 -> which is about to become our next revocation hash
+    // we will reply to this sig with our old revocation hash preimage (at index) and our next revocation hash (at index + 1)
+    // and will increment our index
+
+    // lnd sometimes sends a new signature without any changes, which is a (harmless) spec violation
+    if (!common.remoteHasChanges) {
+      // throw CannotSignWithoutChanges(channelId)
+      log.warning("received a commit sig with no changes (probably coming from lnd)")
+    }
+
+    val sigs: Map[ByteVector32, CommitSig] = commit.alternativeCommitSigs match {
+      case Nil => Map(commitments.head.fundingTxId -> commit) // no alternative sigs: we use the commit_sig message as-is, we assume it is for the first commitments
+      case alternativeCommitSigs =>
         // if there are alternative sigs, then we expand the sigs to build n individual commit_sig that we will apply to the corresponding commitments
-        alternativeCommitSig.map { altSig =>
-          altSig.fundingTxId -> sig
+        alternativeCommitSigs.map { altSig =>
+          altSig.fundingTxId -> commit
             .modify(_.signature).setTo(altSig.signature)
             .modify(_.htlcSignatures).setTo(altSig.htlcSignatures)
             .modify(_.tlvStream.records).using(_.filterNot(_.isInstanceOf[AlternativeCommitSigsTlv]))
         }.toMap
     }
-    sequence(all.map(c => c.receiveCommit(sigs(c.fundingTxId), keyManager)))
-      .map { res: List[(Commitments, RevokeAndAck)] => (res.head._1.common, res.map(_._1.commitment), res.head._2) }
-      .map { case (common, commitments, rev) => (this.copy(common = common, commitments = commitments), rev) }
+
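+    // e.g. a commit_sig carrying AlternativeCommitSigsTlv for funding txs f1 and f2 expands to
+    // Map(f1 -> commit_sig1, f2 -> commit_sig2), each with the alternatives tlv stripped, so that every
+    // commitment below verifies the signature that matches its own funding tx.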
+    val channelKeyPath = keyManager.keyPath(params.localParams, params.channelConfig)
+    val commitments1 = commitments.map(c => {
+      val spec = CommitmentSpec.reduce(c.localCommit.spec, common.localChanges.acked, common.remoteChanges.proposed)
+      val localPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, c.localCommit.index + 1)
+      val (localCommitTx, htlcTxs) = Commitment.makeLocalTxs(keyManager, params.channelConfig, params.channelFeatures, c.localCommit.index + 1, params.localParams, params.remoteParams, c.commitInput, localPerCommitmentPoint, spec)
+      sigs.get(c.fundingTxId) match {
+        case Some(sig) =>
+          log.info(s"built local commit number=${c.localCommit.index + 1} toLocalMsat=${spec.toLocal.toLong} toRemoteMsat=${spec.toRemote.toLong} htlc_in={} htlc_out={} feeratePerKw=${spec.commitTxFeerate} txid=${localCommitTx.tx.txid} tx={}", spec.htlcs.collect(DirectedHtlc.incoming).map(_.id).mkString(","), spec.htlcs.collect(DirectedHtlc.outgoing).map(_.id).mkString(","), localCommitTx.tx)
+          if (!Transactions.checkSig(localCommitTx, sig.signature, params.remoteParams.fundingPubKey, Transactions.TxOwner.Remote, params.commitmentFormat)) {
+            return Left(InvalidCommitmentSignature(channelId, localCommitTx.tx.txid))
+          }
+
+          val sortedHtlcTxs: Seq[Transactions.HtlcTx] = htlcTxs.sortBy(_.input.outPoint.index)
+          if (sig.htlcSignatures.size != sortedHtlcTxs.size) {
+            return Left(HtlcSigCountMismatch(channelId, sortedHtlcTxs.size, sig.htlcSignatures.size))
+          }
+
+          val remoteHtlcPubkey = Generators.derivePubKey(params.remoteParams.htlcBasepoint, localPerCommitmentPoint)
+          val htlcTxsAndRemoteSigs = sortedHtlcTxs.zip(sig.htlcSignatures).toList.map {
+            case (htlcTx: Transactions.HtlcTx, remoteSig) =>
+              if (!Transactions.checkSig(htlcTx, remoteSig, remoteHtlcPubkey, Transactions.TxOwner.Remote, params.commitmentFormat)) {
+                return Left(InvalidHtlcSignature(channelId, htlcTx.tx.txid))
+              }
+              HtlcTxAndRemoteSig(htlcTx, remoteSig)
+          }
+
+          // update our commitment data
+          c.copy(localCommit = LocalCommit(
+            index = c.localCommit.index + 1,
+            spec,
+            commitTxAndRemoteSig = CommitTxAndRemoteSig(localCommitTx, sig.signature),
+            htlcTxsAndRemoteSigs = htlcTxsAndRemoteSigs
+          ))
+        case None =>
+          return Left(InvalidCommitmentSignature(channelId, localCommitTx.tx.txid))
+      }
+    })
+
+    // we will send our revocation preimage + our next revocation hash
+    val localPerCommitmentSecret = keyManager.commitmentSecret(channelKeyPath, common.localCommitIndex)
+    val localNextPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, common.localCommitIndex + 2)
+    val revocation = RevokeAndAck(
+      channelId = channelId,
+      perCommitmentSecret = localPerCommitmentSecret,
+      nextPerCommitmentPoint = localNextPerCommitmentPoint
+    )
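+    // e.g. with localCommitIndex = 42 we reveal the secret that revokes commitment 42 and announce the
+    // point for commitment 44 (commitment 43 just became our current one), matching the index arithmetic
+    // described at the top of this method.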
+
+    val metaCommitments1 = copy(
+      common = common.copy(
+        localChanges = common.localChanges.copy(acked = Nil),
+        remoteChanges = common.remoteChanges.copy(proposed = Nil, acked = common.remoteChanges.acked ++ common.remoteChanges.proposed),
+        localCommitIndex = common.localCommitIndex + 1,
+      ),
+      commitments = commitments1
+    )
+
+    Right(metaCommitments1, revocation)
   }
 
   def receiveRevocation(revocation: RevokeAndAck, maxDustExposure: Satoshi): Either[ChannelException, (MetaCommitments, Seq[PostRevocationAction])] = {
-    sequence(all.map(c => c.receiveRevocation(revocation, maxDustExposure)))
-      .map { res: List[(Commitments, Seq[PostRevocationAction])] => (res.head._1.common, res.map(_._1.commitment), res.head._2) }
-      .map { case (common, commitments, actions) => (this.copy(common = common, commitments = commitments), actions) }
+    // we receive a revocation because we just sent them a sig for their next commit tx
+    common.remoteNextCommitInfo match {
+      case Left(_) if revocation.perCommitmentSecret.publicKey != commitments.head.remoteCommit.remotePerCommitmentPoint =>
+        Left(InvalidRevocation(channelId))
+      case Left(_) =>
+        // NB: we are supposed to keep nextRemoteCommit_opt consistent with remoteNextCommitInfo: this should exist.
+        val theirFirstNextCommitSpec = commitments.head.nextRemoteCommit_opt.get.spec
+        // Since htlcs are shared across all commitments, we generate the actions only once based on the first commitment.
+        val receivedHtlcs = common.remoteChanges.signed.collect {
+          // we forward adds downstream only when they have been committed by both sides
+          // this always happens when we receive a revocation, because they send the add, then they sign it, then we sign it
+          case add: UpdateAddHtlc => add
+        }
+        val failedHtlcs = common.remoteChanges.signed.collect {
+          // same for fails: we need to make sure that they are in neither commitment before propagating the fail upstream
+          case fail: UpdateFailHtlc =>
+            val origin = common.originChannels(fail.id)
+            val add = commitments.head.remoteCommit.spec.findIncomingHtlcById(fail.id).map(_.add).get
+            RES_ADD_SETTLED(origin, add, HtlcResult.RemoteFail(fail))
+          // same as above
+          case fail: UpdateFailMalformedHtlc =>
+            val origin = common.originChannels(fail.id)
+            val add = commitments.head.remoteCommit.spec.findIncomingHtlcById(fail.id).map(_.add).get
+            RES_ADD_SETTLED(origin, add, HtlcResult.RemoteFailMalformed(fail))
+        }
+        val (acceptedHtlcs, rejectedHtlcs) = {
+          // the received htlcs have already been added to commitments (they've been signed by our peer), and may already
+          // overflow our dust exposure (we cannot prevent them from adding htlcs): we artificially remove them before
+          // deciding which we'll keep and relay and which we'll fail without relaying.
+          val localSpecWithoutNewHtlcs = commitments.head.localCommit.spec.copy(htlcs = commitments.head.localCommit.spec.htlcs.filter {
+            case IncomingHtlc(add) if receivedHtlcs.contains(add) => false
+            case _ => true
+          })
+          val remoteSpecWithoutNewHtlcs = theirFirstNextCommitSpec.copy(htlcs = theirFirstNextCommitSpec.htlcs.filter {
+            case OutgoingHtlc(add) if receivedHtlcs.contains(add) => false
+            case _ => true
+          })
+          val localReduced = DustExposure.reduceForDustExposure(localSpecWithoutNewHtlcs, common.localChanges.all, common.remoteChanges.acked)
+          val localCommitDustExposure = DustExposure.computeExposure(localReduced, params.localParams.dustLimit, params.commitmentFormat)
+          val remoteReduced = DustExposure.reduceForDustExposure(remoteSpecWithoutNewHtlcs, common.remoteChanges.acked, common.localChanges.all)
+          val remoteCommitDustExposure = DustExposure.computeExposure(remoteReduced, params.remoteParams.dustLimit, params.commitmentFormat)
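+          // NB: dust exposure is (roughly) the total amount of htlcs that are below the dust limit at the
+          // current feerate and would therefore be lost as fees if the commitment went on-chain: e.g. three
+          // 400 sat htlcs with a 546 sat dust limit add 1 200 sat of exposure.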
+          // we sort incoming htlcs by decreasing amount: we want to prioritize higher amounts.
+          val sortedReceivedHtlcs = receivedHtlcs.sortBy(_.amountMsat).reverse
+          DustExposure.filterBeforeForward(
+            maxDustExposure,
+            localReduced,
+            params.localParams.dustLimit,
+            localCommitDustExposure,
+            remoteReduced,
+            params.remoteParams.dustLimit,
+            remoteCommitDustExposure,
+            sortedReceivedHtlcs,
+            params.commitmentFormat)
+        }
+        val actions = acceptedHtlcs.map(add => PostRevocationAction.RelayHtlc(add)) ++
+          rejectedHtlcs.map(add => PostRevocationAction.RejectHtlc(add)) ++
+          failedHtlcs.map(res => PostRevocationAction.RelayFailure(res))
+        // the following outgoing htlcs have been completed (fulfilled or failed) when we received this revocation:
+        // they have been removed from both the local and the remote commitment
+        // (since fulfill/fail are sent by the remote peer, they are (1) signed by them, (2) revoked by us, (3) signed by us, (4) revoked by them)
+        val completedOutgoingHtlcs = commitments.head.remoteCommit.spec.htlcs.collect(DirectedHtlc.incoming).map(_.id) -- theirFirstNextCommitSpec.htlcs.collect(DirectedHtlc.incoming).map(_.id)
+        // we remove the newly completed htlcs from the origin map
+        val originChannels1 = common.originChannels -- completedOutgoingHtlcs
+        val commitments1 = commitments.map(c => c.copy(
+          remoteCommit = c.nextRemoteCommit_opt.get,
+          nextRemoteCommit_opt = None,
+        ))
+        val metaCommitments1 = copy(
+          common = common.copy(
+            localChanges = common.localChanges.copy(signed = Nil, acked = common.localChanges.acked ++ common.localChanges.signed),
+            remoteChanges = common.remoteChanges.copy(signed = Nil),
+            remoteCommitIndex = common.remoteCommitIndex + 1,
+            remoteNextCommitInfo = Right(revocation.nextPerCommitmentPoint),
+            remotePerCommitmentSecrets = common.remotePerCommitmentSecrets.addHash(revocation.perCommitmentSecret.value, 0xFFFFFFFFFFFFL - common.remoteCommitIndex),
+            originChannels = originChannels1
+          ),
+          commitments = commitments1,
+        )
+        Right(metaCommitments1, actions)
+      case Right(_) =>
+        Left(UnexpectedRevocation(channelId))
+    }
   }
 
-  def discardUnsignedUpdates(implicit log: LoggingAdapter): MetaCommitments = {
+  def discardUnsignedUpdates()(implicit log: LoggingAdapter): MetaCommitments = {
     this.copy(common = common.discardUnsignedUpdates())
   }
 
-  def localHasChanges: Boolean = main.localHasChanges
+  def validateSeed(keyManager: ChannelKeyManager): Boolean = {
+    val localFundingKey = keyManager.fundingPublicKey(params.localParams.fundingKeyPath).publicKey
+    val remoteFundingKey = params.remoteParams.fundingPubKey
+    val fundingScript = Script.write(Scripts.multiSig2of2(localFundingKey, remoteFundingKey))
+    commitments.forall(_.commitInput.redeemScript == fundingScript)
+  }
 }
@@ -203,4 +973,13 @@ object MetaCommitments {
     common = commitments.common,
     commitments = commitments.commitment +: Nil
   )
+
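+  // These actions are produced by receiveRevocation and handled by the channel FSM once htlcs are
+  // irrevocably committed: RelayHtlc hands the incoming htlc to the relayer, RejectHtlc fails it back
+  // (to protect our dust exposure), and RelayFailure propagates a remote failure of one of our outgoing
+  // htlcs upstream.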
+  // @formatter:off
+  sealed trait PostRevocationAction
+  object PostRevocationAction {
+    case class RelayHtlc(incomingHtlc: UpdateAddHtlc) extends PostRevocationAction
+    case class RejectHtlc(incomingHtlc: UpdateAddHtlc) extends PostRevocationAction
+    case class RelayFailure(result: RES_ADD_SETTLED[Origin, HtlcResult]) extends PostRevocationAction
+  }
+  // @formatter:on
 }
\ No newline at end of file
diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/channel/fsm/Channel.scala b/eclair-core/src/main/scala/fr/acinq/eclair/channel/fsm/Channel.scala
index 6c0ba723a9..33c9e0fe86 100644
--- a/eclair-core/src/main/scala/fr/acinq/eclair/channel/fsm/Channel.scala
+++ b/eclair-core/src/main/scala/fr/acinq/eclair/channel/fsm/Channel.scala
@@ -29,7 +29,7 @@ import fr.acinq.eclair.blockchain._
 import fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher
 import fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher._
 import fr.acinq.eclair.blockchain.bitcoind.rpc.BitcoinCoreClient
-import fr.acinq.eclair.channel.Commitments.PostRevocationAction
+import fr.acinq.eclair.channel.MetaCommitments.PostRevocationAction
 import fr.acinq.eclair.channel.Helpers.Syncing.SyncResult
 import fr.acinq.eclair.channel.Helpers.{Closing, Syncing, getRelayFees, scidForChannelUpdate}
 import fr.acinq.eclair.channel.Monitoring.Metrics.ProcessMessage
@@ -436,7 +436,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
 
     case Event(c: CMD_SIGN, d: DATA_NORMAL) =>
       d.commitments.remoteNextCommitInfo match {
-        case _ if !d.metaCommitments.localHasChanges =>
+        case _ if !d.metaCommitments.common.localHasChanges =>
           log.debug("ignoring CMD_SIGN (nothing to sign)")
           stay()
         case Right(_) =>
@@ -464,7 +464,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
               handleCommandSuccess(c, d.copy(metaCommitments = metaCommitments1)).storing().sending(commit).acking(metaCommitments1.main.localChanges.signed)
             case Left(cause) => handleCommandError(cause, c)
           }
-        case Left(waitForRevocation) =>
+        case Left(_) =>
           log.debug("already in the process of signing, will sign again as soon as possible")
           stay()
       }
@@ -473,7 +473,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
       d.metaCommitments.receiveCommit(commit, keyManager) match {
         case Right((metaCommitments1, revocation)) =>
           log.debug("received a new sig, spec:\n{}", metaCommitments1.main.specs2String)
-          if (metaCommitments1.main.localHasChanges) {
+          if (metaCommitments1.common.localHasChanges) {
            // if we have newly acknowledged changes let's sign them
            self ! CMD_SIGN()
          }
@@ -505,10 +505,10 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
             log.debug("forwarding {} to relayer", result)
             relayer ! result
           }
-          if (metaCommitments1.main.localHasChanges) {
+          if (metaCommitments1.common.localHasChanges) {
             self ! CMD_SIGN()
           }
-          if (d.remoteShutdown.isDefined && !metaCommitments1.main.localHasUnsignedOutgoingHtlcs) {
+          if (d.remoteShutdown.isDefined && !metaCommitments1.common.localHasUnsignedOutgoingHtlcs) {
             // we were waiting for our pending htlcs to be signed before replying with our local shutdown
             val localShutdown = Shutdown(d.channelId, metaCommitments1.main.localParams.defaultFinalScriptPubKey)
             // note: it means that we had pending htlcs to sign, therefore we go to SHUTDOWN, not to NEGOTIATING
@@ -523,15 +523,15 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
     case Event(r: RevocationTimeout, d: DATA_NORMAL) => handleRevocationTimeout(r, d)
 
     case Event(c: CMD_CLOSE, d: DATA_NORMAL) =>
-      d.commitments.getLocalShutdownScript(c.scriptPubKey) match {
+      d.metaCommitments.params.getLocalShutdownScript(c.scriptPubKey) match {
         case Left(e) => handleCommandError(e, c)
         case Right(localShutdownScript) =>
           if (d.localShutdown.isDefined) {
             handleCommandError(ClosingAlreadyInProgress(d.channelId), c)
-          } else if (d.commitments.localHasUnsignedOutgoingHtlcs) {
+          } else if (d.metaCommitments.common.localHasUnsignedOutgoingHtlcs) {
             // NB: simplistic behavior, we could also sign-then-close
             handleCommandError(CannotCloseWithUnsignedOutgoingHtlcs(d.channelId), c)
-          } else if (d.commitments.localHasUnsignedOutgoingUpdateFee) {
+          } else if (d.metaCommitments.common.localHasUnsignedOutgoingUpdateFee) {
             handleCommandError(CannotCloseWithUnsignedOutgoingUpdateFee(d.channelId), c)
           } else {
             val shutdown = Shutdown(d.channelId, localShutdownScript)
@@ -540,7 +540,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
 
     case Event(remoteShutdown@Shutdown(_, remoteScriptPubKey, _), d: DATA_NORMAL) =>
-      d.commitments.getRemoteShutdownScript(remoteScriptPubKey) match {
+      d.metaCommitments.params.getRemoteShutdownScript(remoteScriptPubKey) match {
         case Left(e) =>
           log.warning(s"they sent an invalid closing script: ${e.getMessage}")
           context.system.scheduler.scheduleOnce(2 second, peer, Peer.Disconnect(remoteNodeId))
@@ -560,11 +560,11 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
           // we did not send a shutdown message
           // there are pending signed changes => go to SHUTDOWN
           // there are no htlcs => go to NEGOTIATING
-          if (d.commitments.remoteHasUnsignedOutgoingHtlcs) {
+          if (d.metaCommitments.common.remoteHasUnsignedOutgoingHtlcs) {
             handleLocalError(CannotCloseWithUnsignedOutgoingHtlcs(d.channelId), d, Some(remoteShutdown))
-          } else if (d.commitments.remoteHasUnsignedOutgoingUpdateFee) {
+          } else if (d.metaCommitments.common.remoteHasUnsignedOutgoingUpdateFee) {
             handleLocalError(CannotCloseWithUnsignedOutgoingUpdateFee(d.channelId), d, Some(remoteShutdown))
-          } else if (d.commitments.localHasUnsignedOutgoingHtlcs) { // do we have unsigned outgoing htlcs?
+          } else if (d.metaCommitments.common.localHasUnsignedOutgoingHtlcs) { // do we have unsigned outgoing htlcs?
             require(d.localShutdown.isEmpty, "can't have pending unsigned outgoing htlcs after having sent Shutdown")
             // are we in the middle of a signature?
             d.commitments.remoteNextCommitInfo match {
@@ -588,7 +588,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
               (localShutdown, localShutdown :: Nil)
           }
           // are there pending signed changes on either side? we need to have received their last revocation!
-          if (d.commitments.hasNoPendingHtlcsOrFeeUpdate) {
+          if (d.metaCommitments.hasNoPendingHtlcsOrFeeUpdate) {
             // there are no pending signed changes, let's go directly to NEGOTIATING
             if (d.commitments.localParams.isInitiator) {
               // we are the channel initiator, need to initiate the negotiation by sending the first closing_signed
@@ -800,7 +800,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
 
     case Event(c: CMD_SIGN, d: DATA_SHUTDOWN) =>
       d.commitments.remoteNextCommitInfo match {
-        case _ if !d.metaCommitments.localHasChanges =>
+        case _ if !d.metaCommitments.common.localHasChanges =>
           log.debug("ignoring CMD_SIGN (nothing to sign)")
           stay()
        case Right(_) =>
@@ -823,7 +823,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
               handleCommandSuccess(c, d.copy(metaCommitments = metaCommitments1)).storing().sending(commit).acking(metaCommitments1.main.localChanges.signed)
             case Left(cause) => handleCommandError(cause, c)
           }
-        case Left(waitForRevocation) =>
+        case Left(_) =>
           log.debug("already in the process of signing, will sign again as soon as possible")
           stay()
       }
@@ -834,7 +834,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
           // we always reply with a revocation
           log.debug("received a new sig:\n{}", metaCommitments1.main.specs2String)
           context.system.eventStream.publish(ChannelSignatureReceived(self, metaCommitments1.main))
-          if (metaCommitments1.main.hasNoPendingHtlcsOrFeeUpdate) {
+          if (metaCommitments1.hasNoPendingHtlcsOrFeeUpdate) {
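+            // all pending htlcs are settled and no fee update is in flight on either side: it is now
+            // safe to move on to the mutual close negotiation.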
             if (d.commitments.localParams.isInitiator) {
               // we are the channel initiator, need to initiate the negotiation by sending the first closing_signed
               val (closingTx, closingSigned) = Closing.MutualClose.makeFirstClosingTx(keyManager, metaCommitments1.main, localShutdown.scriptPubKey, remoteShutdown.scriptPubKey, nodeParams.onChainFeeConf.feeEstimator, nodeParams.onChainFeeConf.feeTargets, closingFeerates)
@@ -844,7 +844,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
               goto(NEGOTIATING) using DATA_NEGOTIATING(metaCommitments1, localShutdown, remoteShutdown, closingTxProposed = List(List()), bestUnpublishedClosingTx_opt = None) storing() sending revocation
             }
           } else {
-            if (metaCommitments1.localHasChanges) {
+            if (metaCommitments1.common.localHasChanges) {
               // if we have newly acknowledged changes let's sign them
               self ! CMD_SIGN()
             }
@@ -873,7 +873,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
             log.debug("forwarding {} to relayer", result)
             relayer ! result
           }
-          if (metaCommitments1.main.hasNoPendingHtlcsOrFeeUpdate) {
+          if (metaCommitments1.hasNoPendingHtlcsOrFeeUpdate) {
             log.debug("switching to NEGOTIATING spec:\n{}", metaCommitments1.main.specs2String)
             if (d.commitments.localParams.isInitiator) {
               // we are the channel initiator, need to initiate the negotiation by sending the first closing_signed
@@ -884,7 +884,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
               goto(NEGOTIATING) using DATA_NEGOTIATING(metaCommitments1, localShutdown, remoteShutdown, closingTxProposed = List(List()), bestUnpublishedClosingTx_opt = None) storing()
             }
           } else {
-            if (metaCommitments1.localHasChanges) {
+            if (metaCommitments1.common.localHasChanges) {
              self ! CMD_SIGN()
            }
            stay() using d.copy(metaCommitments = metaCommitments1) storing()
@@ -1374,7 +1374,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
       sendQueue = sendQueue ++ syncSuccess.retransmit
 
       // then we clean up unsigned updates
-      val metaCommitments1 = d.metaCommitments.discardUnsignedUpdates
+      val metaCommitments1 = d.metaCommitments.discardUnsignedUpdates()
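+      // updates that were proposed but not cross-signed before the disconnection cannot be relied upon
+      // anymore: per the BOLT 2 retransmission rules both sides forget them on reconnection, so we drop
+      // them from our view of the channel as well.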
 
       metaCommitments1.main.remoteNextCommitInfo match {
         case Left(_) =>
@@ -1384,7 +1384,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
       }
 
       // do I have something to sign?
-      if (metaCommitments1.localHasChanges) {
+      if (metaCommitments1.common.localHasChanges) {
         self ! CMD_SIGN()
       }
 
@@ -1443,7 +1443,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
         case syncFailure: SyncResult.Failure =>
           handleSyncFailure(channelReestablish, syncFailure, d)
         case syncSuccess: SyncResult.Success =>
-          val metaCommitments1 = d.metaCommitments.discardUnsignedUpdates
+          val metaCommitments1 = d.metaCommitments.discardUnsignedUpdates()
          val sendQueue = Queue.empty[LightningMessage] ++ syncSuccess.retransmit :+ d.localShutdown // BOLT 2: A node if it has sent a previous shutdown MUST retransmit shutdown.
          goto(SHUTDOWN) using d.copy(metaCommitments = metaCommitments1) sending sendQueue
@@ -1745,7 +1745,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
       val shouldUpdateFee = commitments.localParams.isInitiator && nodeParams.onChainFeeConf.shouldUpdateFee(currentFeeratePerKw, networkFeeratePerKw)
       val shouldClose = !commitments.localParams.isInitiator &&
         nodeParams.onChainFeeConf.feerateToleranceFor(commitments.remoteNodeId).isFeeDiffTooHigh(commitments.channelType, networkFeeratePerKw, currentFeeratePerKw) &&
-        commitments.hasPendingOrProposedHtlcs // we close only if we have HTLCs potentially at risk
+        d.metaCommitments.hasPendingOrProposedHtlcs // we close only if we have HTLCs potentially at risk
       if (shouldUpdateFee) {
         self ! CMD_UPDATE_FEE(networkFeeratePerKw, commit = true)
         stay()
@@ -1771,7 +1771,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
       // if the network fees are too high we risk to not be able to confirm our current commitment
       val shouldClose = networkFeeratePerKw > currentFeeratePerKw &&
        nodeParams.onChainFeeConf.feerateToleranceFor(commitments.remoteNodeId).isFeeDiffTooHigh(commitments.channelType, networkFeeratePerKw, currentFeeratePerKw) &&
-        commitments.hasPendingOrProposedHtlcs // we close only if we have HTLCs potentially at risk
+        d.metaCommitments.hasPendingOrProposedHtlcs // we close only if we have HTLCs potentially at risk
       if (shouldClose) {
         if (nodeParams.onChainFeeConf.closeOnOfflineMismatch) {
           log.warning(s"closing OFFLINE channel due to fee mismatch: currentFeeratePerKw=$currentFeeratePerKw networkFeeratePerKw=$networkFeeratePerKw")
@@ -1895,8 +1895,8 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder, val
       // note: this can only happen if state is NORMAL or SHUTDOWN
       // -> in NEGOTIATING there are no more htlcs
       // -> in CLOSING we either have mutual closed (so no more htlcs), or already have unilaterally closed (so no action required), and we can't be in OFFLINE state anyway
-      val timedOutOutgoing = commitments.timedOutOutgoingHtlcs(c.blockHeight)
-      val almostTimedOutIncoming = commitments.almostTimedOutIncomingHtlcs(c.blockHeight, nodeParams.channelConf.fulfillSafetyBeforeTimeout)
+      val timedOutOutgoing = d.metaCommitments.timedOutOutgoingHtlcs(c.blockHeight)
+      val almostTimedOutIncoming = d.metaCommitments.almostTimedOutIncomingHtlcs(c.blockHeight, nodeParams.channelConf.fulfillSafetyBeforeTimeout)
       if (timedOutOutgoing.nonEmpty) {
         // Downstream timed out.
         handleLocalError(HtlcsTimedoutDownstream(d.channelId, timedOutOutgoing), d, Some(c))
diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/Pipe.scala b/eclair-core/src/test/scala/fr/acinq/eclair/Pipe.scala
index 96a0d63003..d5d07be67e 100644
--- a/eclair-core/src/test/scala/fr/acinq/eclair/Pipe.scala
+++ b/eclair-core/src/test/scala/fr/acinq/eclair/Pipe.scala
@@ -17,7 +17,7 @@ package fr.acinq.eclair
 
 import akka.actor.{Actor, ActorLogging, ActorRef, Stash}
-import fr.acinq.eclair.channel.Commitments.msg2String
+import fr.acinq.eclair.channel.Common.msg2String
 import fr.acinq.eclair.wire.protocol.LightningMessage
 
 /**
diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/channel/CommitmentsSpec.scala b/eclair-core/src/test/scala/fr/acinq/eclair/channel/CommitmentsSpec.scala
index a99c4e765f..f99df28118 100644
--- a/eclair-core/src/test/scala/fr/acinq/eclair/channel/CommitmentsSpec.scala
+++ b/eclair-core/src/test/scala/fr/acinq/eclair/channel/CommitmentsSpec.scala
@@ -18,16 +18,16 @@ package fr.acinq.eclair.channel
 
 import fr.acinq.bitcoin.scalacompat.Crypto.PublicKey
 import fr.acinq.bitcoin.scalacompat.{Block, ByteVector32, ByteVector64, DeterministicWallet, Satoshi, SatoshiLong, Transaction}
+import fr.acinq.eclair._
 import fr.acinq.eclair.blockchain.fee._
-import fr.acinq.eclair.channel.LocalFundingStatus.UnknownFundingTx
 import fr.acinq.eclair.channel.Helpers.Funding
+import fr.acinq.eclair.channel.LocalFundingStatus.UnknownFundingTx
 import fr.acinq.eclair.channel.states.ChannelStateTestsBase
 import fr.acinq.eclair.crypto.ShaChain
 import fr.acinq.eclair.crypto.keymanager.LocalChannelKeyManager
 import fr.acinq.eclair.transactions.CommitmentSpec
 import fr.acinq.eclair.transactions.Transactions.CommitTx
 import fr.acinq.eclair.wire.protocol.{IncorrectOrUnknownPaymentDetails, UpdateAddHtlc, UpdateFailHtlc}
-import fr.acinq.eclair._
 import org.scalatest.funsuite.FixtureAnyFunSuiteLike
 import org.scalatest.{Outcome, Tag}
 import scodec.bits.ByteVector
@@ -71,8 +71,8 @@ class CommitmentsSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
     val htlcOutputFee = 2 * 1720000 msat // fee due to the additional htlc output; we count it twice because we keep a reserve for a x2 feerate increase
     val maxDustExposure = 500000 sat
 
-    val ac0 = alice.stateData.asInstanceOf[DATA_NORMAL].commitments
-    val bc0 = bob.stateData.asInstanceOf[DATA_NORMAL].commitments
+    val ac0 = alice.stateData.asInstanceOf[DATA_NORMAL].metaCommitments
+    val bc0 = bob.stateData.asInstanceOf[DATA_NORMAL].metaCommitments
 
     assert(ac0.availableBalanceForSend > p) // alice can afford the payment
     assert(ac0.availableBalanceForSend == a)
@@ -156,8 +156,8 @@ class CommitmentsSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
     val htlcOutputFee = 2 * 1720000 msat // fee due to the additional htlc output; we count it twice because we keep a reserve for a x2 feerate increase
     val maxDustExposure = 500000 sat
 
-    val ac0 = alice.stateData.asInstanceOf[DATA_NORMAL].commitments
-    val bc0 = bob.stateData.asInstanceOf[DATA_NORMAL].commitments
+    val ac0 = alice.stateData.asInstanceOf[DATA_NORMAL].metaCommitments
+    val bc0 = bob.stateData.asInstanceOf[DATA_NORMAL].metaCommitments
 
     assert(ac0.availableBalanceForSend > p) // alice can afford the payment
     assert(ac0.availableBalanceForSend == a)
@@ -243,8 +243,8 @@ class CommitmentsSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
     val htlcOutputFee = 2 * 1720000 msat // fee due to the additional htlc output; we count it twice because we keep a reserve for a x2 feerate increase
     val maxDustExposure = 500000 sat
 
-    val ac0 = alice.stateData.asInstanceOf[DATA_NORMAL].commitments
-    val bc0 = bob.stateData.asInstanceOf[DATA_NORMAL].commitments
+    val ac0 = alice.stateData.asInstanceOf[DATA_NORMAL].metaCommitments
+    val bc0 = bob.stateData.asInstanceOf[DATA_NORMAL].metaCommitments
 
     assert(ac0.availableBalanceForSend > (p1 + p2)) // alice can afford the payments
     assert(bc0.availableBalanceForSend > p3) // bob can afford the payment
@@ -383,7 +383,7 @@ class CommitmentsSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
   // See https://github.com/lightningnetwork/lightning-rfc/issues/728
   test("funder keeps additional reserve to avoid channel being stuck") { f =>
     val isInitiator = true
-    val c = CommitmentsSpec.makeCommitments(100000000 msat, 50000000 msat, FeeratePerKw(2500 sat), 546 sat, isInitiator)
+    val c = MetaCommitments(CommitmentsSpec.makeCommitments(100000000 msat, 50000000 msat, FeeratePerKw(2500 sat), 546 sat, isInitiator))
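+    // MetaCommitments(...) wraps a single legacy Commitments instance into the new structure (one
+    // commitment plus the shared Common data), cf. the apply method in the MetaCommitments companion object.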
     val (_, cmdAdd) = makeCmdAdd(c.availableBalanceForSend, randomKey().publicKey, f.currentBlockHeight)
     val Right((c1, _)) = c.sendAdd(cmdAdd, f.currentBlockHeight, feeConfNoMismatch)
     assert(c1.availableBalanceForSend == 0.msat)
@@ -398,7 +398,7 @@ class CommitmentsSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
 
   test("can send availableForSend") { f =>
     for (isInitiator <- Seq(true, false)) {
-      val c = CommitmentsSpec.makeCommitments(702000000 msat, 52000000 msat, FeeratePerKw(2679 sat), 546 sat, isInitiator)
+      val c = MetaCommitments(CommitmentsSpec.makeCommitments(702000000 msat, 52000000 msat, FeeratePerKw(2679 sat), 546 sat, isInitiator))
       val (_, cmdAdd) = makeCmdAdd(c.availableBalanceForSend, randomKey().publicKey, f.currentBlockHeight)
       val result = c.sendAdd(cmdAdd, f.currentBlockHeight, feeConfNoMismatch)
       assert(result.isRight, result)
@@ -407,8 +407,8 @@ class CommitmentsSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
 
   test("can receive availableForReceive") { f =>
     for (isInitiator <- Seq(true, false)) {
-      val c = CommitmentsSpec.makeCommitments(31000000 msat, 702000000 msat, FeeratePerKw(2679 sat), 546 sat, isInitiator)
-      val add = UpdateAddHtlc(randomBytes32(), c.remoteNextHtlcId, c.availableBalanceForReceive, randomBytes32(), CltvExpiry(f.currentBlockHeight), TestConstants.emptyOnionPacket, None)
+      val c = MetaCommitments(CommitmentsSpec.makeCommitments(31000000 msat, 702000000 msat, FeeratePerKw(2679 sat), 546 sat, isInitiator))
+      val add = UpdateAddHtlc(randomBytes32(), c.common.remoteNextHtlcId, c.availableBalanceForReceive, randomBytes32(), CltvExpiry(f.currentBlockHeight), TestConstants.emptyOnionPacket, None)
       c.receiveAdd(add, feeConfNoMismatch)
     }
   }
@@ -425,7 +425,7 @@ class CommitmentsSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
         // We make sure both sides have enough to send/receive at least the initial pending HTLCs.
         toLocal = maxPendingHtlcAmount * 2 * 10 + Random.nextInt(1000000000).msat,
         toRemote = maxPendingHtlcAmount * 2 * 10 + Random.nextInt(1000000000).msat)
-      var c = CommitmentsSpec.makeCommitments(t.toLocal, t.toRemote, t.feeRatePerKw, t.dustLimit, t.isInitiator)
+      var c = MetaCommitments(CommitmentsSpec.makeCommitments(t.toLocal, t.toRemote, t.feeRatePerKw, t.dustLimit, t.isInitiator))
       // Add some initial HTLCs to the pending list (bigger commit tx).
       for (_ <- 1 to t.pendingHtlcs) {
         val amount = Random.nextInt(maxPendingHtlcAmount.toLong.toInt).msat.max(1 msat)
@@ -455,18 +455,18 @@ class CommitmentsSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
         // We make sure both sides have enough to send/receive at least the initial pending HTLCs.
         toLocal = maxPendingHtlcAmount * 2 * 10 + Random.nextInt(1000000000).msat,
         toRemote = maxPendingHtlcAmount * 2 * 10 + Random.nextInt(1000000000).msat)
-      var c = CommitmentsSpec.makeCommitments(t.toLocal, t.toRemote, t.feeRatePerKw, t.dustLimit, t.isInitiator)
+      var c = MetaCommitments(CommitmentsSpec.makeCommitments(t.toLocal, t.toRemote, t.feeRatePerKw, t.dustLimit, t.isInitiator))
       // Add some initial HTLCs to the pending list (bigger commit tx).
       for (_ <- 1 to t.pendingHtlcs) {
         val amount = Random.nextInt(maxPendingHtlcAmount.toLong.toInt).msat.max(1 msat)
-        val add = UpdateAddHtlc(randomBytes32(), c.remoteNextHtlcId, amount, randomBytes32(), CltvExpiry(f.currentBlockHeight), TestConstants.emptyOnionPacket, None)
+        val add = UpdateAddHtlc(randomBytes32(), c.common.remoteNextHtlcId, amount, randomBytes32(), CltvExpiry(f.currentBlockHeight), TestConstants.emptyOnionPacket, None)
         c.receiveAdd(add, feeConfNoMismatch) match {
           case Right(cc) => c = cc
           case Left(e) => ignore(s"$t -> could not setup initial htlcs: $e")
         }
       }
       if (c.availableBalanceForReceive > 0.msat) {
-        val add = UpdateAddHtlc(randomBytes32(), c.remoteNextHtlcId, c.availableBalanceForReceive, randomBytes32(), CltvExpiry(f.currentBlockHeight), TestConstants.emptyOnionPacket, None)
+        val add = UpdateAddHtlc(randomBytes32(), c.common.remoteNextHtlcId, c.availableBalanceForReceive, randomBytes32(), CltvExpiry(f.currentBlockHeight), TestConstants.emptyOnionPacket, None)
         c.receiveAdd(add, feeConfNoMismatch) match {
           case Right(_) => ()
           case Left(e) => fail(s"$t -> $e")
@@ -476,7 +476,7 @@ class CommitmentsSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
 
   test("check if channel seed has been modified") { f =>
-    val commitments = f.alice.stateData.asInstanceOf[DATA_NORMAL].commitments
+    val commitments = f.alice.stateData.asInstanceOf[DATA_NORMAL].metaCommitments
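+    // validateSeed recomputes the 2-of-2 funding script from the channel key manager and compares it with
+    // each commitment's funding output: a different seed yields different funding keys, so the check fails.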
     assert(commitments.validateSeed(TestConstants.Alice.channelKeyManager))
     assert(!commitments.validateSeed(new LocalChannelKeyManager(ByteVector32.fromValidHex("42" * 32), Block.RegtestGenesisBlock.hash)))
   }
diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/channel/FuzzyPipe.scala b/eclair-core/src/test/scala/fr/acinq/eclair/channel/FuzzyPipe.scala
index afadbf78ce..73897557cc 100644
--- a/eclair-core/src/test/scala/fr/acinq/eclair/channel/FuzzyPipe.scala
+++ b/eclair-core/src/test/scala/fr/acinq/eclair/channel/FuzzyPipe.scala
@@ -18,7 +18,7 @@ package fr.acinq.eclair.channel
 
 import akka.actor.{Actor, ActorLogging, ActorRef, Stash}
 import fr.acinq.eclair.Features
-import fr.acinq.eclair.channel.Commitments.msg2String
+import fr.acinq.eclair.channel.Common.msg2String
 import fr.acinq.eclair.wire.protocol.{Init, LightningMessage}
 
 import scala.concurrent.duration._
diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/ChannelStateTestsHelperMethods.scala b/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/ChannelStateTestsHelperMethods.scala
index d4b389786c..a490282ca0 100644
--- a/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/ChannelStateTestsHelperMethods.scala
+++ b/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/ChannelStateTestsHelperMethods.scala
@@ -416,7 +416,7 @@ trait ChannelStateTestsBase extends Assertions with Eventually {
     val sender = TestProbe()
     val sCommitIndex = s.stateData.asInstanceOf[PersistentChannelData].commitments.localCommit.index
     val rCommitIndex = r.stateData.asInstanceOf[PersistentChannelData].commitments.localCommit.index
-    val rHasChanges = r.stateData.asInstanceOf[PersistentChannelData].commitments.localHasChanges
+    val rHasChanges = r.stateData.asInstanceOf[PersistentChannelData].metaCommitments.common.localHasChanges
     s ! CMD_SIGN(Some(sender.ref))
     sender.expectMsgType[RES_SUCCESS[CMD_SIGN]]
     s2r.expectMsgType[CommitSig]
diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/payment/PaymentPacketSpec.scala b/eclair-core/src/test/scala/fr/acinq/eclair/payment/PaymentPacketSpec.scala
index 130e519b47..5c718a5ed6 100644
--- a/eclair-core/src/test/scala/fr/acinq/eclair/payment/PaymentPacketSpec.scala
+++ b/eclair-core/src/test/scala/fr/acinq/eclair/payment/PaymentPacketSpec.scala
@@ -23,7 +23,7 @@ import fr.acinq.eclair.FeatureSupport.{Mandatory, Optional}
 import fr.acinq.eclair.Features._
 import fr.acinq.eclair.channel._
 import fr.acinq.eclair.channel.fsm.Channel
-import fr.acinq.eclair.crypto.Sphinx
+import fr.acinq.eclair.crypto.{ShaChain, Sphinx}
 import fr.acinq.eclair.payment.IncomingPaymentPacket.{ChannelRelayPacket, FinalPacket, NodeRelayPacket, decrypt}
 import fr.acinq.eclair.payment.OutgoingPaymentPacket._
 import fr.acinq.eclair.payment.send.{BlindedRecipient, ClearRecipient, ClearTrampolineRecipient}
@@ -700,12 +700,15 @@ object PaymentPacketSpec {
 
   def makeCommitments(channelId: ByteVector32, testAvailableBalanceForSend: MilliSatoshi = 50000000 msat, testAvailableBalanceForReceive: MilliSatoshi = 50000000 msat, testCapacity: Satoshi = 100000 sat, channelFeatures: ChannelFeatures = ChannelFeatures()): Commitments = {
     val channelReserve = testCapacity * 0.01
-    val params = LocalParams(null, null, null, Long.MaxValue.msat, Some(channelReserve), null, null, 0, isInitiator = true, null, None, null)
+    val localParams = LocalParams(null, null, null, Long.MaxValue.msat, Some(channelReserve), null, null, 0, isInitiator = true, null, None, null)
     val remoteParams = RemoteParams(randomKey().publicKey, null, UInt64.MaxValue, Some(channelReserve), null, null, maxAcceptedHtlcs = 0, null, null, null, null, null, null, None)
     val commitInput = InputInfo(OutPoint(randomBytes32(), 1), TxOut(testCapacity, Nil), Nil)
-    val localCommit = LocalCommit(0, null, CommitTxAndRemoteSig(Transactions.CommitTx(commitInput, null), null), null)
+    val localCommit = LocalCommit(0, null, CommitTxAndRemoteSig(Transactions.CommitTx(commitInput, null), null), Nil)
+    val remoteCommit = RemoteCommit(0, null, null, randomKey().publicKey)
+    val localChanges = LocalChanges(Nil, Nil, Nil)
+    val remoteChanges = RemoteChanges(Nil, Nil, Nil)
     val channelFlags = ChannelFlags.Private
-    new Commitments(channelId, ChannelConfig.standard, channelFeatures, params, remoteParams, channelFlags, localCommit, null, null, null, 0, 0, Map.empty, null, null, null, null) {
+    new Commitments(channelId, ChannelConfig.standard, channelFeatures, localParams, remoteParams, channelFlags, localCommit, remoteCommit, localChanges, remoteChanges, 0, 0, Map.empty, Right(randomKey().publicKey), LocalFundingStatus.UnknownFundingTx, RemoteFundingStatus.Locked, ShaChain.init) {
       override lazy val availableBalanceForSend: MilliSatoshi = testAvailableBalanceForSend.max(0 msat)
       override lazy val availableBalanceForReceive: MilliSatoshi = testAvailableBalanceForReceive.max(0 msat)
     }