From be9ffb48193162c92ec6b32eb60d53b1af86c4d4 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 9 Aug 2021 22:03:53 +0000 Subject: [PATCH 01/13] Add #[allow(unused_mut)] on reorg_test as older rustc requires mut --- lightning/src/ln/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 5576d8bd057..758927fdf17 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -53,6 +53,7 @@ mod functional_tests; #[allow(unused_mut)] mod chanmon_update_fail_tests; #[cfg(test)] +#[allow(unused_mut)] mod reorg_tests; #[cfg(test)] #[allow(unused_mut)] From bd14069f049066007fd159be85f92c217be50f55 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 26 Jul 2021 20:01:36 +0000 Subject: [PATCH 02/13] Add new `ChannelError` category to send `warning` messages We don't actually yet support `warning` messages as there are issues left to resolve in the spec PR, but there's nothing to stop us adding an internal enum variant for sending a warning message before we actually support doing so. --- lightning/src/ln/channel.rs | 5 ++++- lightning/src/ln/channelmanager.rs | 13 +++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a0071d5436b..d6ebbf1cff2 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -574,6 +574,7 @@ pub const MIN_DUST_LIMIT_SATOSHIS: u64 = 330; /// channel_id in ChannelManager. pub(super) enum ChannelError { Ignore(String), + Warn(String), Close(String), CloseDelayBroadcast(String), } @@ -582,6 +583,7 @@ impl fmt::Debug for ChannelError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e), + &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e), &ChannelError::Close(ref e) => write!(f, "Close : {}", e), &ChannelError::CloseDelayBroadcast(ref e) => write!(f, "CloseDelayBroadcast : {}", e) } @@ -3323,7 +3325,8 @@ impl Channel { // now! match self.free_holding_cell_htlcs(logger) { Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)), - Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"), + Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) => + panic!("Got non-channel-failing result from free_holding_cell_htlcs"), Ok((Some((commitment_update, monitor_update)), htlcs_to_fail)) => { return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(monitor_update), self.resend_order.clone(), htlcs_to_fail, shutdown_msg)); }, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7304c698ab1..6c8673b4ff2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -276,6 +276,10 @@ impl MsgHandleErrInternal { fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { Self { err: match err { + ChannelError::Warn(msg) => LightningError { + err: msg, + action: msgs::ErrorAction::IgnoreError, + }, ChannelError::Ignore(msg) => LightningError { err: msg, action: msgs::ErrorAction::IgnoreError, @@ -819,6 +823,11 @@ macro_rules! handle_error { macro_rules! 
convert_chan_err { ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => { match $err { + ChannelError::Warn(msg) => { + //TODO: Once warning messages are merged, we should send a `warning` message to our + //peer here. + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) + }, ChannelError::Ignore(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) }, @@ -2322,9 +2331,9 @@ impl ChannelMana // close channel and then send error message to peer. let counterparty_node_id = chan.get().get_counterparty_node_id(); let err: Result<(), _> = match e { - ChannelError::Ignore(_) => { + ChannelError::Ignore(_) | ChannelError::Warn(_) => { panic!("Stated return value requirements in send_commitment() were not met"); - }, + } ChannelError::Close(msg) => { log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); let (channel_id, mut channel) = chan.remove_entry(); From 769eea8a8d706db198d7fb338f159f79ca9db652 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 5 Jul 2021 23:18:41 +0000 Subject: [PATCH 03/13] Improve TLV serialization macro callability very slightly This allows decode_tlv_stream!() to be called with either a mutable reference to a stream or a stream itself and allows encode_tlv_stream!() to be called with a trailing comma at the end of the parameter list. --- lightning/src/util/ser_macros.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index 960fae45d26..0ad9710a794 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -159,6 +159,7 @@ macro_rules! decode_tlv_stream { ($stream: expr, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => { { use ln::msgs::DecodeError; let mut last_seen_type: Option = None; + let mut stream_ref = $stream; 'tlv_read: loop { use util::ser; @@ -168,7 +169,7 @@ macro_rules! decode_tlv_stream { // determine whether we should break or return ShortRead if we get an // UnexpectedEof. This should in every case be largely cosmetic, but it's nice to // pass the TLV test vectors exactly, which require this distinction. - let mut tracking_reader = ser::ReadTrackingReader::new($stream); + let mut tracking_reader = ser::ReadTrackingReader::new(&mut stream_ref); match ser::Readable::read(&mut tracking_reader) { Err(DecodeError::ShortRead) => { if !tracking_reader.have_read { @@ -196,8 +197,8 @@ macro_rules!
decode_tlv_stream { last_seen_type = Some(typ.0); // Finally, read the length and value itself: - let length: ser::BigSize = ser::Readable::read($stream)?; - let mut s = ser::FixedLengthReader::new($stream, length.0); + let length: ser::BigSize = ser::Readable::read(&mut stream_ref)?; + let mut s = ser::FixedLengthReader::new(&mut stream_ref, length.0); match typ.0 { $($type => { decode_tlv!(s, $field, $fieldty); From 6dc9076e664a47486610cdc4572e4f43527426af Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 5 Jul 2021 23:21:36 +0000 Subject: [PATCH 04/13] Implement the closing_signed TLV suffix with allowed fee ranges This adds the serialization and structures for the new fee range specifiers in closing_signed as added upstream at https://github.com/lightningnetwork/lightning-rfc/pull/847 --- fuzz/src/msg_targets/gen_target.sh | 2 +- fuzz/src/msg_targets/mod.rs | 2 +- fuzz/src/msg_targets/msg_closing_signed.rs | 4 +- lightning/src/ln/channel.rs | 3 + lightning/src/ln/msgs.rs | 66 ++++++++++++++++++++-- 5 files changed, 69 insertions(+), 8 deletions(-) diff --git a/fuzz/src/msg_targets/gen_target.sh b/fuzz/src/msg_targets/gen_target.sh index 044f1a1ef08..0c1d061a74d 100755 --- a/fuzz/src/msg_targets/gen_target.sh +++ b/fuzz/src/msg_targets/gen_target.sh @@ -14,7 +14,6 @@ echo "mod utils;" > mod.rs GEN_TEST AcceptChannel test_msg "" GEN_TEST AnnouncementSignatures test_msg "" GEN_TEST ChannelReestablish test_msg "" -GEN_TEST ClosingSigned test_msg "" GEN_TEST CommitmentSigned test_msg "" GEN_TEST DecodedOnionErrorPacket test_msg "" GEN_TEST FundingCreated test_msg "" @@ -40,6 +39,7 @@ GEN_TEST UpdateAddHTLC test_msg_hole ", 85, 33" GEN_TEST ErrorMessage test_msg_hole ", 32, 2" GEN_TEST ChannelUpdate test_msg_hole ", 108, 1" +GEN_TEST ClosingSigned test_msg_simple "" GEN_TEST Init test_msg_simple "" GEN_TEST OnionHopData test_msg_simple "" GEN_TEST Ping test_msg_simple "" diff --git a/fuzz/src/msg_targets/mod.rs b/fuzz/src/msg_targets/mod.rs index e11e3eb26a2..0f273cb7606 100644 --- a/fuzz/src/msg_targets/mod.rs +++ b/fuzz/src/msg_targets/mod.rs @@ -2,7 +2,6 @@ mod utils; pub mod msg_accept_channel; pub mod msg_announcement_signatures; pub mod msg_channel_reestablish; -pub mod msg_closing_signed; pub mod msg_commitment_signed; pub mod msg_decoded_onion_error_packet; pub mod msg_funding_created; @@ -25,6 +24,7 @@ pub mod msg_gossip_timestamp_filter; pub mod msg_update_add_htlc; pub mod msg_error_message; pub mod msg_channel_update; +pub mod msg_closing_signed; pub mod msg_init; pub mod msg_onion_hop_data; pub mod msg_ping; diff --git a/fuzz/src/msg_targets/msg_closing_signed.rs b/fuzz/src/msg_targets/msg_closing_signed.rs index 47881d32d24..52f39af29a9 100644 --- a/fuzz/src/msg_targets/msg_closing_signed.rs +++ b/fuzz/src/msg_targets/msg_closing_signed.rs @@ -17,11 +17,11 @@ use utils::test_logger; #[inline] pub fn msg_closing_signed_test(data: &[u8], _out: Out) { - test_msg!(msgs::ClosingSigned, data); + test_msg_simple!(msgs::ClosingSigned, data); } #[no_mangle] pub extern "C" fn msg_closing_signed_run(data: *const u8, datalen: usize) { let data = unsafe { std::slice::from_raw_parts(data, datalen) }; - test_msg!(msgs::ClosingSigned, data); + test_msg_simple!(msgs::ClosingSigned, data); } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index d6ebbf1cff2..cd819fb14dd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3384,6 +3384,7 @@ impl Channel { channel_id: self.channel_id, fee_satoshis: total_fee_satoshis, signature: 
sig.unwrap(), + fee_range: None, }) } @@ -3567,6 +3568,7 @@ impl Channel { channel_id: self.channel_id, fee_satoshis: used_total_fee, signature: sig, + fee_range: None, }), None)) } } @@ -3608,6 +3610,7 @@ impl Channel { channel_id: self.channel_id, fee_satoshis: msg.fee_satoshis, signature: sig, + fee_range: None, }), Some(closing_tx))) } diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 0042cf51bf3..f94909a9ae5 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -226,6 +226,19 @@ pub struct Shutdown { pub scriptpubkey: Script, } +/// The minimum and maximum fees which the sender is willing to place on the closing transaction. +/// This is provided in [`ClosingSigned`] by both sides to indicate the fee range they are willing +/// to use. +#[derive(Clone, Debug, PartialEq)] +pub struct ClosingSignedFeeRange { + /// The minimum absolute fee, in satoshis, which the sender is willing to place on the closing + /// transaction. + pub min_fee_satoshis: u64, + /// The maximum absolute fee, in satoshis, which the sender is willing to place on the closing + /// transaction. + pub max_fee_satoshis: u64, +} + /// A closing_signed message to be sent or received from a peer #[derive(Clone, Debug, PartialEq)] pub struct ClosingSigned { @@ -235,6 +248,9 @@ pub struct ClosingSigned { pub fee_satoshis: u64, /// A signature on the closing transaction pub signature: Signature, + /// The minimum and maximum fees which the sender is willing to accept, provided only by new + /// nodes. + pub fee_range: Option, } /// An update_add_htlc message to be sent or received from a peer @@ -1103,10 +1119,35 @@ impl Readable for ChannelReestablish{ } } -impl_writeable!(ClosingSigned, 32+8+64, { - channel_id, - fee_satoshis, - signature +impl Writeable for ClosingSigned { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + w.size_hint(32 + 8 + 64 + if self.fee_range.is_some() { 1+1+ 2*8 } else { 0 }); + self.channel_id.write(w)?; + self.fee_satoshis.write(w)?; + self.signature.write(w)?; + encode_tlv_stream!(w, { + (1, self.fee_range, option), + }); + Ok(()) + } +} + +impl Readable for ClosingSigned { + fn read(r: &mut R) -> Result { + let channel_id = Readable::read(r)?; + let fee_satoshis = Readable::read(r)?; + let signature = Readable::read(r)?; + let mut fee_range = None; + decode_tlv_stream!(r, { + (1, fee_range, option), + }); + Ok(Self { channel_id, fee_satoshis, signature, fee_range }) + } +} + +impl_writeable!(ClosingSignedFeeRange, 2*8, { + min_fee_satoshis, + max_fee_satoshis }); impl_writeable_len_match!(CommitmentSigned, { @@ -2323,10 +2364,27 @@ mod tests { channel_id: [2; 32], fee_satoshis: 2316138423780173, signature: sig_1, + fee_range: None, }; let encoded_value = closing_signed.encode(); let target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034dd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap(); assert_eq!(encoded_value, target_value); + assert_eq!(msgs::ClosingSigned::read(&mut Cursor::new(&target_value)).unwrap(), closing_signed); + + let closing_signed_with_range = msgs::ClosingSigned { + channel_id: [2; 32], + fee_satoshis: 2316138423780173, + signature: sig_1, + fee_range: Some(msgs::ClosingSignedFeeRange { + min_fee_satoshis: 0xdeadbeef, + max_fee_satoshis: 0x1badcafe01234567, + }), + }; + let encoded_value_with_range = closing_signed_with_range.encode(); + let target_value_with_range = 
hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034dd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a011000000000deadbeef1badcafe01234567").unwrap(); + assert_eq!(encoded_value_with_range, target_value_with_range); + assert_eq!(msgs::ClosingSigned::read(&mut Cursor::new(&target_value_with_range)).unwrap(), + closing_signed_with_range); } #[test] From 267053ff74a09763f6c12223a61fd01137e53624 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 6 Jul 2021 23:51:11 +0000 Subject: [PATCH 05/13] Log shutdown including which side of the channel initiated shutdown --- lightning/src/ln/channel.rs | 10 ++++++++++ lightning/src/ln/channelmanager.rs | 6 ++++++ 2 files changed, 16 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index cd819fb14dd..e41d6d287b9 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3816,6 +3816,16 @@ impl Channel { self.channel_state >= ChannelState::FundingSent as u32 } + /// Returns true if our peer has either initiated or agreed to shut down the channel. + pub fn received_shutdown(&self) -> bool { + (self.channel_state & ChannelState::RemoteShutdownSent as u32) != 0 + } + + /// Returns true if we either initiated or agreed to shut down the channel. + pub fn sent_shutdown(&self) -> bool { + (self.channel_state & ChannelState::LocalShutdownSent as u32) != 0 + } + /// Returns true if this channel is fully shut down. True here implies that no further actions /// may/will be taken on this channel, and thus this object should be freed. Any future changes /// will be handled appropriately by the chain monitor. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 6c8673b4ff2..a50c072b4cb 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3366,6 +3366,12 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our couterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } + let (shutdown, closing_signed, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &self.keys_manager, &their_features, &msg), channel_state, chan_entry); dropped_htlcs = htlcs; From b1aa950f4fd0333465fb9193a7a70e4e20cd2abe Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 19 Jul 2021 18:32:11 +0000 Subject: [PATCH 06/13] Do not serialize `Channel::last_sent_closing_fee` to disk We're supposed to write `Channel` to disk as if `remove_uncommitted_htlcs_and_mark_paused` had just run, however we were writing `last_sent_closing_fee` to disk (if it is not-None), whereas `remove_uncommitted_htlcs_and_mark_paused` clears it. Indeed, the BOLTs say fee "... negotiation restarts on reconnection." 
--- lightning/src/ln/channel.rs | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e41d6d287b9..d47b4e72439 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -4973,15 +4973,11 @@ impl Writeable for Channel { self.update_time_counter.write(writer)?; self.feerate_per_kw.write(writer)?; - match self.last_sent_closing_fee { - Some((feerate, fee, sig)) => { - 1u8.write(writer)?; - feerate.write(writer)?; - fee.write(writer)?; - sig.write(writer)?; - }, - None => 0u8.write(writer)?, - } + // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here; + // however, we are supposed to restart shutdown fee negotiation on reconnect (and wipe + // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never + // consider the stale state on reload. + 0u8.write(writer)?; self.funding_tx_confirmed_in.write(writer)?; self.funding_tx_confirmation_height.write(writer)?; @@ -5189,11 +5185,19 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel let update_time_counter = Readable::read(reader)?; let feerate_per_kw = Readable::read(reader)?; - let last_sent_closing_fee = match ::read(reader)? { - 0 => None, - 1 => Some((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?)), + // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here; + // however, we are supposed to restart shutdown fee negotiation on reconnect (and wipe + // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never + // consider the stale state on reload. + match ::read(reader)? { + 0 => {}, + 1 => { + let _: u32 = Readable::read(reader)?; + let _: u64 = Readable::read(reader)?; + let _: Signature = Readable::read(reader)?; + }, _ => return Err(DecodeError::InvalidValue), - }; + } let funding_tx_confirmed_in = Readable::read(reader)?; let funding_tx_confirmation_height = Readable::read(reader)?; @@ -5321,7 +5325,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel #[cfg(debug_assertions)] counterparty_max_commitment_tx_output: Mutex::new((0, 0)), - last_sent_closing_fee, + last_sent_closing_fee: None, funding_tx_confirmed_in, funding_tx_confirmation_height, From 67ddd46aeded274caf44cc7a3f9f61975ebb1dd7 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 19 Jul 2021 19:57:37 +0000 Subject: [PATCH 07/13] Send initial closing_signed message asynchronously and handle errs When we added support for external signing, many of the signing functions were allowed to return an error, closing the channel in such a case. `sign_closing_transaction` is one such function which can now return an error, except that instead of handling it properly we'd simply never send a `closing_signed` message, hanging the channel until users intervene and force-close it. Piping the channel-closing error back through the various callsites (several of which already have pending results by the time they call `maybe_propose_first_closing_signed`) may be rather complicated, so instead we simply attempt to propose the initial `closing_signed` in `get_and_clear_pending_msg_events` like we do for holding-cell freeing. Further, since we now (possibly) generate a `ChannelMonitorUpdate` on `shutdown`, we may need to wait for monitor updating to complete before we can send a `closing_signed`, meaning we need to handle the send asynchronously anyway.
This simplifies a few function interfaces and has no impact on behavior, aside from a few message-ordering edge-cases, as seen in the two small test changes required. --- lightning/src/ln/chanmon_update_fail_tests.rs | 40 +++++++- lightning/src/ln/channel.rs | 92 ++++++++++-------- lightning/src/ln/channelmanager.rs | 94 +++++++++++++------ lightning/src/ln/functional_tests.rs | 38 +++++--- 4 files changed, 182 insertions(+), 82 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 37bcabfcea8..ee9a492746b 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -142,7 +142,7 @@ fn test_monitor_and_persister_update_fail() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) { - if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) { + if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Check that even though the persister is returning a TemporaryFailure, // because the update is bogus, ultimately the error that's returned // should be a PermanentFailure. @@ -2561,8 +2561,8 @@ fn test_reconnect_dup_htlc_claims() { #[test] fn test_temporary_error_during_shutdown() { - // Test that temporary failures when updating the monitor's shutdown script do not prevent - // cooperative close. + // Test that temporary failures when updating the monitor's shutdown script delay cooperative + // close. 
let mut config = test_default_channel_config(); config.channel_options.commit_upfront_shutdown_pubkey = false; @@ -2575,9 +2575,39 @@ fn test_temporary_error_during_shutdown() { *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, false); - check_added_monitors!(nodes[0], 1); + + nodes[0].node.close_channel(&channel_id).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id())); + check_added_monitors!(nodes[0], 1); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + *nodes[0].chain_monitor.update_ret.lock().unwrap() = None; + *nodes[1].chain_monitor.update_ret.lock().unwrap() = None; + + let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id())); + + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + *nodes[1].chain_monitor.update_ret.lock().unwrap() = None; + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + let (_, closing_signed_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &closing_signed_b.unwrap()); + let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + + let (_, none_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(none_a.is_none()); + let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + + assert_eq!(txn_a, txn_b); + assert_eq!(txn_a.len(), 1); + check_spends!(txn_a[0], funding_tx); } #[test] diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index d47b4e72439..373bfef1343 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -455,6 +455,11 @@ pub(super) struct Channel { last_sent_closing_fee: Option<(u32, u64, Signature)>, // (feerate, fee, holder_sig) + /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor` + /// update, we need to delay processing it until later. We do that here by simply storing the + /// closing_signed message and handling it in `maybe_propose_closing_signed`. + pending_counterparty_closing_signed: Option, + /// The hash of the block in which the funding transaction was included. 
funding_tx_confirmed_in: Option, funding_tx_confirmation_height: u32, @@ -695,6 +700,7 @@ impl Channel { counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, funding_tx_confirmed_in: None, funding_tx_confirmation_height: 0, @@ -954,6 +960,7 @@ impl Channel { counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, funding_tx_confirmed_in: None, funding_tx_confirmation_height: 0, @@ -2373,9 +2380,8 @@ impl Channel { Ok(()) } - pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, fee_estimator: &F, logger: &L) -> Result<(msgs::RevokeAndACK, Option, Option, ChannelMonitorUpdate), (Option, ChannelError)> - where F::Target: FeeEstimator, - L::Target: Logger + pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<(msgs::RevokeAndACK, Option, ChannelMonitorUpdate), (Option, ChannelError)> + where L::Target: Logger { if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()))); @@ -2541,12 +2547,10 @@ impl Channel { } log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id)); - // TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and - // re-send the message on restoration) return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned()))); } - let (commitment_signed, closing_signed) = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { + let commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok - // we'll send one right away when we get the revoke_and_ack when we // free_holding_cell_htlcs(). @@ -2555,10 +2559,8 @@ impl Channel { // strictly increasing by one, so decrement it here. self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - (Some(msg), None) - } else if !need_commitment { - (None, self.maybe_propose_first_closing_signed(fee_estimator)) - } else { (None, None) }; + Some(msg) + } else { None }; log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" }); @@ -2567,7 +2569,7 @@ impl Channel { channel_id: self.channel_id, per_commitment_secret, next_per_commitment_point, - }, commitment_signed, closing_signed, monitor_update)) + }, commitment_signed, monitor_update)) } /// Public version of the below, checking relevant preconditions first. @@ -2704,9 +2706,8 @@ impl Channel { /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail, /// generating an appropriate error *after* the channel state has been updated based on the /// revoke_and_ack message. 
- pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, fee_estimator: &F, logger: &L) -> Result<(Option, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Option, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>), ChannelError> - where F::Target: FeeEstimator, - L::Target: Logger, + pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Option, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>), ChannelError> + where L::Target: Logger, { if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned())); @@ -2887,7 +2888,7 @@ impl Channel { self.monitor_pending_forwards.append(&mut to_forward_infos); self.monitor_pending_failures.append(&mut revoked_htlcs); log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id())); - return Ok((None, Vec::new(), Vec::new(), None, monitor_update, Vec::new())) + return Ok((None, Vec::new(), Vec::new(), monitor_update, Vec::new())) } match self.free_holding_cell_htlcs(logger)? { @@ -2906,7 +2907,7 @@ impl Channel { self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, None, monitor_update, htlcs_to_fail)) + Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail)) }, (None, htlcs_to_fail) => { if require_commitment { @@ -2926,14 +2927,13 @@ impl Channel { update_fail_malformed_htlcs, update_fee: None, commitment_signed - }), to_forward_infos, revoked_htlcs, None, monitor_update, htlcs_to_fail)) + }), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail)) } else { log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id())); - Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update, htlcs_to_fail)) + Ok((None, to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail)) } } } - } /// Adds a pending update to this channel. See the doc for send_htlc for @@ -2988,6 +2988,7 @@ impl Channel { // Upon reconnect we have to start the closing_signed dance over, but shutdown messages // will be retransmitted. 
self.last_sent_closing_fee = None; + self.pending_counterparty_closing_signed = None; let mut inbound_drop_count = 0; self.pending_inbound_htlcs.retain(|htlc| { @@ -3355,13 +3356,24 @@ impl Channel { } } - fn maybe_propose_first_closing_signed(&mut self, fee_estimator: &F) -> Option - where F::Target: FeeEstimator + pub fn maybe_propose_closing_signed(&mut self, fee_estimator: &F, logger: &L) + -> Result<(Option, Option), ChannelError> + where F::Target: FeeEstimator, L::Target: Logger { - if !self.is_outbound() || !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() || - self.channel_state & (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32) != BOTH_SIDES_SHUTDOWN_MASK || + if !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() || + self.channel_state & + (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 | + ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) + != BOTH_SIDES_SHUTDOWN_MASK || self.last_sent_closing_fee.is_some() || self.pending_update_fee.is_some() { - return None; + return Ok((None, None)); + } + + if !self.is_outbound() { + if let Some(msg) = &self.pending_counterparty_closing_signed.take() { + return self.closing_signed(fee_estimator, &msg); + } + return Ok((None, None)); } let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background); @@ -3371,29 +3383,27 @@ impl Channel { assert!(self.shutdown_scriptpubkey.is_some()); let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000; + log_trace!(logger, "Proposing initial closing signed for our counterparty with a feerate of {} sat/kWeight (= {} sats)", + proposed_feerate, proposed_total_fee_satoshis); let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(proposed_total_fee_satoshis, false); let sig = self.holder_signer .sign_closing_transaction(&closing_tx, &self.secp_ctx) - .ok(); - assert!(closing_tx.get_weight() as u64 <= tx_weight); - if sig.is_none() { return None; } + .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?; - self.last_sent_closing_fee = Some((proposed_feerate, total_fee_satoshis, sig.clone().unwrap())); - Some(msgs::ClosingSigned { + self.last_sent_closing_fee = Some((proposed_feerate, total_fee_satoshis, sig.clone())); + Ok((Some(msgs::ClosingSigned { channel_id: self.channel_id, fee_satoshis: total_fee_satoshis, - signature: sig.unwrap(), + signature: sig, fee_range: None, - }) + }), None)) } - pub fn shutdown( - &mut self, fee_estimator: &F, keys_provider: &K, their_features: &InitFeatures, msg: &msgs::Shutdown - ) -> Result<(Option, Option, Option, Vec<(HTLCSource, PaymentHash)>), ChannelError> - where - F::Target: FeeEstimator, - K::Target: KeysInterface + pub fn shutdown( + &mut self, keys_provider: &K, their_features: &InitFeatures, msg: &msgs::Shutdown + ) -> Result<(Option, Option, Vec<(HTLCSource, PaymentHash)>), ChannelError> + where K::Target: KeysInterface { if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned())); @@ -3481,7 +3491,7 @@ impl Channel { self.channel_state |= ChannelState::LocalShutdownSent as u32; self.update_time_counter += 1; 
- Ok((shutdown, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update, dropped_outbound_htlcs)) + Ok((shutdown, monitor_update, dropped_outbound_htlcs)) } fn build_signed_closing_transaction(&self, tx: &mut Transaction, counterparty_sig: &Signature, sig: &Signature) { @@ -3522,6 +3532,11 @@ impl Channel { return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); } + if self.channel_state & ChannelState::MonitorUpdateFailed as u32 != 0 { + self.pending_counterparty_closing_signed = Some(msg.clone()); + return Ok((None, None)); + } + let funding_redeemscript = self.get_funding_redeemscript(); let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false); if used_total_fee != msg.fee_satoshis { @@ -5326,6 +5341,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel counterparty_max_commitment_tx_output: Mutex::new((0, 0)), last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, funding_tx_confirmed_in, funding_tx_confirmation_height, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a50c072b4cb..f85fe21af02 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3372,7 +3372,7 @@ impl ChannelMana if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); } - let (shutdown, closing_signed, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &self.keys_manager, &their_features, &msg), channel_state, chan_entry); + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); dropped_htlcs = htlcs; // Update the monitor with the shutdown script if necessary. @@ -3393,13 +3393,6 @@ impl ChannelMana msg, }); } - if let Some(msg) = closing_signed { - // TODO: Do not send this if the monitor update failed. 
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: *counterparty_node_id, - msg, - }); - } break Ok(()); }, @@ -3585,8 +3578,8 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) = - match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) { + let (revoke_and_ack, commitment_signed, monitor_update) = + match chan.get_mut().commitment_signed(&msg, &self.logger) { Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), Err((Some(update), e)) => { assert!(chan.get().is_awaiting_monitor_update()); @@ -3598,7 +3591,6 @@ impl ChannelMana }; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); - //TODO: Rebroadcast closing_signed if present on monitor update restoration } channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), @@ -3617,12 +3609,6 @@ impl ChannelMana }, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } Ok(()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3678,12 +3664,12 @@ impl ChannelMana break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) = - break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan); + let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) = + break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); htlcs_to_fail = htlcs_to_fail_in; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { if was_frozen_for_monitor { - assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); + assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); } else { if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) { @@ -3697,12 +3683,6 @@ impl ChannelMana updates, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap())) }, hash_map::Entry::Vacant(_) => break 
Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3946,7 +3926,7 @@ impl ChannelMana }); } - let has_update = has_monitor_update || !failed_htlcs.is_empty(); + let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty(); for (failures, channel_id) in failed_htlcs.drain(..) { self.fail_holding_cell_htlcs(failures, channel_id); } @@ -3958,6 +3938,63 @@ impl ChannelMana has_update } + /// Check whether any channels have finished removing all pending updates after a shutdown + /// exchange and can now send a closing_signed. + /// Returns whether any closing_signed messages were generated. + fn maybe_generate_initial_closing_signed(&self) -> bool { + let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); + let mut has_update = false; + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let by_id = &mut channel_state.by_id; + let short_to_id = &mut channel_state.short_to_id; + let pending_msg_events = &mut channel_state.pending_msg_events; + + by_id.retain(|channel_id, chan| { + match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { + Ok((msg_opt, tx_opt)) => { + if let Some(msg) = msg_opt { + has_update = true; + pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: chan.get_counterparty_node_id(), msg, + }); + } + if let Some(tx) = tx_opt { + // We're done with this channel. We got a closing_signed and sent back + // a closing_signed with a closing transaction to broadcast. + if let Some(short_id) = chan.get_short_channel_id() { + short_to_id.remove(&short_id); + } + + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + + log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); + self.tx_broadcaster.broadcast_transaction(&tx); + false + } else { true } + }, + Err(e) => { + has_update = true; + let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); + handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + !close_channel + } + } + }); + } + + for (counterparty_node_id, err) in handle_errors.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); + } + + has_update + } + /// Handle a list of channel failures during a block_connected or block_disconnected call, /// pushing the channel monitor update (if any) to the background events queue and removing the /// Channel object. 
@@ -4111,6 +4148,9 @@ impl MessageSend if self.check_free_holding_cells() { result = NotifyOption::DoPersist; } + if self.maybe_generate_initial_closing_signed() { + result = NotifyOption::DoPersist; + } let mut pending_events = Vec::new(); let mut channel_state = self.channel_state.lock().unwrap(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 1a20d86fb2b..ea1c8993067 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1011,19 +1011,19 @@ fn htlc_fail_async_shutdown() { let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 2); - let node_0_closing_signed = match msg_events[0] { + match msg_events[0] { + MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { + assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id); + }, + _ => panic!("Unexpected event"), + } + let node_0_closing_signed = match msg_events[1] { MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { assert_eq!(*node_id, nodes[1].node.get_our_node_id()); (*msg).clone() }, _ => panic!("Unexpected event"), }; - match msg_events[1] { - MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { - assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id); - }, - _ => panic!("Unexpected event"), - } assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); @@ -1140,7 +1140,23 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); - let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + let node_0_msgs = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(node_0_msgs.len(), 2); + let node_0_2nd_closing_signed = match node_0_msgs[1] { + MessageSendEvent::SendClosingSigned { ref msg, .. } => { + assert_eq!(node_0_closing_signed, *msg); + msg.clone() + }, + _ => panic!(), + }; + + let node_0_3rd_shutdown = match node_0_msgs[0] { + MessageSendEvent::SendShutdown { ref msg, .. 
} => { + assert_eq!(node_0_2nd_shutdown, *msg); + msg.clone() + }, + _ => panic!(), + }; assert!(node_0_2nd_shutdown == node_0_3rd_shutdown); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish); @@ -1151,8 +1167,6 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown); - let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - assert!(node_0_closing_signed == node_0_2nd_closing_signed); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed); let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); @@ -9023,7 +9037,7 @@ fn test_update_err_monitor_lockdown() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { - if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) { + if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { if let Err(_) = watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); } } else { assert!(false); } @@ -9117,7 +9131,7 @@ fn test_concurrent_monitor_claim() { assert_eq!(updates.update_add_htlcs.len(), 1); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { - if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) { + if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update if let Err(_) = watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } From 177810b1522833f85b8b1c422a455e736afe3676 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 20 Jul 2021 03:19:01 +0000 Subject: [PATCH 08/13] Clean up existing and add range-based closing_signed negotiation This adds the new range-based closing_signed negotiation specified in https://github.com/lightningnetwork/lightning-rfc/pull/847 as well as cleans up the existing closing_signed negotiation to unify the new codepaths and the old ones. Note that the new range-based closing_signed negotiation allows the channel fundee to ultimately select the fee out of a range specified by the funder, from which we, of course, always select the highest allowed amount. Thus, we've added an extra round of closing_signed in the common case as we will not simply accept the first fee we see, always preferring to make the funder pay as much as they're willing to.
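As a rough sketch of the selection rule this implies (a hypothetical helper for illustration, not code from this patch): each side advertises a [min, max] fee range, and the fundee picks the highest fee both ranges allow, since the funder is the one paying it.

/// Hypothetical illustration of range-based closing fee selection: the
/// fundee accepts the largest fee inside the intersection of both ranges.
fn fundee_pick_fee(our_min: u64, our_max: u64, their_min: u64, their_max: u64) -> Option<u64> {
	let overlap_min = our_min.max(their_min);
	let overlap_max = our_max.min(their_max);
	if overlap_min > overlap_max {
		// No overlap: all we can do is warn and, eventually, force-close.
		return None;
	}
	// The funder pays the closing fee, so take as much as they allow.
	Some(overlap_max)
}

For example, if the funder offers 250-2000 sats and our minimum is 500 sats, the fundee responds with 2000 sats, which is why the common case now takes one extra round of closing_signed.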
--- lightning/src/ln/chanmon_update_fail_tests.rs | 12 +- lightning/src/ln/channel.rs | 219 +++++++++++++----- lightning/src/ln/channelmanager.rs | 55 ++++- lightning/src/ln/functional_test_utils.rs | 30 +-- lightning/src/ln/functional_tests.rs | 66 +++--- lightning/src/util/config.rs | 25 ++ 6 files changed, 290 insertions(+), 117 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index ee9a492746b..70bffeeb201 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2597,14 +2597,16 @@ fn test_temporary_error_during_shutdown() { *nodes[1].chain_monitor.update_ret.lock().unwrap() = None; let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); - let (_, closing_signed_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &closing_signed_b.unwrap()); - let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - let (_, none_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(none_a.is_none()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); + let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap()); + let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(none_b.is_none()); + let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(txn_a, txn_b); assert_eq!(txn_a.len(), 1); check_spends!(txn_a[0], funding_tx); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 373bfef1343..0ada482f367 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -453,7 +453,9 @@ pub(super) struct Channel { /// Max to_local and to_remote outputs in a remote-generated commitment transaction counterparty_max_commitment_tx_output: Mutex<(u64, u64)>, - last_sent_closing_fee: Option<(u32, u64, Signature)>, // (feerate, fee, holder_sig) + last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig) + closing_fee_limits: Option<(u64, u64)>, + target_closing_feerate_sats_per_kw: Option, /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor` /// update, we need to delay processing it until later. We do that here by simply storing the @@ -701,6 +703,8 @@ impl Channel { last_sent_closing_fee: None, pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, funding_tx_confirmed_in: None, funding_tx_confirmation_height: 0, @@ -961,6 +965,8 @@ impl Channel { last_sent_closing_fee: None, pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, funding_tx_confirmed_in: None, funding_tx_confirmation_height: 0, @@ -2989,6 +2995,7 @@ impl Channel { // will be retransmitted. 
self.last_sent_closing_fee = None; self.pending_counterparty_closing_signed = None; + self.closing_fee_limits = None; let mut inbound_drop_count = 0; self.pending_inbound_htlcs.retain(|htlc| { @@ -3356,6 +3363,56 @@ impl Channel { } } + /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole + /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart, + /// at which point they will be recalculated. + fn calculate_closing_fee_limits(&mut self, fee_estimator: &F) -> (u64, u64) + where F::Target: FeeEstimator + { + if let Some((min, max)) = self.closing_fee_limits { return (min, max); } + + // Propose a range from our current Background feerate to our Normal feerate plus our + // force_close_avoidance_max_fee_satoshis. + // If we fail to come to consensus, we'll have to force-close. + let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background); + let normal_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() }; + + // The spec requires that (when the channel does not have anchors) we only send absolute + // channel fees no greater than the absolute channel fee on the current commitment + // transaction. It's unclear *which* commitment transaction this refers to, and there isn't + // a very good reason to apply such a limit in any case. We don't bother doing so, risking + // some force-closure by old nodes, but we wanted to close the channel anyway. + + if let Some(target_feerate) = self.target_closing_feerate_sats_per_kw { + let min_feerate = if self.is_outbound() { target_feerate } else { cmp::min(self.feerate_per_kw, target_feerate) }; + proposed_feerate = cmp::max(proposed_feerate, min_feerate); + proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate); + } + + // Note that technically we could end up with a lower minimum fee if one side's balance is + // below our dust limit, causing the output to disappear. We don't bother handling this + // case, however, as this should only happen if a channel is closed before any (material) + // payments have been made on it. This may cause slight fee overpayment and/or failure to + // come to consensus with our counterparty on appropriate fees; however, it should be a + // relatively rare case. We can revisit this later, though note that in order to determine + // if the funder's output is dust we have to know the absolute fee we're going to use. + let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); + let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000; + let proposed_max_total_fee_satoshis = if self.is_outbound() { + // We always add force_close_avoidance_max_fee_satoshis to our normal + // feerate-calculated fee, but allow the max to be overridden if we're using a + // target feerate-calculated fee.
+ cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.config.force_close_avoidance_max_fee_satoshis, + proposed_max_feerate as u64 * tx_weight / 1000) + } else { + u64::max_value() + }; + + self.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis)); + self.closing_fee_limits.clone().unwrap() + } + pub fn maybe_propose_closing_signed(&mut self, fee_estimator: &F, logger: &L) -> Result<(Option, Option), ChannelError> where F::Target: FeeEstimator, L::Target: Logger @@ -3376,27 +3433,26 @@ impl Channel { return Ok((None, None)); } - let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background); - if self.feerate_per_kw > proposed_feerate { - proposed_feerate = self.feerate_per_kw; - } + let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator); + assert!(self.shutdown_scriptpubkey.is_some()); - let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); - let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000; - log_trace!(logger, "Proposing initial closing signed for our counterparty with a feerate of {} sat/kWeight (= {} sats)", - proposed_feerate, proposed_total_fee_satoshis); + let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false); + log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)", + our_min_fee, our_max_fee, total_fee_satoshis); - let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(proposed_total_fee_satoshis, false); let sig = self.holder_signer .sign_closing_transaction(&closing_tx, &self.secp_ctx) .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?; - self.last_sent_closing_fee = Some((proposed_feerate, total_fee_satoshis, sig.clone())); + self.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone())); Ok((Some(msgs::ClosingSigned { channel_id: self.channel_id, fee_satoshis: total_fee_satoshis, signature: sig, - fee_range: None, + fee_range: Some(msgs::ClosingSignedFeeRange { + min_fee_satoshis: our_min_fee, + max_fee_satoshis: our_max_fee, + }), }), None)) } @@ -3532,6 +3588,10 @@ impl Channel { return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); } + if self.is_outbound() && self.last_sent_closing_fee.is_none() { + return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned())); + } + if self.channel_state & ChannelState::MonitorUpdateFailed as u32 != 0 { self.pending_counterparty_closing_signed = Some(msg.clone()); return Ok((None, None)); @@ -3555,78 +3615,104 @@ impl Channel { }, }; - let closing_tx_max_weight = self.get_closing_transaction_weight( - if let Some(oup) = closing_tx.output.get(0) { Some(&oup.script_pubkey) } else { None }, - if let Some(oup) = closing_tx.output.get(1) { Some(&oup.script_pubkey) } else { None }); - if let Some((_, last_fee, sig)) = self.last_sent_closing_fee { + assert!(self.shutdown_scriptpubkey.is_some()); + if let Some((last_fee, sig)) = self.last_sent_closing_fee { if last_fee == msg.fee_satoshis { self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); - assert!(closing_tx.get_weight() as u64 <= closing_tx_max_weight); - debug_assert!(closing_tx.get_weight() as u64 >= closing_tx_max_weight 
- 2); self.channel_state = ChannelState::ShutdownComplete as u32; self.update_time_counter += 1; return Ok((None, Some(closing_tx))); } } - macro_rules! propose_new_feerate { - ($new_feerate: expr) => { - assert!(self.shutdown_scriptpubkey.is_some()); - let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); - let (closing_tx, used_total_fee) = self.build_closing_transaction($new_feerate as u64 * tx_weight / 1000, false); + let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator); + + macro_rules! propose_fee { + ($new_fee: expr) => { + let (mut tx, used_fee) = if $new_fee == msg.fee_satoshis { + (closing_tx, $new_fee) + } else { + self.build_closing_transaction($new_fee, false) + }; + let sig = self.holder_signer - .sign_closing_transaction(&closing_tx, &self.secp_ctx) + .sign_closing_transaction(&tx, &self.secp_ctx) .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; - assert!(closing_tx.get_weight() as u64 <= tx_weight); - self.last_sent_closing_fee = Some(($new_feerate, used_total_fee, sig.clone())); + + let signed_tx = if $new_fee == msg.fee_satoshis { + self.channel_state = ChannelState::ShutdownComplete as u32; + self.update_time_counter += 1; + self.build_signed_closing_transaction(&mut tx, &msg.signature, &sig); + Some(tx) + } else { None }; + + self.last_sent_closing_fee = Some((used_fee, sig.clone())); return Ok((Some(msgs::ClosingSigned { channel_id: self.channel_id, - fee_satoshis: used_total_fee, + fee_satoshis: used_fee, signature: sig, - fee_range: None, - }), None)) + fee_range: Some(msgs::ClosingSignedFeeRange { + min_fee_satoshis: our_min_fee, + max_fee_satoshis: our_max_fee, + }), + }), signed_tx)) } } - let mut min_feerate = 253; - if self.is_outbound() { - let max_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); - if (msg.fee_satoshis as u64) > max_feerate as u64 * closing_tx_max_weight / 1000 { - if let Some((last_feerate, _, _)) = self.last_sent_closing_fee { - if max_feerate <= last_feerate { - return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wanted something higher ({}) than our Normal feerate ({})", last_feerate, max_feerate))); - } + if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range { + if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis { + return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis))); + } + if max_fee_satoshis < our_min_fee { + return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee))); + } + if min_fee_satoshis > our_max_fee { + return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee))); + } + + if !self.is_outbound() { + // They have to pay, so pick the highest fee in the overlapping range. 
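+				// (As the non-funder here, calculate_closing_fee_limits left our own maximum
+				// unconstrained at u64::max_value(), so the min() below effectively picks the
+				// counterparty's max_fee_satoshis.)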
+ debug_assert_eq!(our_max_fee, u64::max_value()); // We should never set an upper bound + propose_fee!(cmp::min(max_fee_satoshis, our_max_fee)); + } else { + if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee { + return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.", + msg.fee_satoshis, our_min_fee, our_max_fee))); } - propose_new_feerate!(max_feerate); + // The proposed fee is in our acceptable range, accept it and broadcast! + propose_fee!(msg.fee_satoshis); } } else { - min_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background); - } - if (msg.fee_satoshis as u64) < min_feerate as u64 * closing_tx_max_weight / 1000 { - if let Some((last_feerate, _, _)) = self.last_sent_closing_fee { - if min_feerate >= last_feerate { - return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wanted something lower ({}) than our Background feerate ({}).", last_feerate, min_feerate))); + // Old fee style negotiation. We don't bother to enforce whether they are complying + // with the "making progress" requirements, we just comply and hope for the best. + if let Some((last_fee, _)) = self.last_sent_closing_fee { + if msg.fee_satoshis > last_fee { + if msg.fee_satoshis < our_max_fee { + propose_fee!(msg.fee_satoshis); + } else if last_fee < our_max_fee { + propose_fee!(our_max_fee); + } else { + return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee))); + } + } else { + if msg.fee_satoshis > our_min_fee { + propose_fee!(msg.fee_satoshis); + } else if last_fee > our_min_fee { + propose_fee!(our_min_fee); + } else { + return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee))); + } + } + } else { + if msg.fee_satoshis < our_min_fee { + propose_fee!(our_min_fee); + } else if msg.fee_satoshis > our_max_fee { + propose_fee!(our_max_fee); + } else { + propose_fee!(msg.fee_satoshis); } } - propose_new_feerate!(min_feerate); } - - let sig = self.holder_signer - .sign_closing_transaction(&closing_tx, &self.secp_ctx) - .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; - self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); - assert!(closing_tx.get_weight() as u64 <= closing_tx_max_weight); - debug_assert!(closing_tx.get_weight() as u64 >= closing_tx_max_weight - 2); - - self.channel_state = ChannelState::ShutdownComplete as u32; - self.update_time_counter += 1; - - Ok((Some(msgs::ClosingSigned { - channel_id: self.channel_id, - fee_satoshis: msg.fee_satoshis, - signature: sig, - fee_range: None, - }), Some(closing_tx))) } // Public utilities: @@ -4669,7 +4755,8 @@ impl Channel { /// Begins the shutdown process, getting a message for the remote peer and returning all /// holding cell HTLCs for payment failure. 
-	pub fn get_shutdown<K: Deref>(&mut self, keys_provider: &K, their_features: &InitFeatures) -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+	pub fn get_shutdown<K: Deref>(&mut self, keys_provider: &K, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>)
+	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
 	where K::Target: KeysInterface {
 		for htlc in self.pending_outbound_htlcs.iter() {
 			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
@@ -4702,6 +4789,7 @@ impl<Signer: Sign> Channel<Signer> {
 		};
 
 		// From here on out, we may not fail!
+		self.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
 		if self.channel_state < ChannelState::FundingSent as u32 {
 			self.channel_state = ChannelState::ShutdownComplete as u32;
 		} else {
@@ -5054,6 +5142,7 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
 			(3, self.counterparty_selected_channel_reserve_satoshis, option),
 			(5, self.config, required),
 			(7, self.shutdown_scriptpubkey, option),
+			(9, self.target_closing_feerate_sats_per_kw, option),
 		});
 
 		Ok(())
@@ -5286,12 +5375,14 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 		};
 
 		let mut announcement_sigs = None;
+		let mut target_closing_feerate_sats_per_kw = None;
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
 			(1, minimum_depth, option),
 			(3, counterparty_selected_channel_reserve_satoshis, option),
 			(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
 			(7, shutdown_scriptpubkey, option),
+			(9, target_closing_feerate_sats_per_kw, option),
 		});
 
 		let mut secp_ctx = Secp256k1::new();
@@ -5342,6 +5433,8 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 			last_sent_closing_fee: None,
 			pending_counterparty_closing_signed: None,
+			closing_fee_limits: None,
+			target_closing_feerate_sats_per_kw,
 
 			funding_tx_confirmed_in,
 			funding_tx_confirmation_height,
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f85fe21af02..c0e32bbdd47 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1284,12 +1284,7 @@ impl ChannelMana
 		self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
 	}
 
-	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
-	/// will be accepted on the given channel, and after additional timeout/the closing of all
-	/// pending HTLCs, the channel will be closed on chain.
-	///
-	/// May generate a SendShutdown message event on success, which should be relayed.
-	pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+	fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
 		let counterparty_node_id;
@@ -1305,7 +1300,7 @@ impl ChannelMana
 				Some(peer_state) => {
 					let peer_state = peer_state.lock().unwrap();
 					let their_features = &peer_state.latest_features;
-					chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features)?
+					chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
 				},
 				None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
 			};
@@ -1350,6 +1345,50 @@ impl ChannelMana
 		Ok(())
 	}
 
+	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+	/// will be accepted on the given channel, and after additional timeout/the closing of all
+	/// pending HTLCs, the channel will be closed on chain.
+	///
+	///  * If we are the channel initiator, we will pay a fee on the closing transaction between
+	///    our [`Background`] fee estimate and our [`Normal`] fee estimate plus
+	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`].
+	///  * If our counterparty is the channel initiator, we will require a channel closing
+	///    transaction feerate of at least our [`Background`] feerate or the feerate which
+	///    would appear on a force-closure transaction, whichever is lower. We will allow our
+	///    counterparty to pay as much fee as they'd like, however.
+	///
+	/// May generate a SendShutdown message event on success, which should be relayed.
+	///
+	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+	/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+	/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+	pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+		self.close_channel_internal(channel_id, None)
+	}
+
+	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+	/// will be accepted on the given channel, and after additional timeout/the closing of all
+	/// pending HTLCs, the channel will be closed on chain.
+	///
+	/// `target_feerate_sats_per_1000_weight` has different meanings depending on whether we
+	/// initiated the channel being closed:
+	///  * If we are the channel initiator, we will pay at least this feerate on the closing
+	///    transaction. The upper bound is set by
+	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
+	///    estimate (or `target_feerate_sats_per_1000_weight`, if it is greater).
+	///  * If our counterparty is the channel initiator, we will refuse to accept a channel closure
+	///    transaction feerate below `target_feerate_sats_per_1000_weight` (or the feerate which
+	///    will appear on a force-closure transaction, whichever is lower).
+	///
+	/// May generate a SendShutdown message event on success, which should be relayed.
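+	///
+	/// As a usage sketch (illustrative only, not normative; `channel_manager` and `channel_id`
+	/// stand in for an initialized `ChannelManager` and the id of one of its open channels):
+	///
+	/// ```ignore
+	/// // Let the implementation derive the closing-fee bounds from its fee estimator:
+	/// channel_manager.close_channel(&channel_id).unwrap();
+	/// // ...or instead require at least 253 sats per 1000 weight units on the closing
+	/// // transaction (253 sat/kW being the floor the pre-fee-range code hard-coded):
+	/// channel_manager.close_channel_with_target_feerate(&channel_id, 253).unwrap();
+	/// ```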
+ /// + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> { + self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight)) + } + #[inline] fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (monitor_update_option, mut failed_htlcs) = shutdown_res; @@ -3367,7 +3406,7 @@ impl ChannelMana } if !chan_entry.get().received_shutdown() { - log_info!(self.logger, "Received a shutdown message from our couterparty for channel {}{}.", + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", log_bytes!(msg.channel_id), if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 6c08065bc2f..565f1305963 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -755,7 +755,7 @@ macro_rules! check_closed_broadcast { pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) { let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) }; - let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) }; + let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) }; let (tx_a, tx_b); node_a.close_channel(channel_id).unwrap(); @@ -788,29 +788,33 @@ pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: let (as_update, bs_update) = if close_inbound_first { assert!(node_a.get_and_clear_pending_msg_events().is_empty()); node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()); - assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); - tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); - let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); - node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()); - let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); - assert!(none_b.is_none()); + node_b.handle_closing_signed(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id())); assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); + let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); + + node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()); + let (as_update, none_a) = get_closing_signed_broadcast!(node_a, 
node_b.get_our_node_id()); + assert!(none_a.is_none()); + assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); + tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); (as_update, bs_update) } else { let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id()); node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a); - assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); - tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); - let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); + node_a.handle_closing_signed(&node_b.get_our_node_id(), &get_event_msg!(struct_b, MessageSendEvent::SendClosingSigned, node_a.get_our_node_id())); - node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()); - let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); - assert!(none_a.is_none()); assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); + let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); + + node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()); + let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); + assert!(none_b.is_none()); + assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); + tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); (as_update, bs_update) }; assert_eq!(tx_a, tx_b); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index ea1c8993067..59ccfbe9675 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -876,10 +876,12 @@ fn pre_funding_lock_shutdown_test() { let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); @@ -949,10 +951,12 @@ fn updates_shutdown_wait() { let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, 
nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); assert!(nodes[0].node.list_channels().is_empty()); @@ -1027,10 +1031,12 @@ fn htlc_fail_async_shutdown() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); assert!(nodes[0].node.list_channels().is_empty()); @@ -1125,19 +1131,21 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); if recv_count > 0 { nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - assert!(node_1_closing_signed.is_some()); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_2nd_closing_signed.is_some()); } nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); - let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + let node_1_2nd_reestablish = 
get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); if recv_count == 0 { // If all closing_signeds weren't delivered we can just resume where we left off... - let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); let node_0_msgs = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1169,10 +1177,12 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); } else { // If one node, however, received + responded with an identical closing_signed we end // up erroring and node[0] will try to broadcast its own latest commitment transaction. @@ -1181,15 +1191,15 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // closing_signed and avoiding broadcasting local commitment txn for some timeout to // give our counterparty enough time to (potentially) broadcast a cooperative closing // transaction. - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish); - let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); if let MessageSendEvent::HandleError { ref action, .. 
 			} = msg_events[0] {
 				match action {
 					&ErrorAction::SendErrorMessage { ref msg } => {
-						nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
+						nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &msg);
 						assert_eq!(msg.channel_id, chan_1.2);
 					},
 					_ => panic!("Unexpected event!"),
@@ -1197,10 +1207,10 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 		} else { panic!("Needed SendErrorMessage close"); }
 
 		// get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
-		// checks it, but in this case nodes[0] didn't ever get a chance to receive a
+		// checks it, but in this case nodes[1] didn't ever get a chance to receive a
 		// closing_signed so we do it ourselves
-		check_closed_broadcast!(nodes[0], false);
-		check_added_monitors!(nodes[0], 1);
+		check_closed_broadcast!(nodes[1], false);
+		check_added_monitors!(nodes[1], 1);
 	}
 
 	assert!(nodes[0].node.list_channels().is_empty());
diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs
index 3348b7b254e..fb4f6fce737 100644
--- a/lightning/src/util/config.rs
+++ b/lightning/src/util/config.rs
@@ -223,6 +223,29 @@ pub struct ChannelConfig {
 	///
 	/// Default value: 5_000_000 msat.
 	pub max_dust_htlc_exposure_msat: u64,
+	/// The additional fee we're willing to pay to avoid waiting for the counterparty's
+	/// `to_self_delay` to reclaim funds.
+	///
+	/// When we close a channel cooperatively with our counterparty, we negotiate a fee for the
+	/// closing transaction which both sides find acceptable, ultimately paid by the channel
+	/// funder/initiator.
+	///
+	/// When we are the funder, because we have to pay the channel closing fee, we bound the
+	/// acceptable fee by our [`Background`] and [`Normal`] fees, with the upper bound increased by
+	/// this value. Because the on-chain fee we'd pay to force-close the channel is kept near our
+	/// [`Normal`] feerate during normal operation, this value represents the additional fee we're
+	/// willing to pay in order to avoid waiting for our counterparty's `to_self_delay` to reclaim
+	/// our funds.
+	///
+	/// When we are not the funder, we require the closing transaction fee to be at least our
+	/// [`Background`] fee estimate, but allow our counterparty to pay as much fee as they like.
+	/// Thus, this value is ignored when we are not the funder.
+	///
+	/// Default value: 1000 satoshis.
+	///
+	/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+	/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+	pub force_close_avoidance_max_fee_satoshis: u64,
 }
 
 impl Default for ChannelConfig {
@@ -235,6 +258,7 @@ impl Default for ChannelConfig {
 			announced_channel: false,
 			commit_upfront_shutdown_pubkey: true,
 			max_dust_htlc_exposure_msat: 5_000_000,
+			force_close_avoidance_max_fee_satoshis: 1000,
 		}
 	}
 }
@@ -243,6 +267,7 @@ impl_writeable_tlv_based!(ChannelConfig, {
 	(0, forwarding_fee_proportional_millionths, required),
 	(1, max_dust_htlc_exposure_msat, (default_value, 5_000_000)),
 	(2, cltv_expiry_delta, required),
+	(3, force_close_avoidance_max_fee_satoshis, (default_value, 1000)),
 	(4, announced_channel, required),
 	(6, commit_upfront_shutdown_pubkey, required),
 	(8, forwarding_fee_base_msat, required),

From 5712de2da0d4591b96b2e5b5d1e7bd68c30c6ad5 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Mon, 26 Jul 2021 18:59:03 +0000
Subject: [PATCH 09/13] Move shutdown-related tests into a new module

Because ln::functional_tests is over 9000 LoC long, it's useful to
move tests into new modules as we can.
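(For orientation, the three added lines in lightning/src/ln/mod.rs counted in the
diffstat below are presumably the usual test-module declaration; this is a sketch
rather than text taken from the actual hunk:

    #[cfg(test)]
    #[allow(unused_mut)]
    mod shutdown_tests;

)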
Here we move all cooperative shutdown related tests into a new module entitled `shutdown_tests` --- lightning/src/ln/functional_tests.rs | 693 ------------------------- lightning/src/ln/mod.rs | 3 + lightning/src/ln/shutdown_tests.rs | 728 +++++++++++++++++++++++++++ 3 files changed, 731 insertions(+), 693 deletions(-) create mode 100644 lightning/src/ln/shutdown_tests.rs diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 59ccfbe9675..980c6ea40f1 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -28,10 +28,8 @@ use routing::network_graph::RoutingFees; use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures}; use ln::msgs; use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction}; -use ln::script::ShutdownScript; use util::enforcing_trait_impls::EnforcingSigner; use util::{byte_utils, test_utils}; -use util::test_utils::OnGetShutdownScriptpubkey; use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; use util::errors::APIError; use util::ser::{Writeable, ReadableArgs}; @@ -57,12 +55,10 @@ use io; use prelude::*; use alloc::collections::BTreeSet; use core::default::Default; -use core::num::NonZeroU8; use sync::{Arc, Mutex}; use ln::functional_test_utils::*; use ln::chan_utils::CommitmentTransaction; -use ln::msgs::OptionalField::Present; #[test] fn test_insane_channel_opens() { @@ -857,378 +853,6 @@ fn test_update_fee() { close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); } -#[test] -fn pre_funding_lock_shutdown_test() { - // Test sending a shutdown prior to funding_locked after funding generation - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::known(), InitFeatures::known()); - mine_transaction(&nodes[0], &tx); - mine_transaction(&nodes[1], &tx); - - nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap(); - let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - - let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); - let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); - let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - assert!(node_1_none.is_none()); - - 
assert!(nodes[0].node.list_channels().is_empty()); - assert!(nodes[1].node.list_channels().is_empty()); -} - -#[test] -fn updates_shutdown_wait() { - // Test sending a shutdown with outstanding updates pending - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - - let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); - - nodes[0].node.close_channel(&chan_1.2).unwrap(); - let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]); - - let net_graph_msg_handler0 = &nodes[0].net_graph_msg_handler; - let net_graph_msg_handler1 = &nodes[1].net_graph_msg_handler; - let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler0.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); - let route_2 = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler1.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); - unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); - unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); - - assert!(nodes[2].node.claim_funds(our_payment_preimage)); - check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_none()); - assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - expect_payment_forwarded!(nodes[1], Some(1000), false); - check_added_monitors!(nodes[1], 1); - let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - - assert!(updates_2.update_add_htlcs.is_empty()); - assert!(updates_2.update_fail_htlcs.is_empty()); - assert!(updates_2.update_fail_malformed_htlcs.is_empty()); - assert!(updates_2.update_fee.is_none()); - assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); - 
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(our_payment_preimage, *payment_preimage); - }, - _ => panic!("Unexpected event"), - } - - let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); - let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); - let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - assert!(node_1_none.is_none()); - - assert!(nodes[0].node.list_channels().is_empty()); - - assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); - close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); - assert!(nodes[1].node.list_channels().is_empty()); - assert!(nodes[2].node.list_channels().is_empty()); -} - -#[test] -fn htlc_fail_async_shutdown() { - // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); - nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - assert_eq!(updates.update_add_htlcs.len(), 1); - assert!(updates.update_fulfill_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_none()); - - nodes[1].node.close_channel(&chan_1.2).unwrap(); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - - 
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); - - let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert!(updates_2.update_add_htlcs.is_empty()); - assert!(updates_2.update_fulfill_htlcs.is_empty()); - assert_eq!(updates_2.update_fail_htlcs.len(), 1); - assert!(updates_2.update_fail_malformed_htlcs.is_empty()); - assert!(updates_2.update_fee.is_none()); - - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); - - expect_payment_failed!(nodes[0], our_payment_hash, false); - - let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 2); - match msg_events[0] { - MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { - assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id); - }, - _ => panic!("Unexpected event"), - } - let node_0_closing_signed = match msg_events[1] { - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - (*msg).clone() - }, - _ => panic!("Unexpected event"), - }; - - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); - let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); - let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - assert!(node_1_none.is_none()); - - assert!(nodes[0].node.list_channels().is_empty()); - - assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); - close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); - assert!(nodes[1].node.list_channels().is_empty()); - assert!(nodes[2].node.list_channels().is_empty()); -} - -fn do_test_shutdown_rebroadcast(recv_count: u8) { - // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of - // messages delivered prior to disconnect - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - - let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); - - 
nodes[1].node.close_channel(&chan_1.2).unwrap(); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - if recv_count > 0 { - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - if recv_count > 1 { - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - } - } - - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); - let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); - let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); - - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish); - let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - assert!(node_1_shutdown == node_1_2nd_shutdown); - - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish); - let node_0_2nd_shutdown = if recv_count > 0 { - let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); - node_0_2nd_shutdown - } else { - let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); - assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected. 
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); - get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()) - }; - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown); - - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - assert!(nodes[2].node.claim_funds(our_payment_preimage)); - check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_none()); - assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - expect_payment_forwarded!(nodes[1], Some(1000), false); - check_added_monitors!(nodes[1], 1); - let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - - assert!(updates_2.update_add_htlcs.is_empty()); - assert!(updates_2.update_fail_htlcs.is_empty()); - assert!(updates_2.update_fail_malformed_htlcs.is_empty()); - assert!(updates_2.update_fee.is_none()); - assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(our_payment_preimage, *payment_preimage); - }, - _ => panic!("Unexpected event"), - } - - let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); - if recv_count > 0 { - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); - let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); - let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_2nd_closing_signed.is_some()); - } - - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); - let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); - if recv_count == 0 { - // If all closing_signeds weren't delivered we can just resume where we left off... 
- let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); - - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); - let node_0_msgs = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(node_0_msgs.len(), 2); - let node_0_2nd_closing_signed = match node_0_msgs[1] { - MessageSendEvent::SendClosingSigned { ref msg, .. } => { - assert_eq!(node_0_closing_signed, *msg); - msg.clone() - }, - _ => panic!(), - }; - - let node_0_3rd_shutdown = match node_0_msgs[0] { - MessageSendEvent::SendShutdown { ref msg, .. } => { - assert_eq!(node_0_2nd_shutdown, *msg); - msg.clone() - }, - _ => panic!(), - }; - assert!(node_0_2nd_shutdown == node_0_3rd_shutdown); - - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish); - let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - assert!(node_1_3rd_shutdown == node_1_2nd_shutdown); - - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown); - - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed); - let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); - let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); - let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - assert!(node_1_none.is_none()); - } else { - // If one node, however, received + responded with an identical closing_signed we end - // up erroring and node[0] will try to broadcast its own latest commitment transaction. - // There isn't really anything better we can do simply, but in the future we might - // explore storing a set of recently-closed channels that got disconnected during - // closing_signed and avoiding broadcasting local commitment txn for some timeout to - // give our counterparty enough time to (potentially) broadcast a cooperative closing - // transaction. - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); - let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - if let MessageSendEvent::HandleError { ref action, .. 
} = msg_events[0] { - match action { - &ErrorAction::SendErrorMessage { ref msg } => { - nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &msg); - assert_eq!(msg.channel_id, chan_1.2); - }, - _ => panic!("Unexpected event!"), - } - } else { panic!("Needed SendErrorMessage close"); } - - // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and - // checks it, but in this case nodes[1] didn't ever get a chance to receive a - // closing_signed so we do it ourselves - check_closed_broadcast!(nodes[1], false); - check_added_monitors!(nodes[1], 1); - } - - assert!(nodes[0].node.list_channels().is_empty()); - - assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); - close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); - assert!(nodes[1].node.list_channels().is_empty()); - assert!(nodes[2].node.list_channels().is_empty()); -} - -#[test] -fn test_shutdown_rebroadcast() { - do_test_shutdown_rebroadcast(0); - do_test_shutdown_rebroadcast(1); - do_test_shutdown_rebroadcast(2); -} - #[test] fn fake_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -7508,323 +7132,6 @@ fn test_sweep_outbound_htlc_failure_update() { do_test_sweep_outbound_htlc_failure_update(true, false); } -#[test] -fn test_upfront_shutdown_script() { - // BOLT 2 : Option upfront shutdown script, if peer commit its closing_script at channel opening - // enforce it at shutdown message - - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - // We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign - let flags = InitFeatures::known(); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone()); - nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); - // Test we enforce upfront_scriptpbukey if by providing a diffrent one at closing that we disconnect peer - nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str())); - check_added_monitors!(nodes[2], 1); - - // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone()); - nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - let node_0_shutdown = get_event_msg!(nodes[0], 
MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); - // We test that in case of peer committing upfront to a script, if it oesn't change at closing, we sign - nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let events = nodes[2].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - - // We test that if case of peer non-signaling we don't enforce committed script at channel opening - let flags_no = InitFeatures::known().clear_upfront_shutdown_script(); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone()); - nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); - check_added_monitors!(nodes[1], 1); - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - - // We test that if user opt-out, we provide a zero-length script at channel opening and we are able to close - // channel smoothly, opt-out is from channel initiator here - let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - - //// We test that if user opt-out, we provide a zero-length script at channel opening and we are able to close - //// channel smoothly - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - match events[1] { - MessageSendEvent::SendClosingSigned { node_id, .. 
} => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_unsupported_anysegwit_upfront_shutdown_script() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - // Use a non-v0 segwit script supported by option_shutdown_anysegwit - let node_features = InitFeatures::known().clear_shutdown_anysegwit(); - let anysegwit_shutdown_script = Builder::new() - .push_int(16) - .push_slice(&[0, 40]) - .into_script(); - - // Check script when handling an open_channel message - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); - let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - open_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone()); - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), node_features.clone(), &open_channel); - - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[0].node.get_our_node_id()); - assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)"); - }, - _ => panic!("Unexpected event"), - } - - // Check script when handling an accept_channel message - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); - let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - accept_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone()); - nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), node_features, &accept_channel); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)"); - }, - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_invalid_upfront_shutdown_script() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); - - // Use a segwit v0 script with an unsupported witness program - let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(0) - .push_slice(&[0, 0]) - .into_script()); - nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), 
InitFeatures::known(), &open_channel); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[0].node.get_our_node_id()); - assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_0 OP_PUSHBYTES_2 0000)"); - }, - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_segwit_v0_shutdown_script() { - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - - // Use a segwit v0 script supported even without option_shutdown_anysegwit - let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = Builder::new().push_int(0) - .push_slice(&[0; 20]) - .into_script(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - match events[1] { - MessageSendEvent::SendClosingSigned { node_id, .. 
} => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_anysegwit_shutdown_script() { - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - - // Use a non-v0 segwit script supported by option_shutdown_anysegwit - let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = Builder::new().push_int(16) - .push_slice(&[0, 0]) - .into_script(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[0] { - MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } - match events[1] { - MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } - _ => panic!("Unexpected event"), - } -} - -#[test] -fn test_unsupported_anysegwit_shutdown_script() { - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - node_cfgs[0].features = InitFeatures::known().clear_shutdown_anysegwit(); - node_cfgs[1].features = InitFeatures::known().clear_shutdown_anysegwit(); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - // Check that using an unsupported shutdown script fails and a supported one succeeds. 
- let supported_shutdown_script = chanmon_cfgs[1].keys_manager.get_shutdown_scriptpubkey(); - let unsupported_shutdown_script = - ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap(); - chanmon_cfgs[1].keys_manager - .expect(OnGetShutdownScriptpubkey { returns: unsupported_shutdown_script.clone() }) - .expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script }); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, node_cfgs[0].features.clone(), node_cfgs[1].features.clone()); - match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()) { - Err(APIError::IncompatibleShutdownScript { script }) => { - assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner()); - }, - Err(e) => panic!("Unexpected error: {:?}", e), - Ok(_) => panic!("Expected error"), - } - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - - // Use a non-v0 segwit script unsupported without option_shutdown_anysegwit - let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = unsupported_shutdown_script.into_inner(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_cfgs[1].features, &node_0_shutdown); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[1] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020028) from remote peer".to_owned()); - }, - _ => panic!("Unexpected event"), - } - check_added_monitors!(nodes[0], 1); -} - -#[test] -fn test_invalid_shutdown_script() { - let mut config = UserConfig::default(); - config.channel_options.announced_channel = true; - config.peer_channel_config_limits.force_announced_channel_preference = false; - config.channel_options.commit_upfront_shutdown_pubkey = false; - let user_cfgs = [None, Some(config), None]; - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); - check_added_monitors!(nodes[1], 1); - - // Use a segwit v0 script with an unsupported witness program - let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - node_0_shutdown.scriptpubkey = Builder::new().push_int(0) - .push_slice(&[0, 0]) - .into_script(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - match events[1] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned()) - }, - _ => panic!("Unexpected event"), - } - check_added_monitors!(nodes[0], 1); -} - #[test] fn 
test_user_configurable_csv_delay() { // We test our channel constructors yield errors when we pass them absurd csv delay diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 758927fdf17..d265888e465 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -61,6 +61,9 @@ mod onion_route_tests; #[cfg(test)] #[allow(unused_mut)] mod monitor_tests; +#[cfg(test)] +#[allow(unused_mut)] +mod shutdown_tests; pub use self::peer_channel_encryptor::LN_MAX_MSG_LEN; diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs new file mode 100644 index 00000000000..a4dd3a0781a --- /dev/null +++ b/lightning/src/ln/shutdown_tests.rs @@ -0,0 +1,728 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Tests of our shutdown and closing_signed negotiation logic. + +use chain::keysinterface::KeysInterface; +use chain::transaction::OutPoint; +use ln::{PaymentPreimage, PaymentHash}; +use ln::channelmanager::PaymentSendFailure; +use routing::router::get_route; +use ln::features::{InitFeatures, InvoiceFeatures}; +use ln::msgs; +use ln::msgs::{ChannelMessageHandler, ErrorAction}; +use ln::script::ShutdownScript; +use util::test_utils; +use util::test_utils::OnGetShutdownScriptpubkey; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; +use util::errors::APIError; +use util::config::UserConfig; + +use bitcoin::blockdata::script::Builder; +use bitcoin::blockdata::opcodes; + +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; + +use regex; + +use core::default::Default; +use core::num::NonZeroU8; + +use ln::functional_test_utils::*; +use ln::msgs::OptionalField::Present; + +#[test] +fn pre_funding_lock_shutdown_test() { + // Test sending a shutdown prior to funding_locked after funding generation + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::known(), InitFeatures::known()); + mine_transaction(&nodes[0], &tx); + mine_transaction(&nodes[1], &tx); + + nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = 
get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + assert!(nodes[1].node.list_channels().is_empty()); +} + +#[test] +fn updates_shutdown_wait() { + // Test sending a shutdown with outstanding updates pending + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + let logger = test_utils::TestLogger::new(); + + let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); + + nodes[0].node.close_channel(&chan_1.2).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]); + + let net_graph_msg_handler0 = &nodes[0].net_graph_msg_handler; + let net_graph_msg_handler1 = &nodes[1].net_graph_msg_handler; + let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler0.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let route_2 = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler1.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); + unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); + + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[1], Some(1000), false); + check_added_monitors!(nodes[1], 1); + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + commitment_signed_dance!(nodes[1], nodes[2], 
updates.commitment_signed, false); + + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fail_htlcs.is_empty()); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(our_payment_preimage, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); +} + +#[test] +fn htlc_fail_async_shutdown() { + // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + let logger = test_utils::TestLogger::new(); + + let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); + let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; + let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + + nodes[1].node.close_channel(&chan_1.2).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], 
MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); + + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fulfill_htlcs.is_empty()); + assert_eq!(updates_2.update_fail_htlcs.len(), 1); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + expect_payment_failed!(nodes[0], our_payment_hash, false); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2); + match msg_events[0] { + MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { + assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id); + }, + _ => panic!("Unexpected event"), + } + let node_0_closing_signed = match msg_events[1] { + MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + (*msg).clone() + }, + _ => panic!("Unexpected event"), + }; + + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); +} + +fn do_test_shutdown_rebroadcast(recv_count: u8) { + // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of + // messages delivered prior to disconnect + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let chan_1 = 
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + + let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); + + nodes[1].node.close_channel(&chan_1.2).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + if recv_count > 0 { + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + if recv_count > 1 { + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + } + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish); + let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + assert!(node_1_shutdown == node_1_2nd_shutdown); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish); + let node_0_2nd_shutdown = if recv_count > 0 { + let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); + node_0_2nd_shutdown + } else { + let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected. 
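Aside: the `flags & 2` assert just above relies on BOLT 7's channel_update flag layout, in which bit 0 encodes the update's direction and bit 1 marks the channel as disabled. A standalone sketch of that bit test, using a hypothetical `is_channel_disabled` helper rather than any LDK API, could look like:

    // BOLT 7 channel_update flags: bit 0 = direction, bit 1 = disable.
    // `flags` stands in for the message's flags field asserted on above.
    fn is_channel_disabled(flags: u16) -> bool {
        flags & (1 << 1) != 0
    }

    #[test]
    fn disable_bit_examples() {
        assert!(!is_channel_disabled(0b00)); // enabled, direction bit unset
        assert!(!is_channel_disabled(0b01)); // enabled, direction bit set
        assert!(is_channel_disabled(0b10)); // disabled
    }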
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); + get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()) + }; + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[1], Some(1000), false); + check_added_monitors!(nodes[1], 1); + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); + + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fail_htlcs.is_empty()); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(our_payment_preimage, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + if recv_count > 0 { + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_2nd_closing_signed.is_some()); + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + if recv_count == 0 { + // If all closing_signeds weren't delivered we can just resume where we left off... 
+ let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); + let node_0_msgs = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(node_0_msgs.len(), 2); + let node_0_2nd_closing_signed = match node_0_msgs[1] { + MessageSendEvent::SendClosingSigned { ref msg, .. } => { + assert_eq!(node_0_closing_signed, *msg); + msg.clone() + }, + _ => panic!(), + }; + + let node_0_3rd_shutdown = match node_0_msgs[0] { + MessageSendEvent::SendShutdown { ref msg, .. } => { + assert_eq!(node_0_2nd_shutdown, *msg); + msg.clone() + }, + _ => panic!(), + }; + assert!(node_0_2nd_shutdown == node_0_3rd_shutdown); + + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish); + let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + assert!(node_1_3rd_shutdown == node_1_2nd_shutdown); + + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown); + + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed); + let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); + let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_none.is_none()); + } else { + // If one node, however, received and responded with an identical closing_signed we end + // up erroring and nodes[0] will try to broadcast its own latest commitment transaction. + // There isn't really anything better we can simply do, but in the future we might + // explore storing a set of recently-closed channels that got disconnected during + // closing_signed and avoiding broadcasting local commitment txn for some timeout to + // give our counterparty enough time to (potentially) broadcast a cooperative closing + // transaction. + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + if let MessageSendEvent::HandleError { ref action, ..
} = msg_events[0] { + match action { + &ErrorAction::SendErrorMessage { ref msg } => { + nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &msg); + assert_eq!(msg.channel_id, chan_1.2); + }, + _ => panic!("Unexpected event!"), + } + } else { panic!("Needed SendErrorMessage close"); } + + // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and + // checks it, but in this case nodes[1] didn't ever get a chance to receive a + // closing_signed so we do it ourselves + check_closed_broadcast!(nodes[1], false); + check_added_monitors!(nodes[1], 1); + } + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); +} + +#[test] +fn test_shutdown_rebroadcast() { + do_test_shutdown_rebroadcast(0); + do_test_shutdown_rebroadcast(1); + do_test_shutdown_rebroadcast(2); +} + +#[test] +fn test_upfront_shutdown_script() { + // BOLT 2 : Option upfront shutdown script: if a peer commits to its closing_script at channel opening, + // enforce it at the shutdown message + + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + // We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign + let flags = InitFeatures::known(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone()); + nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); + // Test that we enforce the upfront_scriptpubkey: providing a different one at closing causes us to disconnect the peer + nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str())); + check_added_monitors!(nodes[2], 1); + + // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone()); + nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0],
MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); + // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign + nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + + // We test that in case of a non-signaling peer we don't enforce the committed script at channel opening + let flags_no = InitFeatures::known().clear_upfront_shutdown_script(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone()); + nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); + check_added_monitors!(nodes[1], 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + + // We test that if the user opts out, we provide a zero-length script at channel opening and we are able to close + // the channel smoothly; the opt-out is from the channel initiator here + let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + + // We test that if the user opts out, we provide a zero-length script at channel opening and we are able to close + // the channel smoothly + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::SendClosingSigned { node_id, ..
} => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_unsupported_anysegwit_upfront_shutdown_script() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Use a non-v0 segwit script supported by option_shutdown_anysegwit + let node_features = InitFeatures::known().clear_shutdown_anysegwit(); + let anysegwit_shutdown_script = Builder::new() + .push_int(16) + .push_slice(&[0, 40]) + .into_script(); + + // Check script when handling an open_channel message + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone()); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), node_features.clone(), &open_channel); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)"); + }, + _ => panic!("Unexpected event"), + } + + // Check script when handling an accept_channel message + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + accept_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone()); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), node_features, &accept_channel); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)"); + }, + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_invalid_upfront_shutdown_script() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + + // Use a segwit v0 script with an unsupported witness program + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(0) + .push_slice(&[0, 0]) + .into_script()); + nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), 
InitFeatures::known(), &open_channel); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_0 OP_PUSHBYTES_2 0000)"); + }, + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_segwit_v0_shutdown_script() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + + // Use a segwit v0 script supported even without option_shutdown_anysegwit + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = Builder::new().push_int(0) + .push_slice(&[0; 20]) + .into_script(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::SendClosingSigned { node_id, .. 
} => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_anysegwit_shutdown_script() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + + // Use a non-v0 segwit script supported by option_shutdown_anysegwit + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = Builder::new().push_int(16) + .push_slice(&[0, 0]) + .into_script(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_unsupported_anysegwit_shutdown_script() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + node_cfgs[0].features = InitFeatures::known().clear_shutdown_anysegwit(); + node_cfgs[1].features = InitFeatures::known().clear_shutdown_anysegwit(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + // Check that using an unsupported shutdown script fails and a supported one succeeds. 
+ let supported_shutdown_script = chanmon_cfgs[1].keys_manager.get_shutdown_scriptpubkey(); + let unsupported_shutdown_script = + ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap(); + chanmon_cfgs[1].keys_manager + .expect(OnGetShutdownScriptpubkey { returns: unsupported_shutdown_script.clone() }) + .expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script }); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, node_cfgs[0].features.clone(), node_cfgs[1].features.clone()); + match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()) { + Err(APIError::IncompatibleShutdownScript { script }) => { + assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner()); + }, + Err(e) => panic!("Unexpected error: {:?}", e), + Ok(_) => panic!("Expected error"), + } + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + + // Use a non-v0 segwit script unsupported without option_shutdown_anysegwit + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = unsupported_shutdown_script.into_inner(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_cfgs[1].features, &node_0_shutdown); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[1] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020028) from remote peer".to_owned()); + }, + _ => panic!("Unexpected event"), + } + check_added_monitors!(nodes[0], 1); +} + +#[test] +fn test_invalid_shutdown_script() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + check_added_monitors!(nodes[1], 1); + + // Use a segwit v0 script with an unsupported witness program + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = Builder::new().push_int(0) + .push_slice(&[0, 0]) + .into_script(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[1] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned()) + }, + _ => panic!("Unexpected event"), + } + check_added_monitors!(nodes[0], 1); +} From 
d63b024eff602e6faa0c5ab43224b84319aabce1 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 26 Jul 2021 20:43:05 +0000 Subject: [PATCH 10/13] Force-close if finishing closing_signed negotiation takes a full minute --- lightning/src/ln/channel.rs | 46 ++++++++++++++++++++++++------ lightning/src/ln/channelmanager.rs | 23 +++++++++------ 2 files changed, 53 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 0ada482f367..434941e92fa 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -255,8 +255,6 @@ enum ChannelState { RemoteShutdownSent = 1 << 10, /// Flag which is set on ChannelFunded or FundingSent after sending a shutdown message. At this /// point, we may not add any new HTLCs to the channel. - /// TODO: Investigate some kind of timeout mechanism by which point the remote end must provide - /// us their shutdown. LocalShutdownSent = 1 << 11, /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about /// to drop us, but we store this anyway. @@ -503,6 +501,13 @@ pub(super) struct Channel { commitment_secrets: CounterpartyCommitmentSecrets, channel_update_status: ChannelUpdateStatus, + /// Once we reach `closing_negotiation_ready`, we set this, indicating that if closing_signed + /// does not complete within a single timer tick (one minute), we should force-close the channel. + /// This prevents us from keeping unusable channels around forever if our counterparty wishes + /// to DoS us. + /// Note that this field is reset to false on deserialization to give us a chance to connect to + /// our peer and start the closing_signed negotiation fresh. + closing_signed_in_flight: bool, /// Our counterparty's channel_announcement signatures provided in announcement_signatures. /// This can be used to rebroadcast the channel_announcement message later. @@ -740,6 +745,7 @@ impl Channel { commitment_secrets: CounterpartyCommitmentSecrets::new(), channel_update_status: ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, announcement_sigs: None, @@ -1006,6 +1012,7 @@ impl Channel { commitment_secrets: CounterpartyCommitmentSecrets::new(), channel_update_status: ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, announcement_sigs: None, @@ -3413,16 +3420,38 @@ impl Channel { self.closing_fee_limits.clone().unwrap() } + /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true + /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At + /// this point if we're the funder we should send the initial closing_signed, and in any case + /// shutdown should complete within a reasonable timeframe. + fn closing_negotiation_ready(&self) -> bool { + self.pending_inbound_htlcs.is_empty() && self.pending_outbound_htlcs.is_empty() && + self.channel_state & + (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 | + ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) + == BOTH_SIDES_SHUTDOWN_MASK && + self.pending_update_fee.is_none() + } + + /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning + /// an Err if no progress is being made and the channel should be force-closed instead. + /// Should be called on a one-minute timer.
+ pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> { + if self.closing_negotiation_ready() { + if self.closing_signed_in_flight { + return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned())); + } else { + self.closing_signed_in_flight = true; + } + } + Ok(()) + } + pub fn maybe_propose_closing_signed(&mut self, fee_estimator: &F, logger: &L) -> Result<(Option, Option), ChannelError> where F::Target: FeeEstimator, L::Target: Logger { - if !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() || - self.channel_state & - (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 | - ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) - != BOTH_SIDES_SHUTDOWN_MASK || - self.last_sent_closing_fee.is_some() || self.pending_update_fee.is_some() { + if self.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() { return Ok((None, None)); } @@ -5463,6 +5492,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel commitment_secrets, channel_update_status, + closing_signed_in_flight: false, announcement_sigs, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index c0e32bbdd47..cff314679ab 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2715,6 +2715,20 @@ impl ChannelMana let pending_msg_events = &mut channel_state.pending_msg_events; let short_to_id = &mut channel_state.short_to_id; channel_state.by_id.retain(|chan_id, chan| { + let counterparty_node_id = chan.get_counterparty_node_id(); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push((err, counterparty_node_id)); + } + if !retain_channel { return false; } + + if let Err(e) = chan.timer_check_closing_negotiation_progress() { + let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + handle_errors.push((Err(err), chan.get_counterparty_node_id())); + if needs_close { return false; } + } + match chan.channel_update_status() { ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), @@ -2741,20 +2755,13 @@ impl ChannelMana _ => {}, } - let counterparty_node_id = chan.get_counterparty_node_id(); - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); - if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } - if err.is_err() { - handle_errors.push((err, counterparty_node_id)); - } - retain_channel + true }); } for (err, counterparty_node_id) in handle_errors.drain(..) 
 			let _ = handle_error!(self, err, counterparty_node_id);
 		}
-
 		should_persist
 	});
 }

From dcb0832b7620959271fd7f746fe8e2ff5784d3e9 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Mon, 26 Jul 2021 22:50:49 +0000
Subject: [PATCH 11/13] Add a test for shutdown negotiation funder restart and timeout

---
 lightning/src/ln/channel.rs               |   8 +-
 lightning/src/ln/functional_test_utils.rs |  14 ++-
 lightning/src/ln/shutdown_tests.rs        | 108 ++++++++++++++++++++++
 3 files changed, 127 insertions(+), 3 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 434941e92fa..2440b64c7f8 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -452,7 +452,6 @@ pub(super) struct Channel {
 	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

 	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
-	closing_fee_limits: Option<(u64, u64)>,
 	target_closing_feerate_sats_per_kw: Option<u32>,

 	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
@@ -459,7 +458,14 @@ pub(super) struct Channel {
 	/// update, we need to delay processing it until later. We do that here by simply storing the
 	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
 	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

+	/// The minimum and maximum absolute fee we are willing to place on the closing transaction.
+	/// These are set once we reach `closing_negotiation_ready`.
+	#[cfg(test)]
+	pub(crate) closing_fee_limits: Option<(u64, u64)>,
+	#[cfg(not(test))]
+	closing_fee_limits: Option<(u64, u64)>,
+
 	/// The hash of the block in which the funding transaction was included.
 	funding_tx_confirmed_in: Option<BlockHash>,
 	funding_tx_confirmation_height: u32,

diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 565f1305963..3b0a8452b7b 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -416,12 +416,22 @@ macro_rules! get_htlc_update_msgs {
 	}
 }

+#[cfg(test)]
+macro_rules! get_channel_ref {
+	($node: expr, $lock: ident, $channel_id: expr) => {
+		{
+			$lock = $node.node.channel_state.lock().unwrap();
+			$lock.by_id.get_mut(&$channel_id).unwrap()
+		}
+	}
+}
+
 #[cfg(test)]
 macro_rules! get_feerate {
 	($node: expr, $channel_id: expr) => {
 		{
-			let chan_lock = $node.node.channel_state.lock().unwrap();
-			let chan = chan_lock.by_id.get(&$channel_id).unwrap();
+			let mut lock;
+			let chan = get_channel_ref!($node, lock, $channel_id);
 			chan.get_feerate()
 		}
 	}

diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index a4dd3a0781a..492b9155cd3 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -726,3 +726,111 @@ fn test_invalid_shutdown_script() {
 	}
 	check_added_monitors!(nodes[0], 1);
 }
+
+#[derive(PartialEq)]
+enum TimeoutStep {
+	AfterShutdown,
+	AfterClosingSigned,
+	NoTimeout,
+}
+
+fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
+	// The range-based closing_signed negotiation allows the funder to restart the process with a
+	// new range if the previous range did not overlap. This allows implementations to request user
+	// intervention allowing users to enter a new fee range. We do not implement the sending side
+	// of this, instead opting to allow users to enter an explicit "willing to pay up to X to avoid
+	// force-closing" value and relying on that instead.
+	//
+	// Here we test the fundee side of that restart mechanism, implementing the funder side of it
+	// manually.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+
+	send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
+
+	nodes[0].node.close_channel(&chan_id).unwrap();
+	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+	let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+
+	{
+		// Now we set nodes[1] to require a relatively high feerate for closing. This should result
+		// in it rejecting nodes[0]'s initial closing_signed, giving nodes[0] a chance to try
+		// again.
+		let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock *= 10;
+	}
+
+	let mut node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+	// nodes[0] should use a "reasonable" feerate, well under the 10 sat/vByte that nodes[1] thinks
+	// is the current prevailing feerate.
+	assert!(node_0_closing_signed.fee_satoshis <= 500);
+
+	if timeout_step != TimeoutStep::AfterShutdown {
+		nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+		// At this point nodes[1] should send back a warning message indicating it disagrees with the
+		// given channel-closing fee. Currently we do not implement warning messages so instead we
+		// remain silent here.
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		// Now deliver a mutated closing_signed indicating a higher acceptable fee range, which
+		// nodes[1] should happily accept and respond to.
+		node_0_closing_signed.fee_range.as_mut().unwrap().max_fee_satoshis *= 10;
+		{
+			let mut lock;
+			get_channel_ref!(nodes[0], lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10;
+		}
+		nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+		let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+		let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+		if timeout_step == TimeoutStep::NoTimeout {
+			nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.1.unwrap());
+		}
+	}
+
+	if timeout_step != TimeoutStep::NoTimeout {
+		assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	} else {
+		assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+	}
+
+	nodes[1].node.timer_tick_occurred();
+	nodes[1].node.timer_tick_occurred();
+
+	let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+	assert_eq!(txn.len(), 1);
+	assert_eq!(txn[0].output.len(), 2);
+
+	if timeout_step != TimeoutStep::NoTimeout {
+		assert!((txn[0].output[0].script_pubkey.is_v0_p2wpkh() &&
+		         txn[0].output[1].script_pubkey.is_v0_p2wsh()) ||
+		        (txn[0].output[1].script_pubkey.is_v0_p2wpkh() &&
+		         txn[0].output[0].script_pubkey.is_v0_p2wsh()));
+		check_closed_broadcast!(nodes[1], true);
+		check_added_monitors!(nodes[1], 1);
+	} else {
+		assert!(txn[0].output[0].script_pubkey.is_v0_p2wpkh());
+		assert!(txn[0].output[1].script_pubkey.is_v0_p2wpkh());
+
+		let events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+				assert_eq!(msg.contents.flags & 2, 2);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+}
+
+#[test]
+fn test_closing_signed_reinit_timeout() {
+	do_test_closing_signed_reinit_timeout(TimeoutStep::AfterShutdown);
+	do_test_closing_signed_reinit_timeout(TimeoutStep::AfterClosingSigned);
+	do_test_closing_signed_reinit_timeout(TimeoutStep::NoTimeout);
+}

From 0b481e91d5a1832665f2b76799c9d177db010128 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Fri, 13 Aug 2021 18:46:50 +0000
Subject: [PATCH 12/13] Slightly clarify the closing_signed error msg on fee mismatch

---
 lightning/src/ln/channel.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 2440b64c7f8..03968118a16 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -3635,7 +3635,7 @@ impl Channel {
 		let funding_redeemscript = self.get_funding_redeemscript();
 		let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
 		if used_total_fee != msg.fee_satoshis {
-			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee greater than the value they can claim. Fee in message: {}", msg.fee_satoshis)));
+			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
 		}
 		let mut sighash = hash_to_message!(&bip143::SigHashCache::new(&closing_tx).signature_hash(0, &funding_redeemscript, self.channel_value_satoshis, SigHashType::All)[..]);

From 82e7df1c0b651bc3b9f034b86c407e2ca7b74241 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Fri, 13 Aug 2021 23:01:31 +0000
Subject: [PATCH 13/13] Add relatively simple tests of the legacy and target closing fee

This doesn't exhaustively test closing fee negotiation at all, but
ensures that it is at least basically able to come to consensus and
sign cooperative closing transactions.
---
 lightning/src/ln/shutdown_tests.rs | 92 ++++++++++++++++++++++++++++++
 1 file changed, 92 insertions(+)

diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 492b9155cd3..a40ad23761e 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -834,3 +834,95 @@ fn test_closing_signed_reinit_timeout() {
 	do_test_closing_signed_reinit_timeout(TimeoutStep::AfterClosingSigned);
 	do_test_closing_signed_reinit_timeout(TimeoutStep::NoTimeout);
 }
+
+fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) {
+	// A simple test of the legacy shutdown fee negotiation logic.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+	if high_initiator_fee {
+		// If high_initiator_fee is set, set nodes[0]'s feerate significantly higher. This
+		// shouldn't impact the flow at all given nodes[1] will happily accept the higher fee.
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock *= 10;
+	}
+
+	nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+	let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+
+	let mut node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+	node_0_closing_signed.fee_range = None;
+	if high_initiator_fee {
+		assert!(node_0_closing_signed.fee_satoshis > 500);
+	} else {
+		assert!(node_0_closing_signed.fee_satoshis < 500);
+	}
+
+	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+	let (_, mut node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+	node_1_closing_signed.as_mut().unwrap().fee_range = None;
+
+	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
+	let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+	assert!(node_0_none.is_none());
+}
+
+#[test]
+fn simple_legacy_shutdown_test() {
+	do_simple_legacy_shutdown_test(false);
+	do_simple_legacy_shutdown_test(true);
+}
+
+#[test]
+fn simple_target_feerate_shutdown() {
+	// Simple test of target in `close_channel_with_target_feerate`.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	let chan_id = OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id();
+
+	nodes[0].node.close_channel_with_target_feerate(&chan_id, 253 * 10).unwrap();
+	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+	nodes[1].node.close_channel_with_target_feerate(&chan_id, 253 * 5).unwrap();
+	let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+
+	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
+
+	let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+	let (_, node_1_closing_signed_opt) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+	let node_1_closing_signed = node_1_closing_signed_opt.unwrap();
+
+	// nodes[1] was passed a target which was larger than the current channel feerate, which it
+	// should ignore in favor of the channel fee, as there is no use demanding a minimum higher
+	// than what will be paid on a force-close transaction. Note that we have to consider rounding,
+	// so only check that we're within 10 sats.
+	assert!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis >=
+	        node_1_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis * 10 - 5);
+	assert!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis <=
+	        node_1_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis * 10 + 5);
+
+	// Further, because nodes[0]'s target fee is larger than the `Normal` fee estimation plus our
+	// force-closure-avoidance buffer, min should equal max, and the nodes[1]-selected fee should
+	// be nodes[0]'s only available fee.
+	assert_eq!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis,
+	           node_0_closing_signed.fee_range.as_ref().unwrap().max_fee_satoshis);
+	assert_eq!(node_0_closing_signed.fee_range.as_ref().unwrap().min_fee_satoshis,
+	           node_0_closing_signed.fee_satoshis);
+	assert_eq!(node_0_closing_signed.fee_satoshis, node_1_closing_signed.fee_satoshis);
+
+	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+	let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+	assert!(node_0_none.is_none());
+}
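
The timeout machinery added in PATCH 10 reduces to a single flag sampled from a coarse timer: the first tick after negotiation becomes ready arms the flag, and the next tick force-closes. A minimal standalone sketch of the same two-tick pattern (the `Negotiation` type and `on_timer_tick` name are illustrative only, not LDK API):

/// Sketch of the pattern behind timer_check_closing_negotiation_progress().
struct Negotiation {
	/// Both shutdown messages exchanged and no HTLCs or fee updates pending.
	ready: bool,
	/// Armed on the first timer tick observed after `ready` becomes true.
	in_flight: bool,
}

impl Negotiation {
	/// Called once per timer tick (one minute in ChannelManager's case).
	fn on_timer_tick(&mut self) -> Result<(), &'static str> {
		if self.ready {
			if self.in_flight {
				// Second tick with negotiation still unfinished: give up.
				return Err("closing_signed negotiation stalled; force-close");
			}
			self.in_flight = true;
		}
		Ok(())
	}
}

fn main() {
	let mut n = Negotiation { ready: true, in_flight: false };
	assert!(n.on_timer_tick().is_ok());  // first tick arms the flag
	assert!(n.on_timer_tick().is_err()); // second tick requests force-close
}

Because the flag is only armed on the first tick after readiness, a stalled peer gets between one and two full ticks before the channel is closed, which is why the error string says "two timer ticks" while the field documentation speaks of a single tick.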
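
The restart mechanism exercised by do_test_closing_signed_reinit_timeout in PATCH 11 turns on whether the two sides' inclusive fee ranges intersect: the test first forces a miss, then widens nodes[0]'s max_fee_satoshis (and its internal closing_fee_limits) tenfold so the ranges overlap. A toy helper capturing just the overlap rule, under the assumption that a range is a bare (min_fee_satoshis, max_fee_satoshis) pair:

/// Hypothetical helper: intersection of two inclusive closing-fee ranges, or
/// None when they do not overlap and the funder may restart with a new,
/// wider range.
fn fee_range_intersection(a: (u64, u64), b: (u64, u64)) -> Option<(u64, u64)> {
	let min = a.0.max(b.0);
	let max = a.1.min(b.1);
	if min <= max { Some((min, max)) } else { None }
}

fn main() {
	// Disjoint ranges, as before the test mutates the message: no fee fits.
	assert_eq!(fee_range_intersection((100, 200), (500, 1_000)), None);
	// After widening the maximum tenfold the ranges overlap and any fee in
	// the intersection can be signed for.
	assert_eq!(fee_range_intersection((100, 2_000), (500, 1_000)), Some((500, 1_000)));
}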
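
Similarly, simple_target_feerate_shutdown in PATCH 13 relies on the fundee capping its minimum demand at the current channel feerate, since there is no point demanding more than a force-close would already pay. A hedged sketch of that clamping rule alone; the real LDK computation also involves the `Normal` fee estimate and a force-closure-avoidance buffer, per the comments in the test:

/// Illustrative only: ignore a user-supplied target above the channel
/// feerate when computing the minimum acceptable closing feerate.
fn min_closing_feerate(target_sat_per_kw: Option<u32>, channel_feerate_sat_per_kw: u32) -> u32 {
	match target_sat_per_kw {
		Some(target) => target.min(channel_feerate_sat_per_kw),
		None => channel_feerate_sat_per_kw,
	}
}

fn main() {
	// nodes[1]'s 253 * 5 target exceeds the 253 sat/kW channel feerate and is
	// capped back down, which is what makes the 10x min-fee relation in the
	// test's assertions hold.
	assert_eq!(min_closing_feerate(Some(253 * 5), 253), 253);
}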