Add infra to block ChannelMonitorUpdates on forwarded claims #2167

Merged
25 changes: 23 additions & 2 deletions lightning/src/ln/channel.rs
@@ -432,6 +432,12 @@ pub(super) struct ReestablishResponses {
pub shutdown_msg: Option<msgs::Shutdown>,
}

/// The return type of `force_shutdown`
pub(crate) type ShutdownResult = (
Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
);
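
A minimal standalone sketch of how a caller might destructure the new alias; the stand-in types and the `handle_force_shutdown` name are hypothetical, not part of this PR:

struct PublicKey([u8; 33]);
struct OutPoint { txid: [u8; 32], index: u16 }
struct ChannelMonitorUpdate { update_id: u64 }
struct HTLCSource;
struct PaymentHash([u8; 32]);

type ShutdownResult = (
    Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
    Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>,
);

fn handle_force_shutdown(res: ShutdownResult) {
    let (monitor_update, failed_htlcs) = res;
    // The counterparty PublicKey now rides along with the funding outpoint,
    // letting the caller locate the right per-peer state before applying.
    if let Some((_counterparty_node_id, _funding_txo, update)) = monitor_update {
        let _ = update.update_id; // apply against the matching ChannelMonitor
    }
    for (_source, _payment_hash, _counterparty, _channel_id) in failed_htlcs {
        // these HTLCs can be failed backwards immediately
    }
}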

/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
@@ -5044,10 +5050,25 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
self.pending_monitor_updates.is_empty()
}

pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
self.pending_monitor_updates.retain(|upd| {
if upd.update.update_id <= update_id {
assert!(!upd.blocked, "Completed update must have flown");
false
} else { true }
});
}

pub fn complete_one_mon_update(&mut self, update_id: u64) {
self.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
}

/// Returns an iterator over all unblocked monitor updates which have not yet completed.
pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
self.pending_monitor_updates.iter()
.filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
}
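
A self-contained model of the queue discipline these helpers encode (the `UpdateQueue` struct and method names are illustrative stand-ins, assuming `pending_monitor_updates` holds `{ update, blocked }` entries as the diff suggests): completed updates are pruned in order, a still-blocked update must never be reported complete, and only unblocked updates are handed out.

struct MonitorUpdate { update_id: u64 }
struct PendingUpdate { update: MonitorUpdate, blocked: bool }
struct UpdateQueue { pending: Vec<PendingUpdate> }

impl UpdateQueue {
    /// Prune every update with id <= `update_id`, insisting each one had
    /// actually been released to the persister (i.e. was not blocked).
    fn complete_through(&mut self, update_id: u64) {
        self.pending.retain(|upd| {
            if upd.update.update_id <= update_id {
                assert!(!upd.blocked, "Completed update must have flown");
                false
            } else { true }
        });
    }

    /// Yield only the updates that are free to go to the chain monitor.
    fn unblocked(&self) -> impl Iterator<Item = &MonitorUpdate> {
        self.pending.iter()
            .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
    }
}

fn main() {
    let mut queue = UpdateQueue { pending: vec![
        PendingUpdate { update: MonitorUpdate { update_id: 1 }, blocked: false },
        PendingUpdate { update: MonitorUpdate { update_id: 2 }, blocked: true },
    ]};
    assert_eq!(queue.unblocked().count(), 1); // update 2 is held back
    queue.complete_through(1);                // fine: update 1 was unblocked
    assert_eq!(queue.pending.len(), 1);
}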

/// Returns true if funding_created was sent/received.
pub fn is_funding_initiated(&self) -> bool {
self.channel_state >= ChannelState::FundingSent as u32
@@ -6228,7 +6249,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
/// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
/// immediately (others we will have to allow to time out).
pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
// Note that we MUST only generate a monitor update that indicates force-closure - we're
// called during initialization prior to the chain_monitor in the encompassing ChannelManager
being fully configured in some cases. Thus, it's likely any monitor events we generate will
@@ -6257,7 +6278,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
Some((funding_txo, ChannelMonitorUpdate {
Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
}))
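
The `channel_state` guard above is a plain bit-flag membership test; a small illustration of the pattern (flag values here are arbitrary, not LDK's actual constants):

const FUNDING_SENT: u32 = 1 << 0;
const CHANNEL_READY: u32 = 1 << 1;
const SHUTDOWN_COMPLETE: u32 = 1 << 2;

// "Is the channel in any state where a monitor was ever created?"
fn monitor_ever_created(channel_state: u32) -> bool {
    channel_state & (FUNDING_SENT | CHANNEL_READY | SHUTDOWN_COMPLETE) != 0
}

fn main() {
    assert!(!monitor_ever_created(0));            // pre-funding: no update to send
    assert!(monitor_ever_created(FUNDING_SENT));
    assert!(monitor_ever_created(CHANNEL_READY | (1 << 5))); // extra bits ignored
}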
469 changes: 340 additions & 129 deletions lightning/src/ln/channelmanager.rs

Large diffs are not rendered by default.

8 changes: 8 additions & 0 deletions lightning/src/ln/payment_tests.rs
@@ -609,6 +609,9 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

nodes[0].node.test_process_background_events();
check_added_monitors(&nodes[0], 1);

reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
@@ -634,6 +637,9 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

nodes[0].node.test_process_background_events();
check_added_monitors(&nodes[0], 1);

reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

match nodes[0].node.send_payment_with_route(&new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) {
@@ -782,6 +788,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
check_added_monitors(&nodes[0], 1);
}

#[test]
@@ -2869,6 +2876,7 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
let events = nodes[0].node.get_and_clear_pending_events();
assert!(events.is_empty());
check_added_monitors(&nodes[0], 1);
}

#[test]
2 changes: 2 additions & 0 deletions lightning/src/ln/priv_short_conf_tests.rs
@@ -853,10 +853,12 @@ fn test_0conf_channel_reorg() {
err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs.".to_owned()
});
check_closed_broadcast!(nodes[0], true);
check_added_monitors(&nodes[0], 1);
check_closed_event!(&nodes[1], 1, ClosureReason::ProcessingError {
err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs.".to_owned()
});
check_closed_broadcast!(nodes[1], true);
check_added_monitors(&nodes[1], 1);
}

#[test]
6 changes: 6 additions & 0 deletions lightning/src/ln/reload_tests.rs
@@ -774,6 +774,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
if persist_both_monitors {
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
check_added_monitors(&nodes[3], 2);
} else {
check_added_monitors(&nodes[3], 1);
}

// On restart, we should also get a duplicate PaymentClaimed event as we persisted the
@@ -1047,6 +1050,9 @@ fn removed_payment_no_manager_persistence() {
_ => panic!("Unexpected event"),
}

nodes[1].node.test_process_background_events();
check_added_monitors(&nodes[1], 1);

// Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
11 changes: 6 additions & 5 deletions lightning/src/ln/reorg_tests.rs
Expand Up @@ -302,8 +302,6 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
let relevant_txids = nodes[0].node.get_relevant_txids();
assert_eq!(relevant_txids.len(), 0);

handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
check_added_monitors!(nodes[1], 1);
{
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
@@ -349,8 +347,6 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
let relevant_txids = nodes[0].node.get_relevant_txids();
assert_eq!(relevant_txids.len(), 0);

handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
check_added_monitors!(nodes[1], 1);
{
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
@@ -364,7 +360,12 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
check_added_monitors!(nodes[0], 1);
let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) });
if reorg_after_reload || !reload_node {
handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) });
}

check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() });
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
4 changes: 2 additions & 2 deletions lightning/src/sync/nostd_sync.rs
@@ -49,7 +49,7 @@ impl<T> Mutex<T> {
impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
#[inline]
fn held_by_thread(&self) -> LockHeldState {
if self.lock().is_err() { return LockHeldState::HeldByThread; }
if self.inner.try_borrow_mut().is_err() { return LockHeldState::HeldByThread; }
else { return LockHeldState::NotHeldByThread; }
}
type ExclLock = MutexGuard<'a, T>;
@@ -115,7 +115,7 @@ impl<T> RwLock<T> {
impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
#[inline]
fn held_by_thread(&self) -> LockHeldState {
if self.write().is_err() { return LockHeldState::HeldByThread; }
if self.inner.try_borrow_mut().is_err() { return LockHeldState::HeldByThread; }
else { return LockHeldState::NotHeldByThread; }
}
type ExclLock = RwLockWriteGuard<'a, T>;
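
Both hunks replace a call that constructs a full guard with a direct `try_borrow_mut` probe on the inner cell (a `RefCell`, judging by the call). `RefCell::try_borrow_mut` fails exactly when a borrow is already outstanding, which in a single-threaded no-std build is what "held by thread" means. A self-contained illustration of that behavior:

use core::cell::RefCell;

fn main() {
    let cell = RefCell::new(0u32);
    {
        let _guard = cell.borrow(); // the "lock" is held by this thread
        assert!(cell.try_borrow_mut().is_err());
    }
    // Guard dropped: the mutable borrow succeeds again.
    assert!(cell.try_borrow_mut().is_ok());
}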