diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 34337769a26..fef042beee6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -43,14 +43,16 @@ jobs: build-no-std: true - toolchain: 1.36.0 build-no-std: false + test-log-variants: true - toolchain: 1.41.0 build-no-std: false - toolchain: 1.45.2 + build-net-old-tokio: true build-net-tokio: true build-no-std: false coverage: true - toolchain: 1.47.0 - build-no-std: false + build-no-std: true runs-on: ${{ matrix.platform }} steps: - name: Checkout source code @@ -61,6 +63,9 @@ jobs: toolchain: ${{ matrix.toolchain }} override: true profile: minimal + - name: Pin tokio to 1.14 for Rust 1.45 + if: "matrix.build-net-old-tokio" + run: cargo update -p tokio --precise "1.14.0" --verbose - name: Build on Rust ${{ matrix.toolchain }} with net-tokio if: "matrix.build-net-tokio && !matrix.coverage" run: cargo build --verbose --color always @@ -73,6 +78,13 @@ jobs: cargo build --verbose --color always -p lightning cargo build --verbose --color always -p lightning-invoice cargo build --verbose --color always -p lightning-persister + - name: Build on Rust ${{ matrix.toolchain }} with all Log-Limiting features + if: matrix.test-log-variants + run: | + cd lightning + for FEATURE in $(cat Cargo.toml | grep '^max_level_' | awk '{ print $1 }'); do + cargo build --verbose --color always --features $FEATURE + done - name: Build Block Sync Clients on Rust ${{ matrix.toolchain }} with features if: "matrix.build-net-tokio && !matrix.coverage" run: | @@ -89,6 +101,10 @@ jobs: RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client,tokio + - name: Test backtrace-debug builds on Rust ${{ matrix.toolchain }} + if: "matrix.build-no-std" + run: | + cd lightning && cargo test --verbose --color always --features backtrace - name: Test on Rust ${{ matrix.toolchain }} with net-tokio if: "matrix.build-net-tokio && !matrix.coverage" run: cargo test --verbose --color always @@ -97,11 +113,19 @@ jobs: run: RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always - name: Test on no-std bullds Rust ${{ matrix.toolchain }} if: "matrix.build-no-std && !matrix.coverage" + shell: bash # Default on Winblows is powershell run: | cd lightning cargo test --verbose --color always --no-default-features --features no-std # check if there is a conflict between no-std and the default std feature cargo test --verbose --color always --features no-std + # check if there is a conflict between no-std and the c_bindings cfg + RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always --no-default-features --features=no-std + cd .. + cd lightning-invoice + cargo test --verbose --color always --no-default-features --features no-std + # check if there is a conflict between no-std and the default std feature + cargo test --verbose --color always --features no-std cd .. 
- name: Test on no-std builds Rust ${{ matrix.toolchain }} and full code-linking for coverage generation if: "matrix.build-no-std && matrix.coverage" @@ -115,6 +139,14 @@ jobs: cargo test --verbose --color always -p lightning cargo test --verbose --color always -p lightning-invoice cargo build --verbose --color always -p lightning-persister + cargo build --verbose --color always -p lightning-background-processor + - name: Test C Bindings Modifications on Rust ${{ matrix.toolchain }} + if: "! matrix.build-net-tokio" + run: | + RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always -p lightning + RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always -p lightning-invoice + RUSTFLAGS="--cfg=c_bindings" cargo build --verbose --color always -p lightning-persister + RUSTFLAGS="--cfg=c_bindings" cargo build --verbose --color always -p lightning-background-processor - name: Test Block Sync Clients on Rust ${{ matrix.toolchain }} with features if: "matrix.build-net-tokio && !matrix.coverage" run: | @@ -156,7 +188,7 @@ jobs: done - name: Upload coverage if: matrix.coverage - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v2 with: # Could you use this to fake the coverage report for your PR? Sure. # Will anyone be impressed by your amazing coverage? No @@ -220,7 +252,7 @@ jobs: profile: minimal - name: Fetch full tree and rebase on upstream run: | - git remote add upstream https://github.com/rust-bitcoin/rust-lightning + git remote add upstream https://github.com/lightningdevkit/rust-lightning git fetch upstream export GIT_COMMITTER_EMAIL="rl-ci@example.com" export GIT_COMMITTER_NAME="RL CI" @@ -255,7 +287,7 @@ jobs: linting: runs-on: ubuntu-latest env: - TOOLCHAIN: 1.45.2 + TOOLCHAIN: 1.47.0 steps: - name: Checkout source code uses: actions/checkout@v2 diff --git a/CHANGELOG.md b/CHANGELOG.md index a23f9d76a95..df1204e9bc7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,359 @@ +# 0.0.104 - 2021-12-17 + +## API Updates + * A `PaymentFailed` event is now provided to indicate a payment has failed + fully. This event is generated either after + `ChannelManager::abandon_payment` is called for a given payment, or the + payment times out, and there are no further pending HTLCs for the payment. + This event should be used to detect payment failure instead of + `PaymentPathFailed::all_paths_failed`, unless no payment retries occur via + `ChannelManager::retry_payment` (#1202). + * Payment secrets are now generated deterministically using material from + the new `KeysInterface::get_inbound_payment_key_material` (#1177). + * A `PaymentPathSuccessful` event has been added to ease passing success info + to a scorer, along with a `Score::payment_path_successful` method to accept + such info (#1178, #1197). + * `Score::channel_penalty_msat` has additional arguments describing the + channel's capacity and the HTLC amount being sent over the channel (#1166). + * A new log level `Gossip` has been added, which is used for verbose + information generated during network graph sync. Enabling the + `max_level_trace` feature or ignoring `Gossip` log entries reduces log + growth during initial start up from many GiB to several MiB (#1145). + * The `allow_wallclock_use` feature has been removed in favor of only using + the `std` and `no-std` features (#1212). + * `NetworkGraph` can now remove channels that we haven't heard updates for in + two weeks with `NetworkGraph::remove_stale_channels{,with_time}`. 
The first + is called automatically if a `NetGraphMsgHandler` is passed to + `BackgroundProcessor::start` (#1212). + * `InvoicePayer::pay_pubkey` was added to enable sending "keysend" payments to + supported recipients, using the `InvoicePayer` to handle retries (#1160). + * `user_payment_id` has been removed from `PaymentPurpose`, and + `ChannelManager::create_inbound_payment{,_for_hash}` (#1180). + * Updated documentation for several `ChannelManager` functions to remove stale + references to panics which no longer occur (#1201). + * The `Score` and `LockableScore` objects have moved into the + `routing::scoring` module instead of being in the `routing` module (#1166). + * The `Time` parameter to `ScorerWithTime` is no longer exposed, + instead being fixed based on the `std`/`no-std` feature (#1184). + * `ChannelDetails::balance_msat` was added to fetch a channel's balance + without subtracting the reserve values, lining up with on-chain claim amounts + less on-chain fees (#1203). + * An explicit `UserConfig::accept_inbound_channels` flag is now provided, + removing the need to set `min_funding_satoshis` to > 21 million BTC (#1173). + * Inbound channels that fail to see the funding transaction confirm within + 2016 blocks are automatically force-closed with + `ClosureReason::FundingTimedOut` (#1083). + * We now accept a channel_reserve value of 0 from counterparties, as it is + insecure for our counterparty but not us (#1163). + * `NetAddress::OnionV2` parsing was removed as version 2 onion services are no + longer supported in modern Tor (#1204). + * Generation and signing of anchor outputs is now supported in the + `KeysInterface`, though no support for them exists in the channel itself (#1176). + +## Bug Fixes + * Fixed a race condition in `InvoicePayer` where paths may be retried after + the retry count has been exceeded. In this case the + `Event::PaymentPathFailed::all_paths_failed` field is not a reliable payment + failure indicator. There was no acceptable alternative indicator, so + `Event::PaymentFailed` has been added to provide one (#1202). + * Reduced the blocks-before-timeout we expect of outgoing HTLCs before + refusing to forward. This check was overly strict and resulted in refusing + to forward some HTLCs to a next hop that had a lower security threshold than + us (#1119). + * LDK no longer attempts to update the channel fee for outbound channels when + we cannot afford the new fee. This could have caused force-closure by our + channel counterparty (#1054). + * Fixed several bugs which may have prevented the reliable broadcast of our + own channel announcements and updates (#1169). + * Fixed a rare bug which may have resulted in spurious route finding failures + when using last-hop hints and MPP with large value payments (#1168). + * `KeysManager::spend_spendable_outputs` no longer adds a change output that + is below the dust threshold for non-standard change scripts (#1131). + * Fixed a minor memory leak when attempting to send a payment that fails due + to an error when updating the `ChannelMonitor` (#1143). + * Fixed a bug where a `FeeEstimator` that returns values rounded to the next + sat/vbyte may result in force-closures (#1208). + * Handle MPP timeout HTLC error codes, instead of considering the recipient to + have sent an invalid error and removing them from the network graph (#1148). + +## Serialization Compatibility + * All above new events/fields are ignored by prior clients. 
All above new + events/fields are not present when reading objects serialized by prior + versions of the library. + * Payment secrets are now generated deterministically. This reduces the memory + footprint for inbound payments, however, newly-generated inbound payments + using `ChannelManager::create_inbound_payment{,_for_hash}` will not be + receivable using versions prior to 0.0.104. + `ChannelManager::create_inbound_payment{,_for_hash}_legacy` are provided for + backwards compatibility (#1177). + * `PaymentPurpose::InvoicePayment::user_payment_id` will be 0 when reading + objects written with 0.0.104 when read by 0.0.103 and previous (#1180). + +In total, this release features 51 files changed, 5356 insertions, 2238 +deletions in 107 commits from 9 authors, in alphabetical order: + * Antoine Riard + * Conor Okus + * Devrandom + * Duncan Dean + * Elias Rohrer + * Jeffrey Czyz + * Ken Sedgwick + * Matt Corallo + * Valentine Wallace + + +# 0.0.103 - 2021-11-02 + +## API Updates + * This release is almost entirely focused on a new API in the + `lightning-invoice` crate - the `InvoicePayer`. `InvoicePayer` is a + struct which takes a reference to a `ChannelManager` and a `Router` + and retries payments as paths fail. It limits retries to a configurable + number, but is not serialized to disk and may retry additional times across + a serialization/load. In order to learn about failed payments, it must + receive `Event`s directly from the `ChannelManager`, wrapping a + user-provided `EventHandler` which it provides all unhandled events to + (#1059). + * `get_route` has been renamed `find_route` (#1059) and now takes a + `RouteParameters` struct in replacement of a number of its long list of + arguments (#1134). The `Payee` in the `RouteParameters` is stored in the + `Route` object returned and provided in the `RouteParameters` contained in + `Event::PaymentPathFailed` (#1059). + * `ChannelMonitor`s must now be persisted after calls that provide new block + data, prior to `MonitorEvent`s being passed back to `ChannelManager` for + processing. If you are using a `ChainMonitor` this is handled for you. + The `Persist` API has been updated to `Option`ally take the + `ChannelMonitorUpdate` as persistence events that result from chain data no + longer have a corresponding update (#1108). + * `routing::Score` now has a `payment_path_failed` method which it can use to + learn which channels often fail payments. It is automatically called by + `InvoicePayer` for failed payment paths (#1144). + * The default `Scorer` implementation is now a type alias to a type generic + across different clocks and supports serialization to persist scoring data + across restarts (#1146). + * `Event::PaymentSent` now includes the full fee which was spent across all + payment paths which were fulfilled or pending when the payment was fulfilled + (#1142). + * `Event::PaymentSent` and `Event::PaymentPathFailed` now include the + `PaymentId` which matches the `PaymentId` returned from + `ChannelManager::send_payment` or `InvoicePayer::pay_invoice` (#1059). + * `NetGraphMsgHandler` now takes a `Deref` to the `NetworkGraph`, allowing for + shared references to the graph data to make serialization and references to + the graph data in the `InvoicePayer`'s `Router` simpler (#1149). + * `routing::Score::channel_penalty_msat` has been updated to provide the + `NodeId` of both the source and destination nodes of a channel (#1133). 
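As a rough illustration of the renamed router API described in the list above (a minimal sketch, not part of the release diff, mirroring the `find_route` call in the `fuzz/src/full_stack.rs` hunk further down; the helper name and the fixed CLTV delta are illustrative assumptions):

```rust
use bitcoin::secp256k1::key::PublicKey;
use lightning::routing::network_graph::NetworkGraph;
use lightning::routing::router::{find_route, Payee, Route, RouteParameters};
use lightning::routing::scoring::Scorer;
use lightning::util::logger::Logger;

// Hypothetical helper: the old `get_route` argument list is now bundled into
// `RouteParameters`, and a `Score` implementation (here the default `Scorer`)
// is consulted while finding a path.
fn route_to_node<L: std::ops::Deref>(
	our_node_id: &PublicKey, recipient: PublicKey, amount_msat: u64,
	network_graph: &NetworkGraph, logger: L, scorer: &Scorer,
) -> Option<Route> where L::Target: Logger {
	let params = RouteParameters {
		payee: Payee::from_node_id(recipient),
		final_value_msat: amount_msat,
		final_cltv_expiry_delta: 42, // illustrative value, as in the fuzz target
	};
	// `None` stands in for the optional list of usable first-hop channels.
	find_route(our_node_id, &params, network_graph, None, logger, scorer).ok()
}
```

Bundling the old argument list into `RouteParameters` also lets the same parameters be echoed back in `Event::PaymentPathFailed`, which is part of what allows `InvoicePayer` to retry failed paths.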
+ +## Bug Fixes + * Previous versions would often disconnect peers during initial graph sync due + to ping timeouts while processing large numbers of gossip messages. We now + delay disconnecting peers if we receive messages from them even if it takes + a while to receive a pong from them. Further, we avoid sending too many + gossip messages between pings to ensure we should always receive pongs in a + timely manner (#1137). + * If a payment was sent, creating an outbound HTLC and sending it to our + counterparty (implying the `ChannelMonitor` was persisted on disk), but the + `ChannelManager` was not persisted prior to shutdown/crash, no + `Event::PaymentPathFailed` event was generated if the HTLC was eventually + failed on chain. Events are now consistent irrespective of `ChannelManager` + persistence or non-persistence (#1104). + +## Serialization Compatibility + * All above new Events/fields are ignored by prior clients. All above new + Events/fields are not present when reading objects serialized by prior + versions of the library. + * Payments for which a `Route` was generated using a previous version or for + which the payment was originally sent by a previous version of the library + will not be retried by an `InvoicePayer`. + +This release was singularly focused and some contributions by third parties +were delayed. +In total, this release features 38 files changed, 4414 insertions, and 969 +deletions in 71 commits from 2 authors, in alphabetical order: + + * Jeffrey Czyz + * Matt Corallo + + +# 0.0.102 - 2021-10-18 + +## API Updates + * `get_route` now takes a `Score` as an argument. `Score` is queried during + the route-finding process, returning the absolute amounts which you are + willing to pay to avoid routing over a given channel. As a default, a + `Scorer` is provided which returns a constant amount, with a suggested + default of 500 msat. This translates to a willingness to pay up to 500 msat + in additional fees per hop in order to avoid additional hops (#1124). + * `Event::PaymentPathFailed` now contains a `short_channel_id` field which may + be filled in with a channel that can be "blamed" for the payment failure. + Payment retries should likely avoid the given channel for some time (#1077). + * `PublicKey`s in `NetworkGraph` have been replaced with a `NodeId` struct + which contains only a simple `[u8; 33]`, substantially improving + `NetworkGraph` deserialization performance (#1107). + * `ChainMonitor`'s `HashMap` of `ChannelMonitor`s is now private, exposed via + `Chainmonitor::get_monitor` and `ChainMonitor::list_monitors` instead + (#1112). + * When an outbound channel is closed prior to the broadcasting of its funding + transaction, but after you call + `ChannelManager::funding_transaction_generated`, a new event type, + `Event::DiscardFunding`, is generated, informing you the transaction was not + broadcasted and that you can spend the same inputs again elsewhere (#1098). + * `ChannelManager::create_channel` now returns the temporary channel ID which + may later appear in `Event::ChannelClosed` or `ChannelDetails` prior to the + channel being funded (#1121). + * `Event::PaymentSent` now contains the payment hash as well as the payment + preimage (#1062). + * `ReadOnlyNetworkGraph::get_addresses` now returns owned `NetAddress` rather + than references. As a side-effect this method is now exposed in foreign + language bindings (#1115). 
+ * The `Persist` and `ChannelMonitorUpdateErr` types have moved to the + `lightning::chain::chainmonitor` and `lightning::chain` modules, + respectively (#1112). + * `ChannelManager::send_payment` now returns a `PaymentId` which identifies a + payment (whether MPP or not) and can be used to retry the full payment or + MPP parts through `retry_payment` (#1096). Note that doing so is currently + *not* crash safe, and you may find yourself sending twice. It is recommended + that you *not* use the `retry_payment` API until the next release. + +## Bug Fixes + * Due to an earlier fix for the Lightning dust inflation vulnerability tracked + in CVE-2021-41591/CVE-2021-41592/CVE-2021-41593 in 0.0.100, we required + counterparties to accept a dust limit slightly lower than the dust limit now + required by other implementations. This appeared as, at least, latest lnd + always refusing to accept channels opened by LDK clients (#1065). + * If there are multiple channels available to the same counterparty, + `get_route` would only consider the channel listed last as available for + sending (#1100). + * `Persist` implementations returning + `ChannelMonitorUpdateErr::TemporaryFailure` from `watch_channel` previously + resulted in the `ChannelMonitor` not being stored at all, resulting in a + panic after monitor updating is complete (#1112). + * If payments are pending awaiting forwarding at startup, an + `Event::PendingHTLCsForwardable` event will always be provided. This ensures + user code calls `ChannelManager::process_pending_htlc_fowards` even if it + shut down while awaiting the batching timer during the previous run (#1076). + * If a call to `ChannelManager::send_payment` failed due to lack of + availability of funds locally, LDK would store the payment as pending + forever, with no ability to retry or fail it, leaking memory (#1109). + +## Serialization Compatibility + * All above new Events/fields are ignored by prior clients. All above new + Events/fields, except for `Event::PaymentSent::payment_hash` are not present + when reading objects serialized by prior versions of the library. + +In total, this release features 32 files changed, 2248 insertions, and 1483 +deletions in 51 commits from 7 authors, in alphabetical order: + + * 1nF0rmed + * Duncan Dean + * Elias Rohrer + * Galder Zamarreño + * Jeffrey Czyz + * Matt Corallo + * Valentine Wallace + + +# 0.0.101 - 2021-09-23 + +## API Updates + * Custom message types are now supported directly in the `PeerManager`, + allowing you to send and receive messages of any type that is not natively + understood by LDK. This requires a new type bound on `PeerManager`, a + `CustomMessageHandler`. `IgnoringMessageHandler` provides a simple default + for this new bound for ignoring unknown messages (#1031, #1074). + * Route graph updates as a result of failed payments are no longer provided as + `MessageSendEvent::PaymentFailureNetworkUpdate` but instead included in a + new field in the `Event::PaymentFailed` events. Generally, this means route + graph updates are no longer handled as a part of the `PeerManager` but + instead through the new `EventHandler` implementation for + `NetGraphMsgHandler`. To make this easy, a new parameter to + `lightning-background-processor::BackgroundProcessor::start` is added, which + contains an `Option`al `NetGraphmsgHandler`. If provided as `Some`, relevant + events will be processed by the `NetGraphMsgHandler` prior to normal event + handling (#1043). + * `NetworkGraph` is now, itself, thread-safe. 
Accordingly, most functions now + take `&self` instead of `&mut self` and the graph data can be accessed + through `NetworkGraph.read_only` (#1043). + * The balances available on-chain to claim after a channel has been closed are + now exposed via `ChannelMonitor::get_claimable_balances` and + `ChainMonitor::get_claimable_balances`. The second can be used to get + information about all closed channels which still have on-chain balances + associated with them. See enum variants of `ln::channelmonitor::Balance` and + method documentation for the above methods for more information on the types + of balances exposed (#1034). + * When one HTLC of a multi-path payment fails, the new field `all_paths_failed` + in `Event::PaymentFailed` is set to `false`. This implies that the payment + has not failed, but only one part. Payment resolution is only indicated by an + `Event::PaymentSent` event or an `Event::PaymentFailed` with + `all_paths_failed` set to `true`, which is also set for the last remaining + part of a multi-path payment (#1053). + * To better capture the context described above, `Event::PaymentFailed` has + been renamed to `Event::PaymentPathFailed` (#1084). + * A new event, `ChannelClosed`, is provided by `ChannelManager` when a channel + is closed, including a reason and error message (if relevant, #997). + * `lightning-invoice` now considers invoices with sub-millisatoshi precision + to be invalid, and requires millisatoshi values during construction (thus + you must call `amount_milli_satoshis` instead of `amount_pico_btc`, #1057). + * The `BaseSign` interface now includes two new hooks which provide additional + information about commitment transaction signatures and revocation secrets + provided by our counterparty, allowing additional verification (#1039). + * The `BaseSign` interface now includes additional information for cooperative + close transactions, making it easier for a signer to verify requests (#1064). + * `Route` has two additional helper methods to get fees and amounts (#1063). + * `Txid` and `Transaction` objects can now be deserialized from responses when + using the HTTP client in the `lightning-block-sync` crate (#1037, #1061). + +## Bug Fixes + * Fix a panic when reading a lightning invoice with a non-recoverable + signature. Further, restrict lightning invoice parsing to require payment + secrets and better handle a few edge cases as required by BOLT 11 (#1057). + * Fix a panic when receiving multiple messages (such as HTLC fulfill messages) + after a call to `chain::Watch::update_channel` returned + `Err(ChannelMonitorUpdateErr::TemporaryFailure)` with no + `ChannelManager::channel_monitor_updated` call in between (#1066). + * For multi-path payments, `Event::PaymentSent` is no longer generated + multiple times, once for each independent part (#1053). + * Multi-hop route hints in invoices are now considered in the default router + provided via `get_route` (#1040). + * The time peers have to respond to pings has been increased when building + with debug assertions enabled. This avoids peer disconnections on slow hosts + when running in debug mode (#1051). + * The timeout for the first byte of a response for requests from the + `lightning-block-sync` crate has been increased to 300 seconds to better + handle the long hangs in Bitcoin Core when it syncs to disk (#1090). 
+ +## Serialization Compatibility + * Due to a bug in 0.0.100, `Event`s written by 0.0.101 which are of a type not + understood by 0.0.100 may lead to `Err(DecodeError::InvalidValue)` or corrupt + deserialized objects in 0.100. Such `Event`s will lead to an + `Err(DecodeError::InvalidValue)` in versions prior to 0.0.100. The only such + new event written by 0.0.101 is `Event::ChannelClosed` (#1087). + * Payments that were initiated in versions prior to 0.0.101 may still + generate duplicate `PaymentSent` `Event`s or may have spurious values for + `Event::PaymentPathFailed::all_paths_failed` (#1053). + * The return values of `ChannelMonitor::get_claimable_balances` (and, thus, + `ChainMonitor::get_claimable_balances`) may be spurious for channels where + the spend of the funding transaction appeared on chain while running a + version prior to 0.0.101. `Balance` information should only be relied upon + for channels that were closed while running 0.0.101+ (#1034). + * Payments failed while running versions prior to 0.0.101 will never have a + `Some` for the `network_update` field (#1043). + +In total, this release features 67 files changed, 4980 insertions, 1888 +deletions in 89 commits from 12 authors, in alphabetical order: + * Antoine Riard + * Devrandom + * Galder Zamarreño + * Giles Cope + * Jeffrey Czyz + * Joseph Goulden + * Matt Corallo + * Sergi Delgado Segura + * Tibo-lg + * Valentine Wallace + * abhik-99 + * vss96 + + # 0.0.100 - 2021-08-17 ## API Updates diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3cf463ba02e..2c1fb0d8a73 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,8 +21,8 @@ Communication about Rust-Lightning happens primarily on #ldk-dev on the Discussion about code base improvements happens in GitHub issues and on pull requests. -Major projects are tracked [here](https://github.com/rust-bitcoin/rust-lightning/projects). -Major milestones are tracked [here](https://github.com/rust-bitcoin/rust-lightning/milestones?direction=asc&sort=title&state=open). +Major projects are tracked [here](https://github.com/lightningdevkit/rust-lightning/projects). +Major milestones are tracked [here](https://github.com/lightningdevkit/rust-lightning/milestones?direction=asc&sort=title&state=open). Getting Started --------------- @@ -33,7 +33,7 @@ This doesn't mean don't be ambitious with the breadth and depth of your contribu understand the project culture before investing an asymmetric number of hours on development compared to your merged work. -Browsing through the [meeting minutes](https://github.com/rust-bitcoin/rust-lightning/wiki/Meetings) +Browsing through the [meeting minutes](https://github.com/lightningdevkit/rust-lightning/wiki/Meetings) is a good first step. You will learn who is working on what, how releases are drafted, what are the pending tasks to deliver, where you can contribute review bandwidth, etc. diff --git a/README.md b/README.md index d73e71da8aa..f44f59a3fb4 100644 --- a/README.md +++ b/README.md @@ -87,7 +87,7 @@ In general, Rust-Lightning does not provide (but LDK has implementations of): hardware wallets. 
LDK's customizability was presented about at Advancing Bitcoin in February 2020: -https://vimeo.com/showcase/7131712/video/418412286 +https://vimeo.com/showcase/8372504/video/412818125 Design Goal ----------- diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index f85ac9b9def..f41b17a1430 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -30,11 +30,11 @@ use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hash_types::{BlockHash, WPubkeyHash}; use lightning::chain; -use lightning::chain::{BestBlock, chainmonitor, channelmonitor, Confirm, Watch}; -use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent}; +use lightning::chain::{BestBlock, ChannelMonitorUpdateErr, chainmonitor, channelmonitor, Confirm, Watch}; +use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent}; use lightning::chain::transaction::OutPoint; use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; -use lightning::chain::keysinterface::{KeysInterface, InMemorySigner}; +use lightning::chain::keysinterface::{KeyMaterial, KeysInterface, InMemorySigner}; use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs}; use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; @@ -50,8 +50,7 @@ use lightning::util::events::MessageSendEventsProvider; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use lightning::routing::router::{Route, RouteHop}; - -use utils::test_logger; +use utils::test_logger::{self, Output}; use utils::test_persister::TestPersister; use bitcoin::secp256k1::key::{PublicKey,SecretKey}; @@ -99,8 +98,8 @@ impl Writer for VecWriter { struct TestChainMonitor { pub logger: Arc, pub keys: Arc, + pub persister: Arc, pub chain_monitor: Arc, Arc, Arc, Arc, Arc>>, - pub update_ret: Mutex>, // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization // logic will automatically force-close our channels for us (as we don't have an up-to-date // monitor implying we are not able to punish misbehaving counterparties). 
Because this test @@ -112,28 +111,27 @@ struct TestChainMonitor { impl TestChainMonitor { pub fn new(broadcaster: Arc, logger: Arc, feeest: Arc, persister: Arc, keys: Arc) -> Self { Self { - chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, persister)), + chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, Arc::clone(&persister))), logger, keys, - update_ret: Mutex::new(Ok(())), + persister, latest_monitors: Mutex::new(HashMap::new()), should_update_manager: atomic::AtomicBool::new(false), } } } impl chain::Watch for TestChainMonitor { - fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { + fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), chain::ChannelMonitorUpdateErr> { let mut ser = VecWriter(Vec::new()); monitor.write(&mut ser).unwrap(); if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) { panic!("Already had monitor pre-watch_channel"); } self.should_update_manager.store(true, atomic::Ordering::Relaxed); - assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok()); - self.update_ret.lock().unwrap().clone() + self.chain_monitor.watch_channel(funding_txo, monitor) } - fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { + fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> { let mut map_lock = self.latest_monitors.lock().unwrap(); let mut map_entry = match map_lock.entry(funding_txo) { hash_map::Entry::Occupied(entry) => entry, @@ -146,8 +144,7 @@ impl chain::Watch for TestChainMonitor { deserialized_monitor.write(&mut ser).unwrap(); map_entry.insert((update.update_id, ser.0)); self.should_update_manager.store(true, atomic::Ordering::Relaxed); - assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok()); - self.update_ret.lock().unwrap().clone() + self.chain_monitor.update_channel(funding_txo, update) } fn release_pending_monitor_events(&self) -> Vec { @@ -167,6 +164,10 @@ impl KeysInterface for KeyProvider { SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap() } + fn get_inbound_payment_key_material(&self) -> KeyMaterial { + KeyMaterial([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]) + } + fn get_destination_script(&self) -> Script { let secp_ctx = Secp256k1::signing_only(); let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap(); @@ -273,8 +274,8 @@ fn check_payment_err(send_err: PaymentSendFailure) { PaymentSendFailure::AllFailedRetrySafe(per_path_results) => { for api_err in per_path_results { check_api_err(api_err); } }, - PaymentSendFailure::PartialFailure(per_path_results) => { - for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err); } } + PaymentSendFailure::PartialFailure { results, .. 
} => { + for res in results { if let Err(api_err) = res { check_api_err(api_err); } } }, } } @@ -286,7 +287,7 @@ fn get_payment_secret_hash(dest: &ChanMan, payment_id: &mut u8) -> Option<(Payme let mut payment_hash; for _ in 0..256 { payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).into_inner()); - if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 3600, 0) { + if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 3600) { return Some((payment_secret, payment_hash)); } *payment_id = payment_id.wrapping_add(1); @@ -307,6 +308,7 @@ fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, p fee_msat: amt, cltv_expiry_delta: 200, }]], + payee: None, }, payment_hash, &Some(payment_secret)) { check_payment_err(err); false @@ -332,6 +334,7 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des fee_msat: amt, cltv_expiry_delta: 200, }]], + payee: None, }, payment_hash, &Some(payment_secret)) { check_payment_err(err); false @@ -339,14 +342,16 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des } #[inline] -pub fn do_test(data: &[u8], out: Out) { +pub fn do_test(data: &[u8], underlying_out: Out) { + let out = SearchingOutput::new(underlying_out); let broadcast = Arc::new(TestBroadcaster{}); macro_rules! make_node { ($node_id: expr, $fee_estimator: expr) => { { let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) }); - let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager))); + let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), + Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(&keys_manager))); let mut config = UserConfig::default(); config.channel_options.forwarding_fee_proportional_millionths = 0; @@ -365,7 +370,8 @@ pub fn do_test(data: &[u8], out: Out) { ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { { let keys_manager = Arc::clone(& $keys_manager); let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager))); + let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), + Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(& $keys_manager))); let mut config = UserConfig::default(); config.channel_options.forwarding_fee_proportional_millionths = 0; @@ -732,7 +738,11 @@ pub fn do_test(data: &[u8], out: Out) { // force-close which we should detect as an error). assert_eq!(msg.contents.flags & 2, 0); }, - _ => panic!("Unhandled message event {:?}", event), + _ => if out.may_fail.load(atomic::Ordering::Acquire) { + return; + } else { + panic!("Unhandled message event {:?}", event) + }, } if $limit_events != ProcessMessages::AllMessages { break; @@ -764,7 +774,11 @@ pub fn do_test(data: &[u8], out: Out) { events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set! 
}, - _ => panic!("Unhandled message event"), + _ => if out.may_fail.load(atomic::Ordering::Acquire) { + return; + } else { + panic!("Unhandled message event") + }, } } push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0)); @@ -781,7 +795,11 @@ pub fn do_test(data: &[u8], out: Out) { events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set! }, - _ => panic!("Unhandled message event"), + _ => if out.may_fail.load(atomic::Ordering::Acquire) { + return; + } else { + panic!("Unhandled message event") + }, } } push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2)); @@ -827,12 +845,17 @@ pub fn do_test(data: &[u8], out: Out) { } }, events::Event::PaymentSent { .. } => {}, - events::Event::PaymentFailed { .. } => {}, + events::Event::PaymentPathSuccessful { .. } => {}, + events::Event::PaymentPathFailed { .. } => {}, events::Event::PaymentForwarded { .. } if $node == 1 => {}, events::Event::PendingHTLCsForwardable { .. } => { nodes[$node].process_pending_htlc_forwards(); }, - _ => panic!("Unhandled event"), + _ => if out.may_fail.load(atomic::Ordering::Acquire) { + return; + } else { + panic!("Unhandled event") + }, } } had_events @@ -846,31 +869,35 @@ pub fn do_test(data: &[u8], out: Out) { // bit-twiddling mutations to have similar effects. This is probably overkill, but no // harm in doing so. - 0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure), - 0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure), - 0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure), - 0x04 => *monitor_a.update_ret.lock().unwrap() = Ok(()), - 0x05 => *monitor_b.update_ret.lock().unwrap() = Ok(()), - 0x06 => *monitor_c.update_ret.lock().unwrap() = Ok(()), + 0x00 => *monitor_a.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure), + 0x01 => *monitor_b.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure), + 0x02 => *monitor_c.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure), + 0x04 => *monitor_a.persister.update_ret.lock().unwrap() = Ok(()), + 0x05 => *monitor_b.persister.update_ret.lock().unwrap() = Ok(()), + 0x06 => *monitor_c.persister.update_ret.lock().unwrap() = Ok(()), 0x08 => { if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) { - nodes[0].channel_monitor_updated(&chan_1_funding, *id); + monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id); + nodes[0].process_monitor_events(); } }, 0x09 => { if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) { - nodes[1].channel_monitor_updated(&chan_1_funding, *id); + monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id); + nodes[1].process_monitor_events(); } }, 0x0a => { if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) { - nodes[1].channel_monitor_updated(&chan_2_funding, *id); + monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id); + nodes[1].process_monitor_events(); } }, 0x0b => { if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) { - nodes[2].channel_monitor_updated(&chan_2_funding, *id); + monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id); + nodes[2].process_monitor_events(); } 
}, @@ -1071,22 +1098,26 @@ pub fn do_test(data: &[u8], out: Out) { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. // First make sure there are no pending monitor updates, resetting the error state - // and calling channel_monitor_updated for each monitor. - *monitor_a.update_ret.lock().unwrap() = Ok(()); - *monitor_b.update_ret.lock().unwrap() = Ok(()); - *monitor_c.update_ret.lock().unwrap() = Ok(()); + // and calling force_channel_monitor_updated for each monitor. + *monitor_a.persister.update_ret.lock().unwrap() = Ok(()); + *monitor_b.persister.update_ret.lock().unwrap() = Ok(()); + *monitor_c.persister.update_ret.lock().unwrap() = Ok(()); if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) { - nodes[0].channel_monitor_updated(&chan_1_funding, *id); + monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id); + nodes[0].process_monitor_events(); } if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) { - nodes[1].channel_monitor_updated(&chan_1_funding, *id); + monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id); + nodes[1].process_monitor_events(); } if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) { - nodes[1].channel_monitor_updated(&chan_2_funding, *id); + monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id); + nodes[1].process_monitor_events(); } if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) { - nodes[2].channel_monitor_updated(&chan_2_funding, *id); + monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id); + nodes[2].process_monitor_events(); } // Next, make sure peers are all connected to each other @@ -1115,7 +1146,7 @@ pub fn do_test(data: &[u8], out: Out) { break; } - // Finally, make sure that at least one end of each channel can make a substantial payment. + // Finally, make sure that at least one end of each channel can make a substantial payment assert!( send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id) || send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id)); @@ -1142,7 +1173,29 @@ pub fn do_test(data: &[u8], out: Out) { } } -pub fn chanmon_consistency_test(data: &[u8], out: Out) { +/// We actually have different behavior based on if a certain log string has been seen, so we have +/// to do a bit more tracking. 
+#[derive(Clone)] +struct SearchingOutput { + output: O, + may_fail: Arc, +} +impl Output for SearchingOutput { + fn locked_write(&self, data: &[u8]) { + // We hit a design limitation of LN state machine (see CONCURRENT_INBOUND_HTLC_FEE_BUFFER) + if std::str::from_utf8(data).unwrap().contains("Outbound update_fee HTLC buffer overflow - counterparty should force-close this channel") { + self.may_fail.store(true, atomic::Ordering::Release); + } + self.output.locked_write(data) + } +} +impl SearchingOutput { + pub fn new(output: O) -> Self { + Self { output, may_fail: Arc::new(atomic::AtomicBool::new(false)) } + } +} + +pub fn chanmon_consistency_test(data: &[u8], out: Out) { do_test(data, out); } diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index d82ff88d9c7..34c6f554c18 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -31,14 +31,15 @@ use lightning::chain::{BestBlock, Confirm, Listen}; use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use lightning::chain::chainmonitor; use lightning::chain::transaction::OutPoint; -use lightning::chain::keysinterface::{InMemorySigner, KeysInterface}; +use lightning::chain::keysinterface::{InMemorySigner, KeyMaterial, KeysInterface}; use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::ln::channelmanager::{ChainParameters, ChannelManager}; use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,IgnoringMessageHandler}; use lightning::ln::msgs::DecodeError; use lightning::ln::script::ShutdownScript; -use lightning::routing::router::get_route; use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph}; +use lightning::routing::router::{find_route, Payee, RouteParameters}; +use lightning::routing::scoring::Scorer; use lightning::util::config::UserConfig; use lightning::util::errors::APIError; use lightning::util::events::Event; @@ -55,6 +56,7 @@ use bitcoin::secp256k1::Secp256k1; use std::cell::RefCell; use std::collections::{HashMap, hash_map}; +use std::convert::TryInto; use std::cmp; use std::sync::{Arc, Mutex}; use std::sync::atomic::{AtomicU64,AtomicUsize,Ordering}; @@ -160,7 +162,7 @@ type ChannelMan = ChannelManager< EnforcingSigner, Arc, Arc, Arc, Arc, Arc>>, Arc, Arc, Arc, Arc>; -type PeerMan<'a> = PeerManager, Arc, Arc, Arc>>, Arc, IgnoringMessageHandler>; +type PeerMan<'a> = PeerManager, Arc, Arc, Arc, Arc>>, Arc, IgnoringMessageHandler>; struct MoneyLossDetector<'a> { manager: Arc, @@ -256,6 +258,7 @@ impl<'a> Drop for MoneyLossDetector<'a> { struct KeyProvider { node_secret: SecretKey, + inbound_payment_key: KeyMaterial, counter: AtomicU64, } impl KeysInterface for KeyProvider { @@ -265,6 +268,10 @@ impl KeysInterface for KeyProvider { self.node_secret.clone() } + fn get_inbound_payment_key_material(&self) -> KeyMaterial { + self.inbound_payment_key.clone() + } + fn get_destination_script(&self) -> Script { let secp_ctx = Secp256k1::signing_only(); let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(); @@ -364,10 +371,13 @@ pub fn do_test(data: &[u8], logger: &Arc) { Err(_) => return, }; + let inbound_payment_key = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42]; + let broadcast = Arc::new(TestBroadcaster{ txn_broadcasted: Mutex::new(Vec::new()) }); - let monitor = Arc::new(chainmonitor::ChainMonitor::new(None, broadcast.clone(), Arc::clone(&logger), 
fee_est.clone(), Arc::new(TestPersister{}))); + let monitor = Arc::new(chainmonitor::ChainMonitor::new(None, broadcast.clone(), Arc::clone(&logger), fee_est.clone(), + Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }))); - let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), counter: AtomicU64::new(0) }); + let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), inbound_payment_key: KeyMaterial(inbound_payment_key.try_into().unwrap()), counter: AtomicU64::new(0) }); let mut config = UserConfig::default(); config.channel_options.forwarding_fee_proportional_millionths = slice_to_be32(get_slice!(4)); config.channel_options.announced_channel = get_slice!(1)[0] != 0; @@ -378,8 +388,9 @@ pub fn do_test(data: &[u8], logger: &Arc) { }; let channelmanager = Arc::new(ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params)); let our_id = PublicKey::from_secret_key(&Secp256k1::signing_only(), &keys_manager.get_node_secret()); - let network_graph = NetworkGraph::new(genesis_block(network).block_hash()); - let net_graph_msg_handler = Arc::new(NetGraphMsgHandler::new(network_graph, None, Arc::clone(&logger))); + let network_graph = Arc::new(NetworkGraph::new(genesis_block(network).block_hash())); + let net_graph_msg_handler = Arc::new(NetGraphMsgHandler::new(Arc::clone(&network_graph), None, Arc::clone(&logger))); + let scorer = Scorer::with_fixed_penalty(0); let peers = RefCell::new([false; 256]); let mut loss_detector = MoneyLossDetector::new(&peers, channelmanager.clone(), monitor.clone(), PeerManager::new(MessageHandler { @@ -434,8 +445,14 @@ pub fn do_test(data: &[u8], logger: &Arc) { } }, 4 => { - let value = slice_to_be24(get_slice!(3)) as u64; - let route = match get_route(&our_id, &net_graph_msg_handler.network_graph, &get_pubkey!(), None, None, &Vec::new(), value, 42, Arc::clone(&logger)) { + let final_value_msat = slice_to_be24(get_slice!(3)) as u64; + let payee = Payee::from_node_id(get_pubkey!()); + let params = RouteParameters { + payee, + final_value_msat, + final_cltv_expiry_delta: 42, + }; + let route = match find_route(&our_id, ¶ms, &network_graph, None, Arc::clone(&logger), &scorer) { Ok(route) => route, Err(_) => return, }; @@ -451,8 +468,14 @@ pub fn do_test(data: &[u8], logger: &Arc) { } }, 15 => { - let value = slice_to_be24(get_slice!(3)) as u64; - let mut route = match get_route(&our_id, &net_graph_msg_handler.network_graph, &get_pubkey!(), None, None, &Vec::new(), value, 42, Arc::clone(&logger)) { + let final_value_msat = slice_to_be24(get_slice!(3)) as u64; + let payee = Payee::from_node_id(get_pubkey!()); + let params = RouteParameters { + payee, + final_value_msat, + final_cltv_expiry_delta: 42, + }; + let mut route = match find_route(&our_id, ¶ms, &network_graph, None, Arc::clone(&logger), &scorer) { Ok(route) => route, Err(_) => return, }; @@ -514,7 +537,7 @@ pub fn do_test(data: &[u8], logger: &Arc) { let payment_hash = PaymentHash(Sha256::from_engine(sha).into_inner()); // Note that this may fail - our hashes may collide and we'll end up trying to // double-register the same payment_hash. - let _ = channelmanager.create_inbound_payment_for_hash(payment_hash, None, 1, 0); + let _ = channelmanager.create_inbound_payment_for_hash(payment_hash, None, 1); }, 9 => { for payment in payments_received.drain(..) 
{ @@ -680,11 +703,11 @@ mod tests { // 030012 - inbound read from peer id 0 of len 18 // 0141 03000000000000000000000000000000 - message header indicating message length 321 // 0300fe - inbound read from peer id 0 of len 254 - // 0020 7500000000000000000000000000000000000000000000000000000000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 000000000000014a ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message + // 0020 7500000000000000000000000000000000000000000000000000000000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000162 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message // 030053 - inbound read from peer id 0 of len 83 // 030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 03000000000000000000000000000000 - rest of open_channel and mac // - // 00fd00fd00fd - Three feerate requests (all returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator) + // 00fd00fd - Two feerate requests (all returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator) // - client should now respond with accept_channel (CHECK 1: type 33 to peer 03000000) // // 030012 - inbound read from peer id 0 of len 18 @@ -731,7 +754,7 @@ mod tests { // 030112 - inbound read from peer id 1 of len 18 // 0110 01000000000000000000000000000000 - message header indicating message length 272 // 0301ff - inbound read from peer id 1 of len 255 - // 0021 0000000000000000000000000000000000000000000000000000000000000e05 000000000000014a 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 02660000000000000000000000000000 - beginning of accept_channel + // 0021 0000000000000000000000000000000000000000000000000000000000000e05 0000000000000162 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 02660000000000000000000000000000 - beginning of accept_channel // 030121 - inbound read from peer id 1 of len 33 // 0000000000000000000000000000000000 01000000000000000000000000000000 - rest of accept_channel and mac // @@ -927,7 +950,7 @@ mod 
tests { // - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10) let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) }); - super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012000a0300000000000000000000000000000003001a00100002200000022000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c3500000000000000000000000000000014affffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112000a0100000000000000000000000000000003011a0010000220000002200001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000014a00000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500026600000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000
000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d00000000000000000000000000000000000000000000000000000
0000000000900000000000000000000000000000000000000000000000000000000000000020b00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d00000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000927c0000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4b000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000075010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000067050000000000000000000000000000000000000000000000000000000000000603000000000000000000000000000000030012006303000000000000000000
00000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc)); + super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012000a0300000000000000000000000000000003001a00100002200000022000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112000a0100000000000000000000000000000003011a0010000220000002200001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f0000503000000000000000000000000000000000000000000000000000000000000010003000000000000000000000000000000000000000000000000000000000000020003000000000000000000000000000000000000000000000000000000000000030003000000000
0000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500026600000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000650000000000000000000000000000000000000000000000000000
0000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d00000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000927c0000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffff4b000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc)); let log_entries = logger.lines.lock().unwrap(); assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1 diff --git a/fuzz/src/msg_targets/gen_target.sh b/fuzz/src/msg_targets/gen_target.sh index 2e133484138..0e930cfa1ac 100755 --- a/fuzz/src/msg_targets/gen_target.sh +++ b/fuzz/src/msg_targets/gen_target.sh @@ -43,4 +43,5 @@ GEN_TEST QueryShortChannelIds test_msg "" GEN_TEST ReplyChannelRange test_msg "" GEN_TEST ErrorMessage test_msg_hole ", 32, 2" +GEN_TEST WarningMessage test_msg_hole ", 32, 2" GEN_TEST ChannelUpdate test_msg_hole ", 108, 1" diff --git a/fuzz/src/msg_targets/mod.rs b/fuzz/src/msg_targets/mod.rs index af70c58e30e..8acb690b04b 100644 --- a/fuzz/src/msg_targets/mod.rs +++ b/fuzz/src/msg_targets/mod.rs @@ -28,4 +28,5 @@ pub mod msg_node_announcement; pub mod msg_query_short_channel_ids; pub mod msg_reply_channel_range; pub mod msg_error_message; +pub mod msg_warning_message; pub mod msg_channel_update; diff --git a/fuzz/src/msg_targets/msg_warning_message.rs b/fuzz/src/msg_targets/msg_warning_message.rs new file mode 100644 index 00000000000..f033b6fd6e9 --- /dev/null +++ b/fuzz/src/msg_targets/msg_warning_message.rs @@ -0,0 +1,27 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +// This file is auto-generated by gen_target.sh based on msg_target_template.txt +// To modify it, modify msg_target_template.txt and run gen_target.sh instead. 
+ +use lightning::ln::msgs; + +use msg_targets::utils::VecWriter; +use utils::test_logger; + +#[inline] +pub fn msg_warning_message_test(data: &[u8], _out: Out) { + test_msg_hole!(msgs::WarningMessage, data, 32, 2); +} + +#[no_mangle] +pub extern "C" fn msg_warning_message_run(data: *const u8, datalen: usize) { + let data = unsafe { std::slice::from_raw_parts(data, datalen) }; + test_msg_hole!(msgs::WarningMessage, data, 32, 2); +} diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index 6c792916f82..d6aa97e42e7 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -16,7 +16,8 @@ use lightning::chain::transaction::OutPoint; use lightning::ln::channelmanager::{ChannelDetails, ChannelCounterparty}; use lightning::ln::features::InitFeatures; use lightning::ln::msgs; -use lightning::routing::router::{get_route, RouteHint, RouteHintHop}; +use lightning::routing::router::{find_route, Payee, RouteHint, RouteHintHop, RouteParameters}; +use lightning::routing::scoring::Scorer; use lightning::util::logger::Logger; use lightning::util::ser::Readable; use lightning::routing::network_graph::{NetworkGraph, RoutingFees}; @@ -216,12 +217,13 @@ pub fn do_test(data: &[u8], out: Out) { funding_txo: Some(OutPoint { txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(), index: 0 }), short_channel_id: Some(scid), channel_value_satoshis: slice_to_be64(get_slice!(8)), - user_id: 0, inbound_capacity_msat: 0, + user_channel_id: 0, inbound_capacity_msat: 0, unspendable_punishment_reserve: None, confirmations_required: None, force_close_spend_delay: None, is_outbound: true, is_funding_locked: true, is_usable: true, is_public: true, + balance_msat: 0, outbound_capacity_msat: 0, }); } @@ -247,11 +249,16 @@ pub fn do_test(data: &[u8], out: Out) { }])); } } + let scorer = Scorer::with_fixed_penalty(0); for target in node_pks.iter() { - let _ = get_route(&our_pubkey, &net_graph, target, None, + let params = RouteParameters { + payee: Payee::from_node_id(*target).with_route_hints(last_hops.clone()), + final_value_msat: slice_to_be64(get_slice!(8)), + final_cltv_expiry_delta: slice_to_be32(get_slice!(4)), + }; + let _ = find_route(&our_pubkey, ¶ms, &net_graph, first_hops.map(|c| c.iter().collect::>()).as_ref().map(|a| a.as_slice()), - &last_hops.iter().collect::>(), - slice_to_be64(get_slice!(8)), slice_to_be32(get_slice!(4)), Arc::clone(&logger)); + Arc::clone(&logger), &scorer); } }, } diff --git a/fuzz/src/utils/test_persister.rs b/fuzz/src/utils/test_persister.rs index 9cb5b60b6a8..7ca1ff96d05 100644 --- a/fuzz/src/utils/test_persister.rs +++ b/fuzz/src/utils/test_persister.rs @@ -1,14 +1,20 @@ -use lightning::chain::channelmonitor; +use lightning::chain; +use lightning::chain::{chainmonitor, channelmonitor}; +use lightning::chain::chainmonitor::MonitorUpdateId; use lightning::chain::transaction::OutPoint; use lightning::util::enforcing_trait_impls::EnforcingSigner; -pub struct TestPersister {} -impl channelmonitor::Persist for TestPersister { - fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { - Ok(()) +use std::sync::Mutex; + +pub struct TestPersister { + pub update_ret: Mutex>, +} +impl chainmonitor::Persist for TestPersister { + fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> { + self.update_ret.lock().unwrap().clone() } - fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: 
&channelmonitor::ChannelMonitorUpdate, _data: &channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { - Ok(()) + fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option, _data: &channelmonitor::ChannelMonitor, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> { + self.update_ret.lock().unwrap().clone() } } diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml index 98d9ade945e..1837cb17e66 100644 --- a/lightning-background-processor/Cargo.toml +++ b/lightning-background-processor/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "lightning-background-processor" -version = "0.0.100" +version = "0.0.104" authors = ["Valentine Wallace "] license = "MIT OR Apache-2.0" -repository = "http://github.com/rust-bitcoin/rust-lightning" +repository = "http://github.com/lightningdevkit/rust-lightning" description = """ Utilities to perform required background tasks for Rust Lightning. """ @@ -11,9 +11,9 @@ edition = "2018" [dependencies] bitcoin = "0.27" -lightning = { version = "0.0.100", path = "../lightning", features = ["allow_wallclock_use"] } -lightning-persister = { version = "0.0.100", path = "../lightning-persister" } +lightning = { version = "0.0.104", path = "../lightning", features = ["std"] } +lightning-persister = { version = "0.0.104", path = "../lightning-persister" } [dev-dependencies] -lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] } - +lightning = { version = "0.0.104", path = "../lightning", features = ["_test_utils"] } +lightning-invoice = { version = "0.12.0", path = "../lightning-invoice" } diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 4e6fb6f02df..2dbc8053b4e 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -10,14 +10,12 @@ use lightning::chain; use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; -use lightning::chain::chainmonitor::ChainMonitor; -use lightning::chain::channelmonitor; +use lightning::chain::chainmonitor::{ChainMonitor, Persist}; use lightning::chain::keysinterface::{Sign, KeysInterface}; use lightning::ln::channelmanager::ChannelManager; use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; -use lightning::ln::peer_handler::{PeerManager, SocketDescriptor}; -use lightning::ln::peer_handler::CustomMessageHandler; -use lightning::routing::network_graph::NetGraphMsgHandler; +use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor}; +use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler}; use lightning::util::events::{Event, EventHandler, EventsProvider}; use lightning::util::logger::Logger; use std::sync::Arc; @@ -36,6 +34,8 @@ use std::ops::Deref; /// [`ChannelManager`] persistence should be done in the background. /// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`] /// at the appropriate intervals. +/// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`NetGraphMsgHandler`] is provided to +/// [`BackgroundProcessor::start`]). /// /// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied /// upon as doing so may result in high latency. 
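
The hunks that follow add an hourly network-graph prune to the background processor, plus an early first prune shortly after startup so that short-lived clients still prune at least once. As a rough, self-contained illustration of that cadence (not LDK's actual implementation), the sketch below mimics the `have_pruned`/`last_prune_call` logic introduced in this diff; `prune_graph` is a hypothetical stand-in for `NetworkGraph::remove_stale_channels`, and the real timers (60 seconds for the first prune, `NETWORK_PRUNE_TIMER = 60 * 60` thereafter) are scaled down so the behaviour is visible when run.

```rust
use std::thread::sleep;
use std::time::{Duration, Instant};

// Real values in the diff: first prune 60s after startup, then hourly (60 * 60).
// Scaled down here purely so the sketch shows the cadence within a few seconds.
const FIRST_PRUNE_TIMER: u64 = 1;
const NETWORK_PRUNE_TIMER: u64 = 3;

/// Hypothetical stand-in for `NetworkGraph::remove_stale_channels`.
fn prune_graph() {
    println!("pruning stale channels from the network graph");
}

fn main() {
    let start = Instant::now();
    let mut last_prune_call = Instant::now();
    let mut have_pruned = false;

    // Event-loop sketch: wake up frequently (the real loop polls every 100ms),
    // but only prune on the much slower prune cadence.
    while start.elapsed().as_secs() < 8 {
        sleep(Duration::from_millis(100));
        let timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_PRUNE_TIMER };
        if last_prune_call.elapsed().as_secs() > timer {
            prune_graph();
            last_prune_call = Instant::now();
            have_pruned = true;
        }
    }
}
```

The two-tier timer exists because a node that only runs for a few minutes would otherwise never reach the hourly prune; pruning once shortly after startup keeps its graph bounded, while long-running nodes fall back to the hourly cadence.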
@@ -60,13 +60,18 @@ const FRESHNESS_TIMER: u64 = 60; #[cfg(test)] const FRESHNESS_TIMER: u64 = 1; -#[cfg(not(debug_assertions))] +#[cfg(all(not(test), not(debug_assertions)))] const PING_TIMER: u64 = 5; /// Signature operations take a lot longer without compiler optimisations. /// Increasing the ping timer allows for this but slower devices will be disconnected if the /// timeout is reached. -#[cfg(debug_assertions)] +#[cfg(all(not(test), debug_assertions))] const PING_TIMER: u64 = 30; +#[cfg(test)] +const PING_TIMER: u64 = 1; + +/// Prune the network graph of stale entries hourly. +const NETWORK_PRUNE_TIMER: u64 = 60 * 60; /// Trait which handles persisting a [`ChannelManager`] to disk. /// @@ -103,7 +108,8 @@ ChannelManagerPersister for Fun where /// Decorates an [`EventHandler`] with common functionality provided by standard [`EventHandler`]s. struct DecoratingEventHandler< E: EventHandler, - N: Deref>, + N: Deref>, + G: Deref, A: Deref, L: Deref, > @@ -114,10 +120,11 @@ where A::Target: chain::Access, L::Target: Logger { impl< E: EventHandler, - N: Deref>, + N: Deref>, + G: Deref, A: Deref, L: Deref, -> EventHandler for DecoratingEventHandler +> EventHandler for DecoratingEventHandler where A::Target: chain::Access, L::Target: Logger { fn handle_event(&self, event: &Event) { if let Some(event_handler) = &self.net_graph_msg_handler { @@ -153,7 +160,7 @@ impl BackgroundProcessor { /// functionality implemented by other handlers. /// * [`NetGraphMsgHandler`] if given will update the [`NetworkGraph`] based on payment failures. /// - /// [top-level documentation]: Self + /// [top-level documentation]: BackgroundProcessor /// [`join`]: Self::join /// [`stop`]: Self::stop /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager @@ -168,16 +175,17 @@ impl BackgroundProcessor { T: 'static + Deref + Send + Sync, K: 'static + Deref + Send + Sync, F: 'static + Deref + Send + Sync, + G: 'static + Deref + Send + Sync, L: 'static + Deref + Send + Sync, P: 'static + Deref + Send + Sync, Descriptor: 'static + SocketDescriptor + Send + Sync, CMH: 'static + Deref + Send + Sync, RMH: 'static + Deref + Send + Sync, - EH: 'static + EventHandler + Send + Sync, + EH: 'static + EventHandler + Send, CMP: 'static + Send + ChannelManagerPersister, M: 'static + Deref> + Send + Sync, CM: 'static + Deref> + Send + Sync, - NG: 'static + Deref> + Send + Sync, + NG: 'static + Deref> + Send + Sync, UMH: 'static + Deref + Send + Sync, PM: 'static + Deref> + Send + Sync, >( @@ -192,7 +200,7 @@ impl BackgroundProcessor { K::Target: 'static + KeysInterface, F::Target: 'static + FeeEstimator, L::Target: 'static + Logger, - P::Target: 'static + channelmonitor::Persist, + P::Target: 'static + Persist, CMH::Target: 'static + ChannelMessageHandler, RMH::Target: 'static + RoutingMessageHandler, UMH::Target: 'static + CustomMessageHandler, @@ -200,13 +208,16 @@ impl BackgroundProcessor { let stop_thread = Arc::new(AtomicBool::new(false)); let stop_thread_clone = stop_thread.clone(); let handle = thread::spawn(move || -> Result<(), std::io::Error> { - let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler }; + let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler: net_graph_msg_handler.as_ref().map(|t| t.deref()) }; log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup"); channel_manager.timer_tick_occurred(); let mut last_freshness_call = Instant::now(); let mut last_ping_call = Instant::now(); + let mut last_prune_call = Instant::now(); + 
let mut have_pruned = false; + loop { peer_manager.process_events(); channel_manager.process_pending_events(&event_handler); @@ -214,7 +225,9 @@ impl BackgroundProcessor { let updates_available = channel_manager.await_persistable_update_timeout(Duration::from_millis(100)); if updates_available { + log_trace!(logger, "Persisting ChannelManager..."); persister.persist_manager(&*channel_manager)?; + log_trace!(logger, "Done persisting ChannelManager."); } // Exit the loop if the background processor was requested to stop. if stop_thread.load(Ordering::Acquire) == true { @@ -235,14 +248,26 @@ impl BackgroundProcessor { // timer, we should have disconnected all sockets by now (and they're probably // dead anyway), so disconnect them by calling `timer_tick_occurred()` twice. log_trace!(logger, "Awoke after more than double our ping timer, disconnecting peers."); - peer_manager.timer_tick_occurred(); - peer_manager.timer_tick_occurred(); + peer_manager.disconnect_all_peers(); last_ping_call = Instant::now(); } else if last_ping_call.elapsed().as_secs() > PING_TIMER { log_trace!(logger, "Calling PeerManager's timer_tick_occurred"); peer_manager.timer_tick_occurred(); last_ping_call = Instant::now(); } + + // Note that we want to run a graph prune once not long after startup before + // falling back to our usual hourly prunes. This avoids short-lived clients never + // pruning their network graph. We run once 60 seconds after startup before + // continuing our normal cadence. + if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { 60 } { + if let Some(ref handler) = net_graph_msg_handler { + log_trace!(logger, "Pruning network graph of stale entries"); + handler.network_graph().remove_stale_channels(); + last_prune_call = Instant::now(); + have_pruned = true; + } + } } }); Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) } @@ -315,6 +340,8 @@ mod tests { use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent}; use lightning::util::ser::Writeable; use lightning::util::test_utils; + use lightning_invoice::payment::{InvoicePayer, RetryAttempts}; + use lightning_invoice::utils::DefaultRouter; use lightning_persister::FilesystemPersister; use std::fs; use std::path::PathBuf; @@ -338,11 +365,12 @@ mod tests { struct Node { node: Arc>, - net_graph_msg_handler: Option, Arc>>>, + net_graph_msg_handler: Option, Arc, Arc>>>, peer_manager: Arc, Arc, Arc, IgnoringMessageHandler>>, chain_monitor: Arc, persister: Arc, tx_broadcaster: Arc, + network_graph: Arc, logger: Arc, best_block: BestBlock, } @@ -380,11 +408,11 @@ mod tests { let best_block = BestBlock::from_genesis(network); let params = ChainParameters { network, best_block }; let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), keys_manager.clone(), UserConfig::default(), params)); - let network_graph = NetworkGraph::new(genesis_block.header.block_hash()); - let net_graph_msg_handler = Some(Arc::new(NetGraphMsgHandler::new(network_graph, Some(chain_source.clone()), logger.clone()))); + let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash())); + let net_graph_msg_handler = Some(Arc::new(NetGraphMsgHandler::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()))); let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )}; let peer_manager = 
Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(), &seed, logger.clone(), IgnoringMessageHandler{})); - let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, logger, best_block }; + let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block }; nodes.push(node); } @@ -486,9 +514,10 @@ mod tests { macro_rules! check_persisted_data { ($node: expr, $filepath: expr, $expected_bytes: expr) => { - match $node.write(&mut $expected_bytes) { - Ok(()) => { - loop { + loop { + $expected_bytes.clear(); + match $node.write(&mut $expected_bytes) { + Ok(()) => { match std::fs::read($filepath) { Ok(bytes) => { if bytes == $expected_bytes { @@ -499,9 +528,9 @@ mod tests { }, Err(_) => continue } - } - }, - Err(e) => panic!("Unexpected error: {}", e) + }, + Err(e) => panic!("Unexpected error: {}", e) + } } } } @@ -614,9 +643,25 @@ mod tests { .expect("SpendableOutputs not handled within deadline"); match event { Event::SpendableOutputs { .. } => {}, + Event::ChannelClosed { .. } => {}, _ => panic!("Unexpected event: {:?}", event), } assert!(bg_processor.stop().is_ok()); } + + #[test] + fn test_invoice_payer() { + let nodes = create_nodes(2, "test_invoice_payer".to_string()); + + // Initiate the background processors to watch each node. + let data_dir = nodes[0].persister.get_data_dir(); + let persister = move |node: &ChannelManager, Arc, Arc, Arc, Arc>| FilesystemPersister::persist_manager(data_dir.clone(), node); + let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger)); + let scorer = Arc::new(Mutex::new(test_utils::TestScorer::default())); + let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, scorer, Arc::clone(&nodes[0].logger), |_: &_| {}, RetryAttempts(2))); + let event_handler = Arc::clone(&invoice_payer); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone()); + assert!(bg_processor.stop().is_ok()); + } } diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml index 0902cc32069..d12428409f7 100644 --- a/lightning-block-sync/Cargo.toml +++ b/lightning-block-sync/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "lightning-block-sync" -version = "0.0.100" +version = "0.0.104" authors = ["Jeffrey Czyz", "Matt Corallo"] license = "MIT OR Apache-2.0" -repository = "http://github.com/rust-bitcoin/rust-lightning" +repository = "http://github.com/lightningdevkit/rust-lightning" description = """ Utilities to fetch the chain data from a block source and feed them into Rust Lightning. 
""" @@ -15,7 +15,7 @@ rpc-client = [ "serde", "serde_json", "chunked_transfer" ] [dependencies] bitcoin = "0.27" -lightning = { version = "0.0.100", path = "../lightning" } +lightning = { version = "0.0.104", path = "../lightning" } tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true } serde = { version = "1.0", features = ["derive"], optional = true } serde_json = { version = "1.0", optional = true } diff --git a/lightning-block-sync/src/convert.rs b/lightning-block-sync/src/convert.rs index e8e1427bdb6..358076f4c25 100644 --- a/lightning-block-sync/src/convert.rs +++ b/lightning-block-sync/src/convert.rs @@ -1,11 +1,12 @@ -use crate::{BlockHeaderData, BlockSourceError}; use crate::http::{BinaryResponse, JsonResponse}; use crate::utils::hex_to_uint256; +use crate::{BlockHeaderData, BlockSourceError}; use bitcoin::blockdata::block::{Block, BlockHeader}; use bitcoin::consensus::encode; use bitcoin::hash_types::{BlockHash, TxMerkleNode, Txid}; -use bitcoin::hashes::hex::{ToHex, FromHex}; +use bitcoin::hashes::hex::{FromHex, ToHex}; +use bitcoin::Transaction; use serde::Deserialize; @@ -104,7 +105,6 @@ impl TryFrom for BlockHeaderData { } } - /// Converts a JSON value into a block. Assumes the block is hex-encoded in a JSON string. impl TryInto for JsonResponse { type Error = std::io::Error; @@ -181,13 +181,77 @@ impl TryInto for JsonResponse { } } +/// Converts a JSON value into a transaction. WATCH OUT! this cannot be used for zero-input transactions +/// (e.g. createrawtransaction). See https://github.com/rust-bitcoin/rust-bitcoincore-rpc/issues/197 +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let hex_tx = if self.0.is_object() { + // result is json encoded + match &self.0["hex"] { + // result has hex field + serde_json::Value::String(hex_data) => match self.0["complete"] { + // result may or may not be signed (e.g. signrawtransactionwithwallet) + serde_json::Value::Bool(x) => { + if x == false { + let reason = match &self.0["errors"][0]["error"] { + serde_json::Value::String(x) => x.as_str(), + _ => "Unknown error", + }; + + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("transaction couldn't be signed. {}", reason), + )); + } else { + hex_data + } + } + // result is a complete transaction (e.g. getrawtranaction verbose) + _ => hex_data, + }, + _ => return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "expected JSON string", + )), + } + } else { + // result is plain text (e.g. getrawtransaction no verbose) + match self.0.as_str() { + Some(hex_tx) => hex_tx, + None => { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "expected JSON string", + )) + } + } + }; + + match Vec::::from_hex(hex_tx) { + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "invalid hex data", + )), + Ok(tx_data) => match encode::deserialize(&tx_data) { + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "invalid transaction", + )), + Ok(tx) => Ok(tx), + }, + } + } +} + #[cfg(test)] pub(crate) mod tests { use super::*; use bitcoin::blockdata::constants::genesis_block; - use bitcoin::consensus::encode; use bitcoin::hashes::Hash; use bitcoin::network::constants::Network; + use serde_json::value::Number; + use serde_json::Value; /// Converts from `BlockHeaderData` into a `GetHeaderResponse` JSON value. 
impl From for serde_json::Value { @@ -541,4 +605,101 @@ pub(crate) mod tests { Ok(txid) => assert_eq!(txid, target_txid), } } + + // TryInto can be used in two ways, first with plain hex response where data is + // the hex encoded transaction (e.g. as a result of getrawtransaction) or as a JSON object + // where the hex encoded transaction can be found in the hex field of the object (if present) + // (e.g. as a result of signrawtransactionwithwallet). + + // plain hex transaction + + #[test] + fn into_tx_from_json_response_with_invalid_hex_data() { + let response = JsonResponse(serde_json::json!("foobar")); + match TryInto::::try_into(response) { + Err(e) => { + assert_eq!(e.kind(), std::io::ErrorKind::InvalidData); + assert_eq!(e.get_ref().unwrap().to_string(), "invalid hex data"); + } + Ok(_) => panic!("Expected error"), + } + } + + #[test] + fn into_tx_from_json_response_with_invalid_data_type() { + let response = JsonResponse(Value::Number(Number::from_f64(1.0).unwrap())); + match TryInto::::try_into(response) { + Err(e) => { + assert_eq!(e.kind(), std::io::ErrorKind::InvalidData); + assert_eq!(e.get_ref().unwrap().to_string(), "expected JSON string"); + } + Ok(_) => panic!("Expected error"), + } + } + + #[test] + fn into_tx_from_json_response_with_invalid_tx_data() { + let response = JsonResponse(serde_json::json!("abcd")); + match TryInto::::try_into(response) { + Err(e) => { + assert_eq!(e.kind(), std::io::ErrorKind::InvalidData); + assert_eq!(e.get_ref().unwrap().to_string(), "invalid transaction"); + } + Ok(_) => panic!("Expected error"), + } + } + + #[test] + fn into_tx_from_json_response_with_valid_tx_data_plain() { + let genesis_block = genesis_block(Network::Bitcoin); + let target_tx = genesis_block.txdata.get(0).unwrap(); + let response = JsonResponse(serde_json::json!(encode::serialize_hex(&target_tx))); + match TryInto::::try_into(response) { + Err(e) => panic!("Unexpected error: {:?}", e), + Ok(tx) => assert_eq!(&tx, target_tx), + } + } + + #[test] + fn into_tx_from_json_response_with_valid_tx_data_hex_field() { + let genesis_block = genesis_block(Network::Bitcoin); + let target_tx = genesis_block.txdata.get(0).unwrap(); + let response = JsonResponse(serde_json::json!({"hex": encode::serialize_hex(&target_tx)})); + match TryInto::::try_into(response) { + Err(e) => panic!("Unexpected error: {:?}", e), + Ok(tx) => assert_eq!(&tx, target_tx), + } + } + + // transaction in hex field of JSON object + + #[test] + fn into_tx_from_json_response_with_no_hex_field() { + let response = JsonResponse(serde_json::json!({ "error": "foo" })); + match TryInto::::try_into(response) { + Err(e) => { + assert_eq!(e.kind(), std::io::ErrorKind::InvalidData); + assert_eq!( + e.get_ref().unwrap().to_string(), + "expected JSON string" + ); + } + Ok(_) => panic!("Expected error"), + } + } + + #[test] + fn into_tx_from_json_response_not_signed() { + let response = JsonResponse(serde_json::json!({ "hex": "foo", "complete": false })); + match TryInto::::try_into(response) { + Err(e) => { + assert_eq!(e.kind(), std::io::ErrorKind::InvalidData); + assert!( + e.get_ref().unwrap().to_string().contains( + "transaction couldn't be signed") + ); + } + Ok(_) => panic!("Expected error"), + } + } } diff --git a/lightning-block-sync/src/http.rs b/lightning-block-sync/src/http.rs index 0721babfde3..a3935edf5cb 100644 --- a/lightning-block-sync/src/http.rs +++ b/lightning-block-sync/src/http.rs @@ -27,9 +27,10 @@ const TCP_STREAM_TIMEOUT: Duration = Duration::from_secs(5); /// Timeout for reading the first byte of 
a response. This is separate from the general read /// timeout as it is not uncommon for Bitcoin Core to be blocked waiting on UTXO cache flushes for -/// upwards of a minute or more. Note that we always retry once when we time out, so the maximum -/// time we allow Bitcoin Core to block for is twice this value. -const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(120); +/// upwards of 10 minutes on slow devices (e.g. RPis with SSDs over USB). Note that we always retry +/// once when we time out, so the maximum time we allow Bitcoin Core to block for is twice this +/// value. +const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(300); /// Maximum HTTP message header size in bytes. const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192; diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index d51cd3ba606..0bdc2a0255b 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -40,8 +40,8 @@ BlockSourceResult { /// /// use lightning::chain; /// use lightning::chain::Watch; +/// use lightning::chain::chainmonitor; /// use lightning::chain::chainmonitor::ChainMonitor; -/// use lightning::chain::channelmonitor; /// use lightning::chain::channelmonitor::ChannelMonitor; /// use lightning::chain::chaininterface::BroadcasterInterface; /// use lightning::chain::chaininterface::FeeEstimator; @@ -65,7 +65,7 @@ BlockSourceResult { /// F: FeeEstimator, /// L: Logger, /// C: chain::Filter, -/// P: channelmonitor::Persist, +/// P: chainmonitor::Persist, /// >( /// block_source: &mut B, /// chain_monitor: &ChainMonitor, diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml index baa9a79c5d5..c4b905e863b 100644 --- a/lightning-invoice/Cargo.toml +++ b/lightning-invoice/Cargo.toml @@ -1,20 +1,27 @@ [package] name = "lightning-invoice" description = "Data structures to parse and serialize BOLT11 lightning invoices" -version = "0.8.0" +version = "0.12.0" authors = ["Sebastian Geisler "] documentation = "https://docs.rs/lightning-invoice/" license = "MIT OR Apache-2.0" keywords = [ "lightning", "bitcoin", "invoice", "BOLT11" ] readme = "README.md" +[features] +default = ["std"] +no-std = ["hashbrown", "lightning/no-std", "core2/alloc"] +std = ["bitcoin_hashes/std", "num-traits/std", "lightning/std", "bech32/std"] + [dependencies] -bech32 = "0.8" -lightning = { version = "0.0.100", path = "../lightning" } -secp256k1 = { version = "0.20", features = ["recovery"] } -num-traits = "0.2.8" -bitcoin_hashes = "0.10" +bech32 = { version = "0.8", default-features = false } +lightning = { version = "0.0.104", path = "../lightning", default-features = false } +secp256k1 = { version = "0.20", default-features = false, features = ["recovery", "alloc"] } +num-traits = { version = "0.2.8", default-features = false } +bitcoin_hashes = { version = "0.10", default-features = false } +hashbrown = { version = "0.11", optional = true } +core2 = { version = "0.3.0", default-features = false, optional = true } [dev-dependencies] +lightning = { version = "0.0.104", path = "../lightning", default-features = false, features = ["_test_utils"] } hex = "0.3" -lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] } diff --git a/lightning-invoice/src/de.rs b/lightning-invoice/src/de.rs index 777ac660f8e..53f6b83da5f 100644 --- a/lightning-invoice/src/de.rs +++ b/lightning-invoice/src/de.rs @@ -1,15 +1,17 @@ +#[cfg(feature = "std")] use std::error; -use std::fmt; -use std::fmt::{Display, Formatter}; -use 
std::num::ParseIntError; -use std::str; -use std::str::FromStr; +use core::fmt; +use core::fmt::{Display, Formatter}; +use core::num::ParseIntError; +use core::str; +use core::str::FromStr; use bech32; use bech32::{u5, FromBase32}; use bitcoin_hashes::Hash; use bitcoin_hashes::sha256; +use crate::prelude::*; use lightning::ln::PaymentSecret; use lightning::routing::network_graph::RoutingFees; use lightning::routing::router::{RouteHint, RouteHintHop}; @@ -28,7 +30,7 @@ use self::hrp_sm::parse_hrp; /// State machine to parse the hrp mod hrp_sm { - use std::ops::Range; + use core::ops::Range; #[derive(PartialEq, Eq, Debug)] enum States { @@ -723,8 +725,10 @@ impl Display for ParseOrSemanticError { } } +#[cfg(feature = "std")] impl error::Error for ParseError {} +#[cfg(feature = "std")] impl error::Error for ParseOrSemanticError {} macro_rules! from_error { diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 61e394da6db..2164f1c530f 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -6,6 +6,7 @@ #![deny(broken_intra_doc_links)] #![cfg_attr(feature = "strict", deny(warnings))] +#![cfg_attr(all(not(feature = "std"), not(test)), no_std)] //! This crate provides data structures to represent //! [lightning BOLT11](https://github.com/lightningnetwork/lightning-rfc/blob/master/11-payment-encoding.md) @@ -15,13 +16,24 @@ //! * For parsing use `str::parse::(&self)` (see the docs of `impl FromStr for Invoice`) //! * For constructing invoices use the `InvoiceBuilder` //! * For serializing invoices use the `Display`/`ToString` traits + +#[cfg(not(any(feature = "std", feature = "no-std")))] +compile_error!("at least one of the `std` or `no-std` features must be enabled"); + +pub mod payment; pub mod utils; extern crate bech32; extern crate bitcoin_hashes; -extern crate lightning; +#[macro_use] extern crate lightning; extern crate num_traits; extern crate secp256k1; +extern crate alloc; +#[cfg(any(test, feature = "std"))] +extern crate core; + +#[cfg(feature = "std")] +use std::time::SystemTime; use bech32::u5; use bitcoin_hashes::Hash; @@ -36,22 +48,47 @@ use secp256k1::key::PublicKey; use secp256k1::{Message, Secp256k1}; use secp256k1::recovery::RecoverableSignature; -use std::fmt::{Display, Formatter, self}; -use std::iter::FilterMap; -use std::ops::Deref; -use std::slice::Iter; -use std::time::{SystemTime, Duration, UNIX_EPOCH}; +use core::fmt::{Display, Formatter, self}; +use core::iter::FilterMap; +use core::ops::Deref; +use core::slice::Iter; +use core::time::Duration; mod de; mod ser; mod tb; +mod prelude { + #[cfg(feature = "hashbrown")] + extern crate hashbrown; + + pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque, boxed::Box}; + #[cfg(not(feature = "hashbrown"))] + pub use std::collections::{HashMap, HashSet, hash_map}; + #[cfg(feature = "hashbrown")] + pub use self::hashbrown::{HashMap, HashSet, hash_map}; + + pub use alloc::string::ToString; +} + +use prelude::*; + +/// Sync compat for std/no_std +#[cfg(feature = "std")] +mod sync { + pub use ::std::sync::{Mutex, MutexGuard}; +} + +/// Sync compat for std/no_std +#[cfg(not(feature = "std"))] +mod sync; + pub use de::{ParseError, ParseOrSemanticError}; // TODO: fix before 2037 (see rust PR #55527) /// Defines the maximum UNIX timestamp that can be represented as `SystemTime`. This is checked by /// one of the unit tests, please run them. 
-const SYSTEM_TIME_MAX_UNIX_TIMESTAMP: u64 = std::i32::MAX as u64;
+const SYSTEM_TIME_MAX_UNIX_TIMESTAMP: u64 = core::i32::MAX as u64;
 /// Allow the expiry time to be up to one year. Since this reduces the range of possible timestamps
 /// it should be rather low as long as we still have to support 32bit time representations
@@ -76,10 +113,11 @@ pub const DEFAULT_MIN_FINAL_CLTV_EXPIRY: u64 = 18;
 /// can remove this function and run the test `test_system_time_bounds_assumptions`. In any case,
 /// please open an issue. If all tests pass you should be able to use this library safely by just
 /// removing this function till we patch it accordingly.
+#[cfg(feature = "std")]
 fn __system_time_size_check() {
 // Use 2 * sizeof(u64) as expected size since the expected underlying implementation is storing
 // a `Duration` since `SystemTime::UNIX_EPOCH`.
- unsafe { std::mem::transmute_copy::<SystemTime, [u8; 16]>(&UNIX_EPOCH); }
+ unsafe { core::mem::transmute_copy::<SystemTime, [u8; 16]>(&SystemTime::UNIX_EPOCH); }
 }
@@ -93,32 +131,41 @@ fn __system_time_size_check() {
 /// If this function fails this is considered a bug. Please open an issue describing your
 /// platform and stating your current system time.
 ///
+/// Note that this currently does nothing in `no_std` environments, because they don't have
+/// a `SystemTime` implementation.
+///
 /// # Panics
 /// If the check fails this function panics. By calling this function on startup you ensure that
 /// this won't happen at an arbitrary later point in time.
 pub fn check_platform() {
- // The upper and lower bounds of `SystemTime` are not part of its public contract and are
- // platform specific. That's why we have to test if our assumptions regarding these bounds
- // hold on the target platform.
- //
- // If this test fails on your platform, please don't use the library and open an issue
- // instead so we can resolve the situation. Currently this library is tested on:
- // * Linux (64bit)
- let fail_date = UNIX_EPOCH + Duration::from_secs(SYSTEM_TIME_MAX_UNIX_TIMESTAMP);
- let year = Duration::from_secs(60 * 60 * 24 * 365);
-
- // Make sure that the library will keep working for another year
- assert!(fail_date.duration_since(SystemTime::now()).unwrap() > year);
-
- let max_ts = PositiveTimestamp::from_unix_timestamp(
- SYSTEM_TIME_MAX_UNIX_TIMESTAMP - MAX_EXPIRY_TIME
- ).unwrap();
- let max_exp = ::ExpiryTime::from_seconds(MAX_EXPIRY_TIME).unwrap();
-
- assert_eq!(
- (*max_ts.as_time() + *max_exp.as_duration()).duration_since(UNIX_EPOCH).unwrap().as_secs(),
- SYSTEM_TIME_MAX_UNIX_TIMESTAMP
- );
+ #[cfg(feature = "std")]
+ check_system_time_bounds();
+}
+
+#[cfg(feature = "std")]
+fn check_system_time_bounds() {
+ // The upper and lower bounds of `SystemTime` are not part of its public contract and are
+ // platform specific. That's why we have to test if our assumptions regarding these bounds
+ // hold on the target platform.
+ //
+ // If this test fails on your platform, please don't use the library and open an issue
+ // instead so we can resolve the situation.
Currently this library is tested on: + // * Linux (64bit) + let fail_date = SystemTime::UNIX_EPOCH + Duration::from_secs(SYSTEM_TIME_MAX_UNIX_TIMESTAMP); + let year = Duration::from_secs(60 * 60 * 24 * 365); + + // Make sure that the library will keep working for another year + assert!(fail_date.duration_since(SystemTime::now()).unwrap() > year); + + let max_ts = PositiveTimestamp::from_unix_timestamp( + SYSTEM_TIME_MAX_UNIX_TIMESTAMP - MAX_EXPIRY_TIME + ).unwrap(); + let max_exp = ::ExpiryTime::from_seconds(MAX_EXPIRY_TIME).unwrap(); + + assert_eq!( + (max_ts.as_time() + *max_exp.as_duration()).duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(), + SYSTEM_TIME_MAX_UNIX_TIMESTAMP + ); } @@ -141,6 +188,9 @@ pub fn check_platform() { /// /// use lightning_invoice::{Currency, InvoiceBuilder}; /// +/// # #[cfg(not(feature = "std"))] +/// # fn main() {} +/// # #[cfg(feature = "std")] /// # fn main() { /// let private_key = SecretKey::from_slice( /// &[ @@ -185,11 +235,11 @@ pub struct InvoiceBuilder, error: Option, - phantom_d: std::marker::PhantomData, - phantom_h: std::marker::PhantomData, - phantom_t: std::marker::PhantomData, - phantom_c: std::marker::PhantomData, - phantom_s: std::marker::PhantomData, + phantom_d: core::marker::PhantomData, + phantom_h: core::marker::PhantomData, + phantom_t: core::marker::PhantomData, + phantom_c: core::marker::PhantomData, + phantom_s: core::marker::PhantomData, } /// Represents a syntactically and semantically correct lightning BOLT11 invoice. @@ -284,9 +334,9 @@ pub struct RawDataPart { /// /// # Invariants /// The UNIX timestamp representing the stored time has to be positive and small enough so that -/// a `EpiryTime` can be added to it without an overflow. +/// a `ExpiryTime` can be added to it without an overflow. #[derive(Eq, PartialEq, Debug, Clone)] -pub struct PositiveTimestamp(SystemTime); +pub struct PositiveTimestamp(Duration); /// SI prefixes for the human readable part #[derive(Eq, PartialEq, Debug, Clone, Copy)] @@ -378,7 +428,8 @@ pub enum TaggedField { /// SHA-256 hash #[derive(Clone, Debug, Hash, Eq, PartialEq)] -pub struct Sha256(pub sha256::Hash); +pub struct Sha256(/// (C-not exported) as the native hash types are not currently mapped + pub sha256::Hash); /// Description string /// @@ -457,11 +508,11 @@ impl InvoiceBuilder { tagged_fields: Vec::new(), error: None, - phantom_d: std::marker::PhantomData, - phantom_h: std::marker::PhantomData, - phantom_t: std::marker::PhantomData, - phantom_c: std::marker::PhantomData, - phantom_s: std::marker::PhantomData, + phantom_d: core::marker::PhantomData, + phantom_h: core::marker::PhantomData, + phantom_t: core::marker::PhantomData, + phantom_c: core::marker::PhantomData, + phantom_s: core::marker::PhantomData, } } } @@ -477,11 +528,11 @@ impl InvoiceBui tagged_fields: self.tagged_fields, error: self.error, - phantom_d: std::marker::PhantomData, - phantom_h: std::marker::PhantomData, - phantom_t: std::marker::PhantomData, - phantom_c: std::marker::PhantomData, - phantom_s: std::marker::PhantomData, + phantom_d: core::marker::PhantomData, + phantom_h: core::marker::PhantomData, + phantom_t: core::marker::PhantomData, + phantom_c: core::marker::PhantomData, + phantom_s: core::marker::PhantomData, } } @@ -587,7 +638,8 @@ impl InvoiceBuilder InvoiceBuilder { - /// Sets the timestamp. + /// Sets the timestamp to a specific [`SystemTime`]. 
+ #[cfg(feature = "std")] pub fn timestamp(mut self, time: SystemTime) -> InvoiceBuilder { match PositiveTimestamp::from_system_time(time) { Ok(t) => self.timestamp = Some(t), @@ -597,7 +649,18 @@ impl InvoiceBuilder InvoiceBuilder { + match PositiveTimestamp::from_duration_since_epoch(time) { + Ok(t) => self.timestamp = Some(t), + Err(e) => self.error = Some(e), + } + + self.set_flags() + } + + /// Sets the timestamp to the current system time. + #[cfg(feature = "std")] pub fn current_timestamp(mut self) -> InvoiceBuilder { let now = PositiveTimestamp::from_system_time(SystemTime::now()); self.timestamp = Some(now.expect("for the foreseeable future this shouldn't happen")); @@ -774,7 +837,7 @@ impl SignedRawInvoice { macro_rules! find_extract { ($iter:expr, $enm:pat, $enm_var:ident) => { find_all_extract!($iter, $enm, $enm_var).next() - }; + }; } /// Finds the all elements of an enum stream of a given variant and extracts one member of the @@ -797,11 +860,11 @@ macro_rules! find_extract { /// ``` macro_rules! find_all_extract { ($iter:expr, $enm:pat, $enm_var:ident) => { - $iter.filter_map(|tf| match *tf { + $iter.filter_map(|tf| match *tf { $enm => Some($enm_var), _ => None, }) - }; + }; } #[allow(missing_docs)] @@ -947,49 +1010,52 @@ impl PositiveTimestamp { if unix_seconds > SYSTEM_TIME_MAX_UNIX_TIMESTAMP - MAX_EXPIRY_TIME { Err(CreationError::TimestampOutOfBounds) } else { - Ok(PositiveTimestamp(UNIX_EPOCH + Duration::from_secs(unix_seconds))) + Ok(PositiveTimestamp(Duration::from_secs(unix_seconds))) } } /// Create a new `PositiveTimestamp` from a `SystemTime` with a corresponding unix timestamp in - /// the Range `0...SYSTEM_TIME_MAX_UNIX_TIMESTAMP - MAX_EXPIRY_TIME`, otherwise return a + /// the range `0...SYSTEM_TIME_MAX_UNIX_TIMESTAMP - MAX_EXPIRY_TIME`, otherwise return a /// `CreationError::TimestampOutOfBounds`. + #[cfg(feature = "std")] pub fn from_system_time(time: SystemTime) -> Result { - if time - .duration_since(UNIX_EPOCH) - .map(|t| t.as_secs() <= SYSTEM_TIME_MAX_UNIX_TIMESTAMP - MAX_EXPIRY_TIME) - .unwrap_or(true) - { - Ok(PositiveTimestamp(time)) - } else { + time.duration_since(SystemTime::UNIX_EPOCH) + .map(Self::from_duration_since_epoch) + .unwrap_or(Err(CreationError::TimestampOutOfBounds)) + } + + /// Create a new `PositiveTimestamp` from a `Duration` since the UNIX epoch in + /// the range `0...SYSTEM_TIME_MAX_UNIX_TIMESTAMP - MAX_EXPIRY_TIME`, otherwise return a + /// `CreationError::TimestampOutOfBounds`. 
+ pub fn from_duration_since_epoch(duration: Duration) -> Result { + if duration.as_secs() <= SYSTEM_TIME_MAX_UNIX_TIMESTAMP - MAX_EXPIRY_TIME { + Ok(PositiveTimestamp(duration)) + } else { Err(CreationError::TimestampOutOfBounds) } } /// Returns the UNIX timestamp representing the stored time pub fn as_unix_timestamp(&self) -> u64 { - self.0.duration_since(UNIX_EPOCH) - .expect("ensured by type contract/constructors") - .as_secs() + self.0.as_secs() } - /// Returns a reference to the internal `SystemTime` time representation - pub fn as_time(&self) -> &SystemTime { - &self.0 + /// Returns the duration of the stored time since the UNIX epoch + pub fn as_duration_since_epoch(&self) -> Duration { + self.0 } -} -impl Into for PositiveTimestamp { - fn into(self) -> SystemTime { - self.0 + /// Returns the `SystemTime` representing the stored time + #[cfg(feature = "std")] + pub fn as_time(&self) -> SystemTime { + SystemTime::UNIX_EPOCH + self.0 } } -impl Deref for PositiveTimestamp { - type Target = SystemTime; - - fn deref(&self) -> &Self::Target { - &self.0 +#[cfg(feature = "std")] +impl Into for PositiveTimestamp { + fn into(self) -> SystemTime { + SystemTime::UNIX_EPOCH + self.0 } } @@ -1130,11 +1196,17 @@ impl Invoice { Ok(invoice) } - /// Returns the `Invoice`'s timestamp (should equal it's creation time) - pub fn timestamp(&self) -> &SystemTime { + /// Returns the `Invoice`'s timestamp (should equal its creation time) + #[cfg(feature = "std")] + pub fn timestamp(&self) -> SystemTime { self.signed_invoice.raw_invoice().data.timestamp.as_time() } + /// Returns the `Invoice`'s timestamp as a duration since the UNIX epoch + pub fn duration_since_epoch(&self) -> Duration { + self.signed_invoice.raw_invoice().data.timestamp.0 + } + /// Returns an iterator over all tagged fields of this Invoice. /// /// (C-not exported) As there is not yet a manual mapping for a FilterMap @@ -1187,6 +1259,27 @@ impl Invoice { .unwrap_or(Duration::from_secs(DEFAULT_EXPIRY_TIME)) } + /// Returns whether the invoice has expired. + #[cfg(feature = "std")] + pub fn is_expired(&self) -> bool { + Self::is_expired_from_epoch(&self.timestamp(), self.expiry_time()) + } + + /// Returns whether the expiry time from the given epoch has passed. + #[cfg(feature = "std")] + pub(crate) fn is_expired_from_epoch(epoch: &SystemTime, expiry_time: Duration) -> bool { + match epoch.elapsed() { + Ok(elapsed) => elapsed > expiry_time, + Err(_) => false, + } + } + + /// Returns whether the expiry time would pass at the given point in time. + /// `at_time` is the timestamp as a duration since the UNIX epoch. + pub fn would_expire(&self, at_time: Duration) -> bool { + self.duration_since_epoch() + self.expiry_time() < at_time + } + /// Returns the invoice's `min_final_cltv_expiry` time, if present, otherwise /// [`DEFAULT_MIN_FINAL_CLTV_EXPIRY`]. pub fn min_final_cltv_expiry(&self) -> u64 { @@ -1208,10 +1301,10 @@ impl Invoice { } /// Returns a list of all routes included in the invoice as the underlying hints - pub fn route_hints(&self) -> Vec<&RouteHint> { + pub fn route_hints(&self) -> Vec { find_all_extract!( self.signed_invoice.known_tagged_fields(), TaggedField::PrivateRoute(ref x), x - ).map(|route| &**route).collect() + ).map(|route| (**route).clone()).collect() } /// Returns the currency for which the invoice was issued @@ -1219,8 +1312,13 @@ impl Invoice { self.signed_invoice.currency() } + /// Returns the amount if specified in the invoice as millisatoshis. 
+ pub fn amount_milli_satoshis(&self) -> Option { + self.signed_invoice.amount_pico_btc().map(|v| v / 10) + } + /// Returns the amount if specified in the invoice as pico . - pub fn amount_pico_btc(&self) -> Option { + fn amount_pico_btc(&self) -> Option { self.signed_invoice.amount_pico_btc() } } @@ -1393,6 +1491,9 @@ pub enum CreationError { /// The supplied expiry time could cause an overflow if added to a `PositiveTimestamp` ExpiryTimeOutOfBounds, + + /// The supplied millisatoshi amount was greater than the total bitcoin supply. + InvalidAmount, } impl Display for CreationError { @@ -1402,10 +1503,12 @@ impl Display for CreationError { CreationError::RouteTooLong => f.write_str("The specified route has too many hops and can't be encoded"), CreationError::TimestampOutOfBounds => f.write_str("The unix timestamp of the supplied date is <0 or can't be represented as `SystemTime`"), CreationError::ExpiryTimeOutOfBounds => f.write_str("The supplied expiry time could cause an overflow if added to a `PositiveTimestamp`"), + CreationError::InvalidAmount => f.write_str("The supplied millisatoshi amount was greater than the total bitcoin supply"), } } } +#[cfg(feature = "std")] impl std::error::Error for CreationError { } /// Errors that may occur when converting a `RawInvoice` to an `Invoice`. They relate to the @@ -1461,6 +1564,7 @@ impl Display for SemanticError { } } +#[cfg(feature = "std")] impl std::error::Error for SemanticError { } /// When signing using a fallible method either an user-supplied `SignError` or a `CreationError` @@ -1492,15 +1596,15 @@ mod test { fn test_system_time_bounds_assumptions() { ::check_platform(); - assert_eq!( - ::PositiveTimestamp::from_unix_timestamp(::SYSTEM_TIME_MAX_UNIX_TIMESTAMP + 1), - Err(::CreationError::TimestampOutOfBounds) - ); + assert_eq!( + ::PositiveTimestamp::from_unix_timestamp(::SYSTEM_TIME_MAX_UNIX_TIMESTAMP + 1), + Err(::CreationError::TimestampOutOfBounds) + ); - assert_eq!( - ::ExpiryTime::from_seconds(::MAX_EXPIRY_TIME + 1), - Err(::CreationError::ExpiryTimeOutOfBounds) - ); + assert_eq!( + ::ExpiryTime::from_seconds(::MAX_EXPIRY_TIME + 1), + Err(::CreationError::ExpiryTimeOutOfBounds) + ); } #[test] @@ -1703,7 +1807,7 @@ mod test { let builder = InvoiceBuilder::new(Currency::Bitcoin) .description("Test".into()) .payment_hash(sha256::Hash::from_slice(&[0;32][..]).unwrap()) - .current_timestamp(); + .duration_since_epoch(Duration::from_secs(1234567)); let invoice = builder.clone() .amount_milli_satoshis(1500) @@ -1732,7 +1836,7 @@ mod test { let builder = InvoiceBuilder::new(Currency::Bitcoin) .payment_hash(sha256::Hash::from_slice(&[0;32][..]).unwrap()) - .current_timestamp() + .duration_since_epoch(Duration::from_secs(1234567)) .min_final_cltv_expiry(144); let too_long_string = String::from_iter( @@ -1848,7 +1952,7 @@ mod test { let builder = InvoiceBuilder::new(Currency::BitcoinTestnet) .amount_milli_satoshis(123) - .timestamp(UNIX_EPOCH + Duration::from_secs(1234567)) + .duration_since_epoch(Duration::from_secs(1234567)) .payee_pub_key(public_key.clone()) .expiry_time(Duration::from_secs(54321)) .min_final_cltv_expiry(144) @@ -1867,8 +1971,10 @@ mod test { assert!(invoice.check_signature().is_ok()); assert_eq!(invoice.tagged_fields().count(), 10); + assert_eq!(invoice.amount_milli_satoshis(), Some(123)); assert_eq!(invoice.amount_pico_btc(), Some(1230)); assert_eq!(invoice.currency(), Currency::BitcoinTestnet); + #[cfg(feature = "std")] assert_eq!( invoice.timestamp().duration_since(UNIX_EPOCH).unwrap().as_secs(), 1234567 @@ -1900,7 
+2006,7 @@ mod test { .description("Test".into()) .payment_hash(sha256::Hash::from_slice(&[0;32][..]).unwrap()) .payment_secret(PaymentSecret([0; 32])) - .current_timestamp() + .duration_since_epoch(Duration::from_secs(1234567)) .build_raw() .unwrap() .sign::<_, ()>(|hash| { @@ -1913,5 +2019,30 @@ mod test { assert_eq!(invoice.min_final_cltv_expiry(), DEFAULT_MIN_FINAL_CLTV_EXPIRY); assert_eq!(invoice.expiry_time(), Duration::from_secs(DEFAULT_EXPIRY_TIME)); + assert!(!invoice.would_expire(Duration::from_secs(1234568))); + } + + #[test] + fn test_expiration() { + use ::*; + use secp256k1::Secp256k1; + use secp256k1::key::SecretKey; + + let signed_invoice = InvoiceBuilder::new(Currency::Bitcoin) + .description("Test".into()) + .payment_hash(sha256::Hash::from_slice(&[0;32][..]).unwrap()) + .payment_secret(PaymentSecret([0; 32])) + .duration_since_epoch(Duration::from_secs(1234567)) + .build_raw() + .unwrap() + .sign::<_, ()>(|hash| { + let privkey = SecretKey::from_slice(&[41; 32]).unwrap(); + let secp_ctx = Secp256k1::new(); + Ok(secp_ctx.sign_recoverable(hash, &privkey)) + }) + .unwrap(); + let invoice = Invoice::from_signed(signed_invoice).unwrap(); + + assert!(invoice.would_expire(Duration::from_secs(1234567 + DEFAULT_EXPIRY_TIME + 1))); } } diff --git a/lightning-invoice/src/payment.rs b/lightning-invoice/src/payment.rs new file mode 100644 index 00000000000..d73ca538981 --- /dev/null +++ b/lightning-invoice/src/payment.rs @@ -0,0 +1,1787 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! A module for paying Lightning invoices and sending spontaneous payments. +//! +//! Defines an [`InvoicePayer`] utility for sending payments, parameterized by [`Payer`] and +//! [`Router`] traits. Implementations of [`Payer`] provide the payer's node id, channels, and means +//! to send a payment over a [`Route`]. Implementations of [`Router`] find a [`Route`] between payer +//! and payee using information provided by the payer and from the payee's [`Invoice`], when +//! applicable. +//! +//! [`InvoicePayer`] is parameterized by a [`LockableScore`], which it uses for scoring failed and +//! successful payment paths upon receiving [`Event::PaymentPathFailed`] and +//! [`Event::PaymentPathSuccessful`] events, respectively. +//! +//! [`InvoicePayer`] is capable of retrying failed payments. It accomplishes this by implementing +//! [`EventHandler`] which decorates a user-provided handler. It will intercept any +//! [`Event::PaymentPathFailed`] events and retry the failed paths for a fixed number of total +//! attempts or until retry is no longer possible. In such a situation, [`InvoicePayer`] will pass +//! along the events to the user-provided handler. +//! +//! # Example +//! +//! ``` +//! # extern crate lightning; +//! # extern crate lightning_invoice; +//! # extern crate secp256k1; +//! # +//! # #[cfg(feature = "no-std")] +//! # extern crate core2; +//! # +//! # use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; +//! # use lightning::ln::channelmanager::{ChannelDetails, PaymentId, PaymentSendFailure}; +//! # use lightning::ln::msgs::LightningError; +//! # use lightning::routing::scoring::Score; +//! # use lightning::routing::network_graph::NodeId; +//! # use lightning::routing::router::{Route, RouteHop, RouteParameters}; +//! 
# use lightning::util::events::{Event, EventHandler, EventsProvider};
+//! # use lightning::util::logger::{Logger, Record};
+//! # use lightning::util::ser::{Writeable, Writer};
+//! # use lightning_invoice::Invoice;
+//! # use lightning_invoice::payment::{InvoicePayer, Payer, RetryAttempts, Router};
+//! # use secp256k1::key::PublicKey;
+//! # use std::cell::RefCell;
+//! # use std::ops::Deref;
+//! #
+//! # #[cfg(not(feature = "std"))]
+//! # use core2::io;
+//! # #[cfg(feature = "std")]
+//! # use std::io;
+//! #
+//! # struct FakeEventProvider {}
+//! # impl EventsProvider for FakeEventProvider {
+//! # fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {}
+//! # }
+//! #
+//! # struct FakePayer {}
+//! # impl Payer for FakePayer {
+//! # fn node_id(&self) -> PublicKey { unimplemented!() }
+//! # fn first_hops(&self) -> Vec<ChannelDetails> { unimplemented!() }
+//! # fn send_payment(
+//! # &self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>
+//! # ) -> Result<PaymentId, PaymentSendFailure> { unimplemented!() }
+//! # fn send_spontaneous_payment(
+//! # &self, route: &Route, payment_preimage: PaymentPreimage
+//! # ) -> Result<PaymentId, PaymentSendFailure> { unimplemented!() }
+//! # fn retry_payment(
+//! # &self, route: &Route, payment_id: PaymentId
+//! # ) -> Result<(), PaymentSendFailure> { unimplemented!() }
+//! # fn abandon_payment(&self, payment_id: PaymentId) { unimplemented!() }
+//! # }
+//! #
+//! # struct FakeRouter {}
+//! # impl<S: Score> Router<S> for FakeRouter {
+//! # fn find_route(
+//! # &self, payer: &PublicKey, params: &RouteParameters, payment_hash: &PaymentHash,
+//! # first_hops: Option<&[&ChannelDetails]>, scorer: &S
+//! # ) -> Result<Route, LightningError> { unimplemented!() }
+//! # }
+//! #
+//! # struct FakeScorer {}
+//! # impl Writeable for FakeScorer {
+//! # fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> { unimplemented!(); }
+//! # }
+//! # impl Score for FakeScorer {
+//! # fn channel_penalty_msat(
+//! # &self, _short_channel_id: u64, _send_amt: u64, _chan_amt: Option<u64>, _source: &NodeId, _target: &NodeId
+//! # ) -> u64 { 0 }
+//! # fn payment_path_failed(&mut self, _path: &[&RouteHop], _short_channel_id: u64) {}
+//! # fn payment_path_successful(&mut self, _path: &[&RouteHop]) {}
+//! # }
+//! #
+//! # struct FakeLogger {}
+//! # impl Logger for FakeLogger {
+//! # fn log(&self, record: &Record) { unimplemented!() }
+//! # }
+//! #
+//! # fn main() {
+//! let event_handler = |event: &Event| {
+//! match event {
+//! Event::PaymentPathFailed { .. } => println!("payment failed after retries"),
+//! Event::PaymentSent { .. } => println!("payment successful"),
+//! _ => {},
+//! }
+//! };
+//! # let payer = FakePayer {};
+//! # let router = FakeRouter {};
+//! # let scorer = RefCell::new(FakeScorer {});
+//! # let logger = FakeLogger {};
+//! let invoice_payer = InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2));
+//!
+//! let invoice = "...";
+//! if let Ok(invoice) = invoice.parse::<Invoice>() {
+//! invoice_payer.pay_invoice(&invoice).unwrap();
+//!
+//! # let event_provider = FakeEventProvider {};
+//! loop {
+//! event_provider.process_pending_events(&invoice_payer);
+//! }
+//! }
+//! # }
+//! ```
+//!
+//! # Note
+//!
+//! The [`Route`] is computed before each payment attempt. Any updates affecting path finding such
+//! as updates to the network graph or changes to channel scores should be applied prior to
+//! retries, typically by way of composing [`EventHandler`]s accordingly.
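As the Note above suggests, routing data should be refreshed before the InvoicePayer computes a retry route. A minimal sketch of that composition, reusing the `invoice_payer` and `event_provider` from the example above and assuming a hypothetical application-defined `sync_routing_data()` step (not part of this patch) that applies any queued gossip and scorer updates:

    // Outer handler: refresh routing data first, then let the InvoicePayer decide
    // whether to retry the failed path or forward the event to the user-provided handler.
    let composed_handler = |event: &Event| {
        sync_routing_data();               // hypothetical application-defined step
        invoice_payer.handle_event(event); // retries internally, then delegates
    };
    event_provider.process_pending_events(&composed_handler);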
+ +use crate::Invoice; + +use bitcoin_hashes::Hash; +use bitcoin_hashes::sha256::Hash as Sha256; + +use crate::prelude::*; +use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::ln::channelmanager::{ChannelDetails, PaymentId, PaymentSendFailure}; +use lightning::ln::msgs::LightningError; +use lightning::routing::scoring::{LockableScore, Score}; +use lightning::routing::router::{Payee, Route, RouteParameters}; +use lightning::util::events::{Event, EventHandler}; +use lightning::util::logger::Logger; +use crate::sync::Mutex; + +use secp256k1::key::PublicKey; + +use core::ops::Deref; +use core::time::Duration; +#[cfg(feature = "std")] +use std::time::SystemTime; + +/// A utility for paying [`Invoice`]s and sending spontaneous payments. +/// +/// See [module-level documentation] for details. +/// +/// [module-level documentation]: crate::payment +pub struct InvoicePayer +where + P::Target: Payer, + R: for <'a> Router<<::Target as LockableScore<'a>>::Locked>, + S::Target: for <'a> LockableScore<'a>, + L::Target: Logger, +{ + payer: P, + router: R, + scorer: S, + logger: L, + event_handler: E, + /// Caches the overall attempts at making a payment, which is updated prior to retrying. + payment_cache: Mutex>, + retry_attempts: RetryAttempts, +} + +/// A trait defining behavior of an [`Invoice`] payer. +pub trait Payer { + /// Returns the payer's node id. + fn node_id(&self) -> PublicKey; + + /// Returns the payer's channels. + fn first_hops(&self) -> Vec; + + /// Sends a payment over the Lightning Network using the given [`Route`]. + fn send_payment( + &self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option + ) -> Result; + + /// Sends a spontaneous payment over the Lightning Network using the given [`Route`]. + fn send_spontaneous_payment( + &self, route: &Route, payment_preimage: PaymentPreimage + ) -> Result; + + /// Retries a failed payment path for the [`PaymentId`] using the given [`Route`]. + fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure>; + + /// Signals that no further retries for the given payment will occur. + fn abandon_payment(&self, payment_id: PaymentId); +} + +/// A trait defining behavior for routing an [`Invoice`] payment. +pub trait Router { + /// Finds a [`Route`] between `payer` and `payee` for a payment with the given values. + fn find_route( + &self, payer: &PublicKey, params: &RouteParameters, payment_hash: &PaymentHash, + first_hops: Option<&[&ChannelDetails]>, scorer: &S + ) -> Result; +} + +/// Number of attempts to retry payment path failures for an [`Invoice`]. +/// +/// Note that this is the number of *path* failures, not full payment retries. For multi-path +/// payments, if this is less than the total number of paths, we will never even retry all of the +/// payment's paths. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +pub struct RetryAttempts(pub usize); + +/// An error that may occur when making a payment. +#[derive(Clone, Debug)] +pub enum PaymentError { + /// An error resulting from the provided [`Invoice`] or payment hash. + Invoice(&'static str), + /// An error occurring when finding a route. + Routing(LightningError), + /// An error occurring when sending a payment. + Sending(PaymentSendFailure), +} + +impl InvoicePayer +where + P::Target: Payer, + R: for <'a> Router<<::Target as LockableScore<'a>>::Locked>, + S::Target: for <'a> LockableScore<'a>, + L::Target: Logger, +{ + /// Creates an invoice payer that retries failed payment paths. 
+ /// + /// Will forward any [`Event::PaymentPathFailed`] events to the decorated `event_handler` once + /// `retry_attempts` has been exceeded for a given [`Invoice`]. + pub fn new( + payer: P, router: R, scorer: S, logger: L, event_handler: E, retry_attempts: RetryAttempts + ) -> Self { + Self { + payer, + router, + scorer, + logger, + event_handler, + payment_cache: Mutex::new(HashMap::new()), + retry_attempts, + } + } + + /// Pays the given [`Invoice`], caching it for later use in case a retry is needed. + /// + /// You should ensure that the `invoice.payment_hash()` is unique and the same payment_hash has + /// never been paid before. Because [`InvoicePayer`] is stateless no effort is made to do so + /// for you. + pub fn pay_invoice(&self, invoice: &Invoice) -> Result { + if invoice.amount_milli_satoshis().is_none() { + Err(PaymentError::Invoice("amount missing")) + } else { + self.pay_invoice_using_amount(invoice, None) + } + } + + /// Pays the given zero-value [`Invoice`] using the given amount, caching it for later use in + /// case a retry is needed. + /// + /// You should ensure that the `invoice.payment_hash()` is unique and the same payment_hash has + /// never been paid before. Because [`InvoicePayer`] is stateless no effort is made to do so + /// for you. + pub fn pay_zero_value_invoice( + &self, invoice: &Invoice, amount_msats: u64 + ) -> Result { + if invoice.amount_milli_satoshis().is_some() { + Err(PaymentError::Invoice("amount unexpected")) + } else { + self.pay_invoice_using_amount(invoice, Some(amount_msats)) + } + } + + fn pay_invoice_using_amount( + &self, invoice: &Invoice, amount_msats: Option + ) -> Result { + debug_assert!(invoice.amount_milli_satoshis().is_some() ^ amount_msats.is_some()); + + let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner()); + match self.payment_cache.lock().unwrap().entry(payment_hash) { + hash_map::Entry::Occupied(_) => return Err(PaymentError::Invoice("payment pending")), + hash_map::Entry::Vacant(entry) => entry.insert(0), + }; + + let payment_secret = Some(invoice.payment_secret().clone()); + let mut payee = Payee::from_node_id(invoice.recover_payee_pub_key()) + .with_expiry_time(expiry_time_from_unix_epoch(&invoice).as_secs()) + .with_route_hints(invoice.route_hints()); + if let Some(features) = invoice.features() { + payee = payee.with_features(features.clone()); + } + let params = RouteParameters { + payee, + final_value_msat: invoice.amount_milli_satoshis().or(amount_msats).unwrap(), + final_cltv_expiry_delta: invoice.min_final_cltv_expiry() as u32, + }; + + let send_payment = |route: &Route| { + self.payer.send_payment(route, payment_hash, &payment_secret) + }; + self.pay_internal(¶ms, payment_hash, send_payment) + .map_err(|e| { self.payment_cache.lock().unwrap().remove(&payment_hash); e }) + } + + /// Pays `pubkey` an amount using the hash of the given preimage, caching it for later use in + /// case a retry is needed. + /// + /// You should ensure that `payment_preimage` is unique and that its `payment_hash` has never + /// been paid before. Because [`InvoicePayer`] is stateless no effort is made to do so for you. 
+ pub fn pay_pubkey( + &self, pubkey: PublicKey, payment_preimage: PaymentPreimage, amount_msats: u64, + final_cltv_expiry_delta: u32 + ) -> Result { + let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + match self.payment_cache.lock().unwrap().entry(payment_hash) { + hash_map::Entry::Occupied(_) => return Err(PaymentError::Invoice("payment pending")), + hash_map::Entry::Vacant(entry) => entry.insert(0), + }; + + let params = RouteParameters { + payee: Payee::for_keysend(pubkey), + final_value_msat: amount_msats, + final_cltv_expiry_delta, + }; + + let send_payment = |route: &Route| { + self.payer.send_spontaneous_payment(route, payment_preimage) + }; + self.pay_internal(¶ms, payment_hash, send_payment) + .map_err(|e| { self.payment_cache.lock().unwrap().remove(&payment_hash); e }) + } + + fn pay_internal Result + Copy>( + &self, params: &RouteParameters, payment_hash: PaymentHash, send_payment: F, + ) -> Result { + #[cfg(feature = "std")] { + if has_expired(params) { + log_trace!(self.logger, "Invoice expired prior to send for payment {}", log_bytes!(payment_hash.0)); + return Err(PaymentError::Invoice("Invoice expired prior to send")); + } + } + + let payer = self.payer.node_id(); + let first_hops = self.payer.first_hops(); + let route = self.router.find_route( + &payer, params, &payment_hash, Some(&first_hops.iter().collect::>()), + &self.scorer.lock() + ).map_err(|e| PaymentError::Routing(e))?; + + match send_payment(&route) { + Ok(payment_id) => Ok(payment_id), + Err(e) => match e { + PaymentSendFailure::ParameterError(_) => Err(e), + PaymentSendFailure::PathParameterError(_) => Err(e), + PaymentSendFailure::AllFailedRetrySafe(_) => { + let mut payment_cache = self.payment_cache.lock().unwrap(); + let retry_count = payment_cache.get_mut(&payment_hash).unwrap(); + if *retry_count >= self.retry_attempts.0 { + Err(e) + } else { + *retry_count += 1; + core::mem::drop(payment_cache); + Ok(self.pay_internal(params, payment_hash, send_payment)?) + } + }, + PaymentSendFailure::PartialFailure { failed_paths_retry, payment_id, .. } => { + if let Some(retry_data) = failed_paths_retry { + // Some paths were sent, even if we failed to send the full MPP value our + // recipient may misbehave and claim the funds, at which point we have to + // consider the payment sent, so return `Ok()` here, ignoring any retry + // errors. + let _ = self.retry_payment(payment_id, payment_hash, &retry_data); + Ok(payment_id) + } else { + // This may happen if we send a payment and some paths fail, but + // only due to a temporary monitor failure or the like, implying + // they're really in-flight, but we haven't sent the initial + // HTLC-Add messages yet. 
+ Ok(payment_id) + } + }, + }, + }.map_err(|e| PaymentError::Sending(e)) + } + + fn retry_payment( + &self, payment_id: PaymentId, payment_hash: PaymentHash, params: &RouteParameters + ) -> Result<(), ()> { + let max_payment_attempts = self.retry_attempts.0 + 1; + let attempts = *self.payment_cache.lock().unwrap() + .entry(payment_hash) + .and_modify(|attempts| *attempts += 1) + .or_insert(1); + + if attempts >= max_payment_attempts { + log_trace!(self.logger, "Payment {} exceeded maximum attempts; not retrying (attempts: {})", log_bytes!(payment_hash.0), attempts); + return Err(()); + } + + #[cfg(feature = "std")] { + if has_expired(params) { + log_trace!(self.logger, "Invoice expired for payment {}; not retrying (attempts: {})", log_bytes!(payment_hash.0), attempts); + return Err(()); + } + } + + let payer = self.payer.node_id(); + let first_hops = self.payer.first_hops(); + let route = self.router.find_route( + &payer, ¶ms, &payment_hash, Some(&first_hops.iter().collect::>()), + &self.scorer.lock() + ); + if route.is_err() { + log_trace!(self.logger, "Failed to find a route for payment {}; not retrying (attempts: {})", log_bytes!(payment_hash.0), attempts); + return Err(()); + } + + match self.payer.retry_payment(&route.unwrap(), payment_id) { + Ok(()) => Ok(()), + Err(PaymentSendFailure::ParameterError(_)) | + Err(PaymentSendFailure::PathParameterError(_)) => { + log_trace!(self.logger, "Failed to retry for payment {} due to bogus route/payment data, not retrying.", log_bytes!(payment_hash.0)); + Err(()) + }, + Err(PaymentSendFailure::AllFailedRetrySafe(_)) => { + self.retry_payment(payment_id, payment_hash, params) + }, + Err(PaymentSendFailure::PartialFailure { failed_paths_retry, .. }) => { + if let Some(retry) = failed_paths_retry { + // Always return Ok for the same reason as noted in pay_internal. + let _ = self.retry_payment(payment_id, payment_hash, &retry); + } + Ok(()) + }, + } + } + + /// Removes the payment cached by the given payment hash. + /// + /// Should be called once a payment has failed or succeeded if not using [`InvoicePayer`] as an + /// [`EventHandler`]. Otherwise, calling this method is unnecessary. + pub fn remove_cached_payment(&self, payment_hash: &PaymentHash) { + self.payment_cache.lock().unwrap().remove(payment_hash); + } +} + +fn expiry_time_from_unix_epoch(invoice: &Invoice) -> Duration { + invoice.signed_invoice.raw_invoice.data.timestamp.0 + invoice.expiry_time() +} + +#[cfg(feature = "std")] +fn has_expired(params: &RouteParameters) -> bool { + if let Some(expiry_time) = params.payee.expiry_time { + Invoice::is_expired_from_epoch(&SystemTime::UNIX_EPOCH, Duration::from_secs(expiry_time)) + } else { false } +} + +impl EventHandler for InvoicePayer +where + P::Target: Payer, + R: for <'a> Router<<::Target as LockableScore<'a>>::Locked>, + S::Target: for <'a> LockableScore<'a>, + L::Target: Logger, +{ + fn handle_event(&self, event: &Event) { + match event { + Event::PaymentPathFailed { + payment_id, payment_hash, rejected_by_dest, path, short_channel_id, retry, .. 
+ } => { + if let Some(short_channel_id) = short_channel_id { + let path = path.iter().collect::>(); + self.scorer.lock().payment_path_failed(&path, *short_channel_id); + } + + if payment_id.is_none() { + log_trace!(self.logger, "Payment {} has no id; not retrying", log_bytes!(payment_hash.0)); + } else if *rejected_by_dest { + log_trace!(self.logger, "Payment {} rejected by destination; not retrying", log_bytes!(payment_hash.0)); + self.payer.abandon_payment(payment_id.unwrap()); + } else if retry.is_none() { + log_trace!(self.logger, "Payment {} missing retry params; not retrying", log_bytes!(payment_hash.0)); + self.payer.abandon_payment(payment_id.unwrap()); + } else if self.retry_payment(payment_id.unwrap(), *payment_hash, retry.as_ref().unwrap()).is_ok() { + // We retried at least somewhat, don't provide the PaymentPathFailed event to the user. + return; + } else { + self.payer.abandon_payment(payment_id.unwrap()); + } + }, + Event::PaymentFailed { payment_hash, .. } => { + self.remove_cached_payment(&payment_hash); + }, + Event::PaymentPathSuccessful { path, .. } => { + let path = path.iter().collect::>(); + self.scorer.lock().payment_path_successful(&path); + }, + Event::PaymentSent { payment_hash, .. } => { + let mut payment_cache = self.payment_cache.lock().unwrap(); + let attempts = payment_cache + .remove(payment_hash) + .map_or(1, |attempts| attempts + 1); + log_trace!(self.logger, "Payment {} succeeded (attempts: {})", log_bytes!(payment_hash.0), attempts); + }, + _ => {}, + } + + // Delegate to the decorated event handler unless the payment is retried. + self.event_handler.handle_event(event) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{InvoiceBuilder, Currency}; + use utils::create_invoice_from_channelmanager_and_duration_since_epoch; + use bitcoin_hashes::sha256::Hash as Sha256; + use lightning::ln::PaymentPreimage; + use lightning::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures}; + use lightning::ln::functional_test_utils::*; + use lightning::ln::msgs::{ChannelMessageHandler, ErrorAction, LightningError}; + use lightning::routing::network_graph::NodeId; + use lightning::routing::router::{Payee, Route, RouteHop}; + use lightning::util::test_utils::TestLogger; + use lightning::util::errors::APIError; + use lightning::util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; + use secp256k1::{SecretKey, PublicKey, Secp256k1}; + use std::cell::RefCell; + use std::collections::VecDeque; + use std::time::{SystemTime, Duration}; + use DEFAULT_EXPIRY_TIME; + + fn invoice(payment_preimage: PaymentPreimage) -> Invoice { + let payment_hash = Sha256::hash(&payment_preimage.0); + let private_key = SecretKey::from_slice(&[42; 32]).unwrap(); + + InvoiceBuilder::new(Currency::Bitcoin) + .description("test".into()) + .payment_hash(payment_hash) + .payment_secret(PaymentSecret([0; 32])) + .duration_since_epoch(duration_since_epoch()) + .min_final_cltv_expiry(144) + .amount_milli_satoshis(128) + .build_signed(|hash| { + Secp256k1::new().sign_recoverable(hash, &private_key) + }) + .unwrap() + } + + fn duration_since_epoch() -> Duration { + #[cfg(feature = "std")] + let duration_since_epoch = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); + #[cfg(not(feature = "std"))] + let duration_since_epoch = Duration::from_secs(1234567); + duration_since_epoch + } + + fn zero_value_invoice(payment_preimage: PaymentPreimage) -> Invoice { + let payment_hash = Sha256::hash(&payment_preimage.0); + let private_key = 
SecretKey::from_slice(&[42; 32]).unwrap(); + + InvoiceBuilder::new(Currency::Bitcoin) + .description("test".into()) + .payment_hash(payment_hash) + .payment_secret(PaymentSecret([0; 32])) + .duration_since_epoch(duration_since_epoch()) + .min_final_cltv_expiry(144) + .build_signed(|hash| { + Secp256k1::new().sign_recoverable(hash, &private_key) + }) + .unwrap() + } + + #[cfg(feature = "std")] + fn expired_invoice(payment_preimage: PaymentPreimage) -> Invoice { + let payment_hash = Sha256::hash(&payment_preimage.0); + let private_key = SecretKey::from_slice(&[42; 32]).unwrap(); + let duration = duration_since_epoch() + .checked_sub(Duration::from_secs(DEFAULT_EXPIRY_TIME * 2)) + .unwrap(); + InvoiceBuilder::new(Currency::Bitcoin) + .description("test".into()) + .payment_hash(payment_hash) + .payment_secret(PaymentSecret([0; 32])) + .duration_since_epoch(duration) + .min_final_cltv_expiry(144) + .amount_milli_satoshis(128) + .build_signed(|hash| { + Secp256k1::new().sign_recoverable(hash, &private_key) + }) + .unwrap() + } + + fn pubkey() -> PublicKey { + PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap() + } + + #[test] + fn pays_invoice_on_first_attempt() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner()); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new().expect_send(Amount::ForInvoice(final_value_msat)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(0)); + + let payment_id = Some(invoice_payer.pay_invoice(&invoice).unwrap()); + assert_eq!(*payer.attempts.borrow(), 1); + + invoice_payer.handle_event(&Event::PaymentSent { + payment_id, payment_preimage, payment_hash, fee_paid_msat: None + }); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 1); + } + + #[test] + fn pays_invoice_on_retry() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner()); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new() + .expect_send(Amount::ForInvoice(final_value_msat)) + .expect_send(Amount::OnRetry(final_value_msat / 2)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = Some(invoice_payer.pay_invoice(&invoice).unwrap()); + assert_eq!(*payer.attempts.borrow(), 1); + + let event = Event::PaymentPathFailed { + payment_id, + payment_hash, + network_update: None, + rejected_by_dest: false, + all_paths_failed: false, + path: TestRouter::path_for_value(final_value_msat), + short_channel_id: None, + retry: Some(TestRouter::retry_for_invoice(&invoice)), + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), false); + 
assert_eq!(*payer.attempts.borrow(), 2); + + invoice_payer.handle_event(&Event::PaymentSent { + payment_id, payment_preimage, payment_hash, fee_paid_msat: None + }); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 2); + } + + #[test] + fn pays_invoice_on_partial_failure() { + let event_handler = |_: &_| { panic!() }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let retry = TestRouter::retry_for_invoice(&invoice); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new() + .fails_with_partial_failure(retry.clone(), OnAttempt(1)) + .fails_with_partial_failure(retry, OnAttempt(2)) + .expect_send(Amount::ForInvoice(final_value_msat)) + .expect_send(Amount::OnRetry(final_value_msat / 2)) + .expect_send(Amount::OnRetry(final_value_msat / 2)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + assert!(invoice_payer.pay_invoice(&invoice).is_ok()); + } + + #[test] + fn retries_payment_path_for_unknown_payment() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner()); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new() + .expect_send(Amount::OnRetry(final_value_msat / 2)) + .expect_send(Amount::OnRetry(final_value_msat / 2)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = Some(PaymentId([1; 32])); + let event = Event::PaymentPathFailed { + payment_id, + payment_hash, + network_update: None, + rejected_by_dest: false, + all_paths_failed: false, + path: TestRouter::path_for_value(final_value_msat), + short_channel_id: None, + retry: Some(TestRouter::retry_for_invoice(&invoice)), + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), false); + assert_eq!(*payer.attempts.borrow(), 1); + + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), false); + assert_eq!(*payer.attempts.borrow(), 2); + + invoice_payer.handle_event(&Event::PaymentSent { + payment_id, payment_preimage, payment_hash, fee_paid_msat: None + }); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 2); + } + + #[test] + fn fails_paying_invoice_after_max_retries() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new() + .expect_send(Amount::ForInvoice(final_value_msat)) + .expect_send(Amount::OnRetry(final_value_msat / 2)) + .expect_send(Amount::OnRetry(final_value_msat / 2)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = 
Some(invoice_payer.pay_invoice(&invoice).unwrap()); + assert_eq!(*payer.attempts.borrow(), 1); + + let event = Event::PaymentPathFailed { + payment_id, + payment_hash: PaymentHash(invoice.payment_hash().clone().into_inner()), + network_update: None, + rejected_by_dest: false, + all_paths_failed: true, + path: TestRouter::path_for_value(final_value_msat), + short_channel_id: None, + retry: Some(TestRouter::retry_for_invoice(&invoice)), + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), false); + assert_eq!(*payer.attempts.borrow(), 2); + + let event = Event::PaymentPathFailed { + payment_id, + payment_hash: PaymentHash(invoice.payment_hash().clone().into_inner()), + network_update: None, + rejected_by_dest: false, + all_paths_failed: false, + path: TestRouter::path_for_value(final_value_msat / 2), + short_channel_id: None, + retry: Some(RouteParameters { + final_value_msat: final_value_msat / 2, ..TestRouter::retry_for_invoice(&invoice) + }), + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), false); + assert_eq!(*payer.attempts.borrow(), 3); + + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 3); + } + + #[test] + fn fails_paying_invoice_with_missing_retry_params() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new().expect_send(Amount::ForInvoice(final_value_msat)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = Some(invoice_payer.pay_invoice(&invoice).unwrap()); + assert_eq!(*payer.attempts.borrow(), 1); + + let event = Event::PaymentPathFailed { + payment_id, + payment_hash: PaymentHash(invoice.payment_hash().clone().into_inner()), + network_update: None, + rejected_by_dest: false, + all_paths_failed: false, + path: vec![], + short_channel_id: None, + retry: None, + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 1); + } + + // Expiration is checked only in an std environment + #[cfg(feature = "std")] + #[test] + fn fails_paying_invoice_after_expiration() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payer = TestPayer::new(); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = expired_invoice(payment_preimage); + if let PaymentError::Invoice(msg) = invoice_payer.pay_invoice(&invoice).unwrap_err() { + assert_eq!(msg, "Invoice expired prior to send"); + } else { panic!("Expected Invoice Error"); } + } + + // Expiration is checked only in an std environment + #[cfg(feature = "std")] + #[test] + fn fails_retrying_invoice_after_expiration() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = 
PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new().expect_send(Amount::ForInvoice(final_value_msat)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = Some(invoice_payer.pay_invoice(&invoice).unwrap()); + assert_eq!(*payer.attempts.borrow(), 1); + + let mut retry_data = TestRouter::retry_for_invoice(&invoice); + retry_data.payee.expiry_time = Some(SystemTime::now() + .checked_sub(Duration::from_secs(2)).unwrap() + .duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs()); + let event = Event::PaymentPathFailed { + payment_id, + payment_hash: PaymentHash(invoice.payment_hash().clone().into_inner()), + network_update: None, + rejected_by_dest: false, + all_paths_failed: false, + path: vec![], + short_channel_id: None, + retry: Some(retry_data), + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 1); + } + + #[test] + fn fails_paying_invoice_after_retry_error() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new() + .fails_on_attempt(2) + .expect_send(Amount::ForInvoice(final_value_msat)) + .expect_send(Amount::OnRetry(final_value_msat / 2)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = Some(invoice_payer.pay_invoice(&invoice).unwrap()); + assert_eq!(*payer.attempts.borrow(), 1); + + let event = Event::PaymentPathFailed { + payment_id, + payment_hash: PaymentHash(invoice.payment_hash().clone().into_inner()), + network_update: None, + rejected_by_dest: false, + all_paths_failed: false, + path: TestRouter::path_for_value(final_value_msat / 2), + short_channel_id: None, + retry: Some(TestRouter::retry_for_invoice(&invoice)), + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 2); + } + + #[test] + fn fails_paying_invoice_after_rejected_by_payee() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new().expect_send(Amount::ForInvoice(final_value_msat)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = Some(invoice_payer.pay_invoice(&invoice).unwrap()); + assert_eq!(*payer.attempts.borrow(), 1); + + let event = Event::PaymentPathFailed { + payment_id, + payment_hash: PaymentHash(invoice.payment_hash().clone().into_inner()), + network_update: None, + rejected_by_dest: true, + all_paths_failed: false, + path: vec![], + 
short_channel_id: None, + retry: Some(TestRouter::retry_for_invoice(&invoice)), + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 1); + } + + #[test] + fn fails_repaying_invoice_with_pending_payment() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new() + .expect_send(Amount::ForInvoice(final_value_msat)) + .expect_send(Amount::ForInvoice(final_value_msat)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(0)); + + let payment_id = Some(invoice_payer.pay_invoice(&invoice).unwrap()); + + // Cannot repay an invoice pending payment. + match invoice_payer.pay_invoice(&invoice) { + Err(PaymentError::Invoice("payment pending")) => {}, + Err(_) => panic!("unexpected error"), + Ok(_) => panic!("expected invoice error"), + } + + // Can repay an invoice once cleared from cache. + let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner()); + invoice_payer.remove_cached_payment(&payment_hash); + assert!(invoice_payer.pay_invoice(&invoice).is_ok()); + + // Cannot retry paying an invoice if cleared from cache. + invoice_payer.remove_cached_payment(&payment_hash); + let event = Event::PaymentPathFailed { + payment_id, + payment_hash, + network_update: None, + rejected_by_dest: false, + all_paths_failed: false, + path: vec![], + short_channel_id: None, + retry: Some(TestRouter::retry_for_invoice(&invoice)), + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), true); + } + + #[test] + fn fails_paying_invoice_with_routing_errors() { + let payer = TestPayer::new(); + let router = FailingRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, |_: &_| {}, RetryAttempts(0)); + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + match invoice_payer.pay_invoice(&invoice) { + Err(PaymentError::Routing(_)) => {}, + Err(_) => panic!("unexpected error"), + Ok(_) => panic!("expected routing error"), + } + } + + #[test] + fn fails_paying_invoice_with_sending_errors() { + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + + let payer = TestPayer::new() + .fails_on_attempt(1) + .expect_send(Amount::ForInvoice(final_value_msat)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, |_: &_| {}, RetryAttempts(0)); + + match invoice_payer.pay_invoice(&invoice) { + Err(PaymentError::Sending(_)) => {}, + Err(_) => panic!("unexpected error"), + Ok(_) => panic!("expected sending error"), + } + } + + #[test] + fn pays_zero_value_invoice_using_amount() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = zero_value_invoice(payment_preimage); + 
let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner()); + let final_value_msat = 100; + + let payer = TestPayer::new().expect_send(Amount::ForInvoice(final_value_msat)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(0)); + + let payment_id = + Some(invoice_payer.pay_zero_value_invoice(&invoice, final_value_msat).unwrap()); + assert_eq!(*payer.attempts.borrow(), 1); + + invoice_payer.handle_event(&Event::PaymentSent { + payment_id, payment_preimage, payment_hash, fee_paid_msat: None + }); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 1); + } + + #[test] + fn fails_paying_zero_value_invoice_with_amount() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payer = TestPayer::new(); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(0)); + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + + // Cannot repay an invoice pending payment. + match invoice_payer.pay_zero_value_invoice(&invoice, 100) { + Err(PaymentError::Invoice("amount unexpected")) => {}, + Err(_) => panic!("unexpected error"), + Ok(_) => panic!("expected invoice error"), + } + } + + #[test] + fn pays_pubkey_with_amount() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let pubkey = pubkey(); + let payment_preimage = PaymentPreimage([1; 32]); + let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + let final_value_msat = 100; + let final_cltv_expiry_delta = 42; + + let payer = TestPayer::new() + .expect_send(Amount::Spontaneous(final_value_msat)) + .expect_send(Amount::OnRetry(final_value_msat)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new()); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = Some(invoice_payer.pay_pubkey( + pubkey, payment_preimage, final_value_msat, final_cltv_expiry_delta + ).unwrap()); + assert_eq!(*payer.attempts.borrow(), 1); + + let retry = RouteParameters { + payee: Payee::for_keysend(pubkey), + final_value_msat, + final_cltv_expiry_delta, + }; + let event = Event::PaymentPathFailed { + payment_id, + payment_hash, + network_update: None, + rejected_by_dest: false, + all_paths_failed: false, + path: vec![], + short_channel_id: None, + retry: Some(retry), + }; + invoice_payer.handle_event(&event); + assert_eq!(*event_handled.borrow(), false); + assert_eq!(*payer.attempts.borrow(), 2); + + invoice_payer.handle_event(&Event::PaymentSent { + payment_id, payment_preimage, payment_hash, fee_paid_msat: None + }); + assert_eq!(*event_handled.borrow(), true); + assert_eq!(*payer.attempts.borrow(), 2); + } + + #[test] + fn scores_failed_channel() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner()); + let 
final_value_msat = invoice.amount_milli_satoshis().unwrap(); + let path = TestRouter::path_for_value(final_value_msat); + let short_channel_id = Some(path[0].short_channel_id); + + // Expect that scorer is given short_channel_id upon handling the event. + let payer = TestPayer::new() + .expect_send(Amount::ForInvoice(final_value_msat)) + .expect_send(Amount::OnRetry(final_value_msat / 2)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new().expect(PaymentPath::Failure { + path: path.clone(), short_channel_id: path[0].short_channel_id, + })); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = Some(invoice_payer.pay_invoice(&invoice).unwrap()); + let event = Event::PaymentPathFailed { + payment_id, + payment_hash, + network_update: None, + rejected_by_dest: false, + all_paths_failed: false, + path, + short_channel_id, + retry: Some(TestRouter::retry_for_invoice(&invoice)), + }; + invoice_payer.handle_event(&event); + } + + #[test] + fn scores_successful_channels() { + let event_handled = core::cell::RefCell::new(false); + let event_handler = |_: &_| { *event_handled.borrow_mut() = true; }; + + let payment_preimage = PaymentPreimage([1; 32]); + let invoice = invoice(payment_preimage); + let payment_hash = Some(PaymentHash(invoice.payment_hash().clone().into_inner())); + let final_value_msat = invoice.amount_milli_satoshis().unwrap(); + let route = TestRouter::route_for_value(final_value_msat); + + // Expect that scorer is given short_channel_id upon handling the event. + let payer = TestPayer::new().expect_send(Amount::ForInvoice(final_value_msat)); + let router = TestRouter {}; + let scorer = RefCell::new(TestScorer::new() + .expect(PaymentPath::Success { path: route.paths[0].clone() }) + .expect(PaymentPath::Success { path: route.paths[1].clone() }) + ); + let logger = TestLogger::new(); + let invoice_payer = + InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, RetryAttempts(2)); + + let payment_id = invoice_payer.pay_invoice(&invoice).unwrap(); + let event = Event::PaymentPathSuccessful { + payment_id, payment_hash, path: route.paths[0].clone() + }; + invoice_payer.handle_event(&event); + let event = Event::PaymentPathSuccessful { + payment_id, payment_hash, path: route.paths[1].clone() + }; + invoice_payer.handle_event(&event); + } + + struct TestRouter; + + impl TestRouter { + fn route_for_value(final_value_msat: u64) -> Route { + Route { + paths: vec![ + vec![RouteHop { + pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(), + channel_features: ChannelFeatures::empty(), + node_features: NodeFeatures::empty(), + short_channel_id: 0, fee_msat: final_value_msat / 2, cltv_expiry_delta: 144 + }], + vec![RouteHop { + pubkey: PublicKey::from_slice(&hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(), + channel_features: ChannelFeatures::empty(), + node_features: NodeFeatures::empty(), + short_channel_id: 1, fee_msat: final_value_msat / 2, cltv_expiry_delta: 144 + }], + ], + payee: None, + } + } + + fn path_for_value(final_value_msat: u64) -> Vec { + TestRouter::route_for_value(final_value_msat).paths[0].clone() + } + + fn retry_for_invoice(invoice: &Invoice) -> RouteParameters { + let mut payee = Payee::from_node_id(invoice.recover_payee_pub_key()) + 
.with_expiry_time(expiry_time_from_unix_epoch(invoice).as_secs()) + .with_route_hints(invoice.route_hints()); + if let Some(features) = invoice.features() { + payee = payee.with_features(features.clone()); + } + let final_value_msat = invoice.amount_milli_satoshis().unwrap() / 2; + RouteParameters { + payee, + final_value_msat, + final_cltv_expiry_delta: invoice.min_final_cltv_expiry() as u32, + } + } + } + + impl Router for TestRouter { + fn find_route( + &self, _payer: &PublicKey, params: &RouteParameters, _payment_hash: &PaymentHash, + _first_hops: Option<&[&ChannelDetails]>, _scorer: &S + ) -> Result { + Ok(Route { + payee: Some(params.payee.clone()), ..Self::route_for_value(params.final_value_msat) + }) + } + } + + struct FailingRouter; + + impl Router for FailingRouter { + fn find_route( + &self, _payer: &PublicKey, _params: &RouteParameters, _payment_hash: &PaymentHash, + _first_hops: Option<&[&ChannelDetails]>, _scorer: &S + ) -> Result { + Err(LightningError { err: String::new(), action: ErrorAction::IgnoreError }) + } + } + + struct TestScorer { + expectations: Option>, + } + + #[derive(Debug)] + enum PaymentPath { + Failure { path: Vec, short_channel_id: u64 }, + Success { path: Vec }, + } + + impl TestScorer { + fn new() -> Self { + Self { + expectations: None, + } + } + + fn expect(mut self, expectation: PaymentPath) -> Self { + self.expectations.get_or_insert_with(|| VecDeque::new()).push_back(expectation); + self + } + } + + #[cfg(c_bindings)] + impl lightning::util::ser::Writeable for TestScorer { + fn write(&self, _: &mut W) -> Result<(), std::io::Error> { unreachable!(); } + } + + impl Score for TestScorer { + fn channel_penalty_msat( + &self, _short_channel_id: u64, _send_amt: u64, _chan_amt: Option, _source: &NodeId, _target: &NodeId + ) -> u64 { 0 } + + fn payment_path_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) { + if let Some(expectations) = &mut self.expectations { + match expectations.pop_front() { + Some(PaymentPath::Failure { path, short_channel_id }) => { + assert_eq!(actual_path, &path.iter().collect::>()[..]); + assert_eq!(actual_short_channel_id, short_channel_id); + }, + Some(PaymentPath::Success { path }) => { + panic!("Unexpected successful payment path: {:?}", path) + }, + None => panic!("Unexpected payment_path_failed call: {:?}", actual_path), + } + } + } + + fn payment_path_successful(&mut self, actual_path: &[&RouteHop]) { + if let Some(expectations) = &mut self.expectations { + match expectations.pop_front() { + Some(PaymentPath::Failure { path, .. 
}) => { + panic!("Unexpected payment path failure: {:?}", path) + }, + Some(PaymentPath::Success { path }) => { + assert_eq!(actual_path, &path.iter().collect::>()[..]); + }, + None => panic!("Unexpected payment_path_successful call: {:?}", actual_path), + } + } + } + } + + impl Drop for TestScorer { + fn drop(&mut self) { + if std::thread::panicking() { + return; + } + + if let Some(expectations) = &self.expectations { + if !expectations.is_empty() { + panic!("Unsatisfied scorer expectations: {:?}", expectations); + } + } + } + } + + struct TestPayer { + expectations: core::cell::RefCell>, + attempts: core::cell::RefCell, + failing_on_attempt: core::cell::RefCell>, + } + + #[derive(Clone, Debug, PartialEq, Eq)] + enum Amount { + ForInvoice(u64), + Spontaneous(u64), + OnRetry(u64), + } + + struct OnAttempt(usize); + + impl TestPayer { + fn new() -> Self { + Self { + expectations: core::cell::RefCell::new(VecDeque::new()), + attempts: core::cell::RefCell::new(0), + failing_on_attempt: core::cell::RefCell::new(HashMap::new()), + } + } + + fn expect_send(self, value_msat: Amount) -> Self { + self.expectations.borrow_mut().push_back(value_msat); + self + } + + fn fails_on_attempt(self, attempt: usize) -> Self { + let failure = PaymentSendFailure::ParameterError(APIError::MonitorUpdateFailed); + self.fails_with(failure, OnAttempt(attempt)) + } + + fn fails_with_partial_failure(self, retry: RouteParameters, attempt: OnAttempt) -> Self { + self.fails_with(PaymentSendFailure::PartialFailure { + results: vec![], + failed_paths_retry: Some(retry), + payment_id: PaymentId([1; 32]), + }, attempt) + } + + fn fails_with(self, failure: PaymentSendFailure, attempt: OnAttempt) -> Self { + self.failing_on_attempt.borrow_mut().insert(attempt.0, failure); + self + } + + fn check_attempts(&self) -> Result { + let mut attempts = self.attempts.borrow_mut(); + *attempts += 1; + + match self.failing_on_attempt.borrow_mut().remove(&*attempts) { + Some(failure) => Err(failure), + None => Ok(PaymentId([1; 32])), + } + } + + fn check_value_msats(&self, actual_value_msats: Amount) { + let expected_value_msats = self.expectations.borrow_mut().pop_front(); + if let Some(expected_value_msats) = expected_value_msats { + assert_eq!(actual_value_msats, expected_value_msats); + } else { + panic!("Unexpected amount: {:?}", actual_value_msats); + } + } + } + + impl Drop for TestPayer { + fn drop(&mut self) { + if std::thread::panicking() { + return; + } + + if !self.expectations.borrow().is_empty() { + panic!("Unsatisfied payment expectations: {:?}", self.expectations.borrow()); + } + } + } + + impl Payer for TestPayer { + fn node_id(&self) -> PublicKey { + let secp_ctx = Secp256k1::new(); + PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()) + } + + fn first_hops(&self) -> Vec { + Vec::new() + } + + fn send_payment( + &self, route: &Route, _payment_hash: PaymentHash, + _payment_secret: &Option + ) -> Result { + self.check_value_msats(Amount::ForInvoice(route.get_total_amount())); + self.check_attempts() + } + + fn send_spontaneous_payment( + &self, route: &Route, _payment_preimage: PaymentPreimage, + ) -> Result { + self.check_value_msats(Amount::Spontaneous(route.get_total_amount())); + self.check_attempts() + } + + fn retry_payment( + &self, route: &Route, _payment_id: PaymentId + ) -> Result<(), PaymentSendFailure> { + self.check_value_msats(Amount::OnRetry(route.get_total_amount())); + self.check_attempts().map(|_| ()) + } + + fn abandon_payment(&self, _payment_id: PaymentId) { } + } + + // 
*** Full Featured Functional Tests with a Real ChannelManager *** + struct ManualRouter(RefCell>>); + + impl Router for ManualRouter { + fn find_route( + &self, _payer: &PublicKey, _params: &RouteParameters, _payment_hash: &PaymentHash, + _first_hops: Option<&[&ChannelDetails]>, _scorer: &S + ) -> Result { + self.0.borrow_mut().pop_front().unwrap() + } + } + impl ManualRouter { + fn expect_find_route(&self, result: Result) { + self.0.borrow_mut().push_back(result); + } + } + impl Drop for ManualRouter { + fn drop(&mut self) { + if std::thread::panicking() { + return; + } + assert!(self.0.borrow_mut().is_empty()); + } + } + + #[test] + fn retry_multi_path_single_failed_payment() { + // Tests that we can/will retry after a single path of an MPP payment failed immediately + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0, InitFeatures::known(), InitFeatures::known()); + let chans = nodes[0].node.list_usable_channels(); + let mut route = Route { + paths: vec![ + vec![RouteHop { + pubkey: nodes[1].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chans[0].short_channel_id.unwrap(), + channel_features: ChannelFeatures::known(), + fee_msat: 10_000, + cltv_expiry_delta: 100, + }], + vec![RouteHop { + pubkey: nodes[1].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chans[1].short_channel_id.unwrap(), + channel_features: ChannelFeatures::known(), + fee_msat: 100_000_001, // Our default max-HTLC-value is 10% of the channel value, which this is one more than + cltv_expiry_delta: 100, + }], + ], + payee: Some(Payee::from_node_id(nodes[1].node.get_our_node_id())), + }; + let router = ManualRouter(RefCell::new(VecDeque::new())); + router.expect_find_route(Ok(route.clone())); + // On retry, split the payment across both channels. 
+ route.paths[0][0].fee_msat = 50_000_001; + route.paths[1][0].fee_msat = 50_000_000; + router.expect_find_route(Ok(route.clone())); + + let event_handler = |_: &_| { panic!(); }; + let scorer = RefCell::new(TestScorer::new()); + let invoice_payer = InvoicePayer::new(nodes[0].node, router, &scorer, nodes[0].logger, event_handler, RetryAttempts(1)); + + assert!(invoice_payer.pay_invoice(&create_invoice_from_channelmanager_and_duration_since_epoch( + &nodes[1].node, nodes[1].keys_manager, Currency::Bitcoin, Some(100_010_000), "Invoice".to_string(), + duration_since_epoch()).unwrap()) + .is_ok()); + let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(htlc_msgs.len(), 2); + check_added_monitors!(nodes[0], 2); + } + + #[test] + fn immediate_retry_on_failure() { + // Tests that we can/will retry immediately after a failure + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0, InitFeatures::known(), InitFeatures::known()); + let chans = nodes[0].node.list_usable_channels(); + let mut route = Route { + paths: vec![ + vec![RouteHop { + pubkey: nodes[1].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chans[0].short_channel_id.unwrap(), + channel_features: ChannelFeatures::known(), + fee_msat: 100_000_001, // Our default max-HTLC-value is 10% of the channel value, which this is one more than + cltv_expiry_delta: 100, + }], + ], + payee: Some(Payee::from_node_id(nodes[1].node.get_our_node_id())), + }; + let router = ManualRouter(RefCell::new(VecDeque::new())); + router.expect_find_route(Ok(route.clone())); + // On retry, split the payment across both channels. + route.paths.push(route.paths[0].clone()); + route.paths[0][0].short_channel_id = chans[1].short_channel_id.unwrap(); + route.paths[0][0].fee_msat = 50_000_000; + route.paths[1][0].fee_msat = 50_000_001; + router.expect_find_route(Ok(route.clone())); + + let event_handler = |_: &_| { panic!(); }; + let scorer = RefCell::new(TestScorer::new()); + let invoice_payer = InvoicePayer::new(nodes[0].node, router, &scorer, nodes[0].logger, event_handler, RetryAttempts(1)); + + assert!(invoice_payer.pay_invoice(&create_invoice_from_channelmanager_and_duration_since_epoch( + &nodes[1].node, nodes[1].keys_manager, Currency::Bitcoin, Some(100_010_000), "Invoice".to_string(), + duration_since_epoch()).unwrap()) + .is_ok()); + let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(htlc_msgs.len(), 2); + check_added_monitors!(nodes[0], 2); + } + + #[test] + fn no_extra_retries_on_back_to_back_fail() { + // In a previous release, we had a race where we may exceed the payment retry count if we + // get two failures in a row with the second having `all_paths_failed` set. + // Generally, when we give up trying to retry a payment, we don't know for sure what the + // current state of the ChannelManager event queue is. Specifically, we cannot be sure that + // there are not multiple additional `PaymentPathFailed` or even `PaymentSent` events + // pending which we will see later. 
Thus, when we previously removed the retry tracking map + // entry after a `all_paths_failed` `PaymentPathFailed` event, we may have dropped the + // retry entry even though more events for the same payment were still pending. This led to + // us retrying a payment again even though we'd already given up on it. + // + // We now have a separate event - `PaymentFailed` which indicates no HTLCs remain and which + // is used to remove the payment retry counter entries instead. This tests for the specific + // excess-retry case while also testing `PaymentFailed` generation. + + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + + let mut route = Route { + paths: vec![ + vec![RouteHop { + pubkey: nodes[1].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chan_1_scid, + channel_features: ChannelFeatures::known(), + fee_msat: 0, + cltv_expiry_delta: 100, + }, RouteHop { + pubkey: nodes[2].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chan_2_scid, + channel_features: ChannelFeatures::known(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + }], + vec![RouteHop { + pubkey: nodes[1].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chan_1_scid, + channel_features: ChannelFeatures::known(), + fee_msat: 0, + cltv_expiry_delta: 100, + }, RouteHop { + pubkey: nodes[2].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chan_2_scid, + channel_features: ChannelFeatures::known(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + }] + ], + payee: Some(Payee::from_node_id(nodes[2].node.get_our_node_id())), + }; + let router = ManualRouter(RefCell::new(VecDeque::new())); + router.expect_find_route(Ok(route.clone())); + // On retry, we'll only be asked for one path + route.paths.remove(1); + router.expect_find_route(Ok(route.clone())); + + let expected_events: RefCell> = RefCell::new(VecDeque::new()); + let event_handler = |event: &Event| { + let event_checker = expected_events.borrow_mut().pop_front().unwrap(); + event_checker(event); + }; + let scorer = RefCell::new(TestScorer::new()); + let invoice_payer = InvoicePayer::new(nodes[0].node, router, &scorer, nodes[0].logger, event_handler, RetryAttempts(1)); + + assert!(invoice_payer.pay_invoice(&create_invoice_from_channelmanager_and_duration_since_epoch( + &nodes[1].node, nodes[1].keys_manager, Currency::Bitcoin, Some(100_010_000), "Invoice".to_string(), + duration_since_epoch()).unwrap()) + .is_ok()); + let htlc_updates = SendEvent::from_node(&nodes[0]); + check_added_monitors!(nodes[0], 1); + assert_eq!(htlc_updates.msgs.len(), 1); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg); + check_added_monitors!(nodes[1], 1); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + + 
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa); + check_added_monitors!(nodes[0], 1); + let second_htlc_updates = SendEvent::from_node(&nodes[0]); + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs); + check_added_monitors!(nodes[0], 1); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg); + check_added_monitors!(nodes[1], 1); + let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa); + check_added_monitors!(nodes[1], 1); + let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa); + check_added_monitors!(nodes[0], 1); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed); + check_added_monitors!(nodes[0], 1); + let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa); + check_added_monitors!(nodes[1], 1); + let bs_second_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs); + check_added_monitors!(nodes[1], 1); + let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_second_fail_update.update_fail_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_fail_update.commitment_signed); + check_added_monitors!(nodes[0], 1); + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa); + check_added_monitors!(nodes[0], 1); + let (as_third_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_third_raa); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_fourth_cs); + check_added_monitors!(nodes[1], 1); + let bs_fourth_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_fourth_raa); + check_added_monitors!(nodes[0], 1); + + // At this point A has sent two HTLCs which both failed due to lack of fee. It now has two + // pending `PaymentPathFailed` events, one with `all_paths_failed` unset, and the second + // with it set. The first event will use up the only retry we are allowed, with the second + // `PaymentPathFailed` being passed up to the user (us, in this case). Previously, we'd + // treated this as "HTLC complete" and dropped the retry counter, causing us to retry again + // if the final HTLC failed. 
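To make the semantics described in the comment above concrete, here is a minimal editorial sketch (not part of the changes in this diff) of how an application-level event handler might act on these events, assuming the 0.0.104 `Event` variants shown elsewhere in this patch; `record_payment_failure` is a stand-in for application bookkeeping, not an LDK API.

```rust
// Sketch only: `PaymentPathFailed` means a single attempt failed; the payment
// should be treated as permanently failed only once `PaymentFailed` arrives.
use lightning::ln::PaymentHash;
use lightning::util::events::Event;

// Stand-in for application-side bookkeeping; not an LDK API.
fn record_payment_failure(payment_hash: &PaymentHash) {
    println!("payment {:?} has permanently failed", payment_hash);
}

fn handle_event(event: &Event) {
    match event {
        Event::PaymentPathFailed { payment_hash, all_paths_failed, .. } => {
            // Even with `all_paths_failed` set, further events for the same
            // payment may still be queued, so do not drop retry state here.
            println!("a path to {:?} failed (all_paths_failed={})", payment_hash, all_paths_failed);
        },
        Event::PaymentFailed { payment_hash, .. } => {
            // All HTLCs are resolved and retries have been abandoned.
            record_payment_failure(payment_hash);
        },
        _ => {},
    }
}
```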
+ expected_events.borrow_mut().push_back(&|ev: &Event| { + if let Event::PaymentPathFailed { rejected_by_dest, all_paths_failed, .. } = ev { + assert!(!rejected_by_dest); + assert!(all_paths_failed); + } else { panic!("Unexpected event"); } + }); + nodes[0].node.process_pending_events(&invoice_payer); + assert!(expected_events.borrow().is_empty()); + + let retry_htlc_updates = SendEvent::from_node(&nodes[0]); + check_added_monitors!(nodes[0], 1); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true); + let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true); + + expected_events.borrow_mut().push_back(&|ev: &Event| { + if let Event::PaymentPathFailed { rejected_by_dest, all_paths_failed, .. } = ev { + assert!(!rejected_by_dest); + assert!(all_paths_failed); + } else { panic!("Unexpected event"); } + }); + expected_events.borrow_mut().push_back(&|ev: &Event| { + if let Event::PaymentFailed { .. } = ev { + } else { panic!("Unexpected event"); } + }); + nodes[0].node.process_pending_events(&invoice_payer); + assert!(expected_events.borrow().is_empty()); + } +} diff --git a/lightning-invoice/src/ser.rs b/lightning-invoice/src/ser.rs index 7d9ca9eb895..f921a5b0f69 100644 --- a/lightning-invoice/src/ser.rs +++ b/lightning-invoice/src/ser.rs @@ -1,6 +1,7 @@ -use std::fmt; -use std::fmt::{Display, Formatter}; +use core::fmt; +use core::fmt::{Display, Formatter}; use bech32::{ToBase32, u5, WriteBase32, Base32Len}; +use crate::prelude::*; use super::{Invoice, Sha256, TaggedField, ExpiryTime, MinFinalCltvExpiry, Fallback, PayeePubKey, InvoiceSignature, PositiveTimestamp, PrivateRoute, Description, RawTaggedField, Currency, RawHrp, SiPrefix, constants, SignedRawInvoice, RawDataPart}; @@ -64,7 +65,7 @@ impl<'a, W: WriteBase32> BytesToBase32<'a, W> { pub fn finalize(mut self) -> Result<(), W::Err> { self.inner_finalize()?; - std::mem::forget(self); + core::mem::forget(self); Ok(()) } diff --git a/lightning-invoice/src/sync.rs b/lightning-invoice/src/sync.rs new file mode 100644 index 00000000000..fae923feb65 --- /dev/null +++ b/lightning-invoice/src/sync.rs @@ -0,0 +1,37 @@ +use core::cell::{RefCell, RefMut}; +use core::ops::{Deref, DerefMut}; + +pub type LockResult = Result; + +pub struct Mutex { + inner: RefCell +} + +#[must_use = "if unused the Mutex will immediately unlock"] +pub struct MutexGuard<'a, T: ?Sized + 'a> { + lock: RefMut<'a, T>, +} + +impl Deref for MutexGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + &self.lock.deref() + } +} + +impl DerefMut for MutexGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + self.lock.deref_mut() + } +} + +impl Mutex { + pub fn new(inner: T) -> Mutex { + Mutex { inner: RefCell::new(inner) } + } + + pub fn lock<'a>(&'a self) -> LockResult> { + Ok(MutexGuard { lock: self.inner.borrow_mut() }) + } +} diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs index 409fa803cab..ffa84c98ef6 100644 --- a/lightning-invoice/src/utils.rs +++ b/lightning-invoice/src/utils.rs @@ -1,17 +1,27 @@ //! Convenient utilities to create an invoice. 
-use {Currency, DEFAULT_EXPIRY_TIME, Invoice, InvoiceBuilder, SignOrCreationError, RawInvoice}; + +use {CreationError, Currency, DEFAULT_EXPIRY_TIME, Invoice, InvoiceBuilder, SignOrCreationError, RawInvoice}; +use payment::{Payer, Router}; + use bech32::ToBase32; use bitcoin_hashes::Hash; +use crate::prelude::*; use lightning::chain; use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; use lightning::chain::keysinterface::{Sign, KeysInterface}; -use lightning::ln::channelmanager::{ChannelManager, MIN_FINAL_CLTV_EXPIRY}; -use lightning::routing::network_graph::RoutingFees; -use lightning::routing::router::{RouteHint, RouteHintHop}; +use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::ln::channelmanager::{ChannelDetails, ChannelManager, PaymentId, PaymentSendFailure, MIN_FINAL_CLTV_EXPIRY}; +use lightning::ln::msgs::LightningError; +use lightning::routing::scoring::Score; +use lightning::routing::network_graph::{NetworkGraph, RoutingFees}; +use lightning::routing::router::{Route, RouteHint, RouteHintHop, RouteParameters, find_route}; use lightning::util::logger::Logger; -use std::convert::TryInto; -use std::ops::Deref; +use secp256k1::key::PublicKey; +use core::convert::TryInto; +use core::ops::Deref; +use core::time::Duration; +#[cfg(feature = "std")] /// Utility to construct an invoice. Generally, unless you want to do something like a custom /// cltv_expiry, this is what you should be using to create an invoice. The reason being, this /// method stores the invoice's payment secret and preimage in `ChannelManager`, so (a) the user @@ -21,6 +31,33 @@ pub fn create_invoice_from_channelmanager, keys_manager: K, network: Currency, amt_msat: Option, description: String ) -> Result> +where + M::Target: chain::Watch, + T::Target: BroadcasterInterface, + K::Target: KeysInterface, + F::Target: FeeEstimator, + L::Target: Logger, +{ + use std::time::SystemTime; + let duration = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) + .expect("for the foreseeable future this shouldn't happen"); + create_invoice_from_channelmanager_and_duration_since_epoch( + channelmanager, + keys_manager, + network, + amt_msat, + description, + duration + ) +} + +/// See [`create_invoice_from_channelmanager`] +/// This version can be used in a `no_std` environment, where [`std::time::SystemTime`] is not +/// available and the current time is supplied by the caller. +pub fn create_invoice_from_channelmanager_and_duration_since_epoch( + channelmanager: &ChannelManager, keys_manager: K, network: Currency, + amt_msat: Option, description: String, duration_since_epoch: Duration, +) -> Result> where M::Target: chain::Watch, T::Target: BroadcasterInterface, @@ -53,15 +90,15 @@ where }])); } + // `create_inbound_payment` only returns an error if the amount is greater than the total bitcoin + // supply. let (payment_hash, payment_secret) = channelmanager.create_inbound_payment( - amt_msat, - DEFAULT_EXPIRY_TIME.try_into().unwrap(), - 0, - ); + amt_msat, DEFAULT_EXPIRY_TIME.try_into().unwrap()) + .map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?; let our_node_pubkey = channelmanager.get_our_node_id(); let mut invoice = InvoiceBuilder::new(network) .description(description) - .current_timestamp() + .duration_since_epoch(duration_since_epoch) .payee_pub_key(our_node_pubkey) .payment_hash(Hash::from_slice(&payment_hash.0).unwrap()) .payment_secret(payment_secret) @@ -89,17 +126,83 @@ where } } +/// A [`Router`] implemented using [`find_route`]. 
+pub struct DefaultRouter, L: Deref> where L::Target: Logger { + network_graph: G, + logger: L, +} + +impl, L: Deref> DefaultRouter where L::Target: Logger { + /// Creates a new router using the given [`NetworkGraph`] and [`Logger`]. + pub fn new(network_graph: G, logger: L) -> Self { + Self { network_graph, logger } + } +} + +impl, L: Deref, S: Score> Router for DefaultRouter +where L::Target: Logger { + fn find_route( + &self, payer: &PublicKey, params: &RouteParameters, _payment_hash: &PaymentHash, + first_hops: Option<&[&ChannelDetails]>, scorer: &S + ) -> Result { + find_route(payer, params, &*self.network_graph, first_hops, &*self.logger, scorer) + } +} + +impl Payer for ChannelManager +where + M::Target: chain::Watch, + T::Target: BroadcasterInterface, + K::Target: KeysInterface, + F::Target: FeeEstimator, + L::Target: Logger, +{ + fn node_id(&self) -> PublicKey { + self.get_our_node_id() + } + + fn first_hops(&self) -> Vec { + self.list_usable_channels() + } + + fn send_payment( + &self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option + ) -> Result { + self.send_payment(route, payment_hash, payment_secret) + } + + fn send_spontaneous_payment( + &self, route: &Route, payment_preimage: PaymentPreimage, + ) -> Result { + self.send_spontaneous_payment(route, Some(payment_preimage)) + .map(|(_, payment_id)| payment_id) + } + + fn retry_payment( + &self, route: &Route, payment_id: PaymentId + ) -> Result<(), PaymentSendFailure> { + self.retry_payment(route, payment_id) + } + + fn abandon_payment(&self, payment_id: PaymentId) { + self.abandon_payment(payment_id) + } +} + #[cfg(test)] mod test { + use core::time::Duration; use {Currency, Description, InvoiceDescription}; use lightning::ln::PaymentHash; use lightning::ln::channelmanager::MIN_FINAL_CLTV_EXPIRY; use lightning::ln::functional_test_utils::*; use lightning::ln::features::InitFeatures; use lightning::ln::msgs::ChannelMessageHandler; - use lightning::routing::router; + use lightning::routing::router::{Payee, RouteParameters, find_route}; use lightning::util::events::MessageSendEventsProvider; use lightning::util::test_utils; + use utils::create_invoice_from_channelmanager_and_duration_since_epoch; + #[test] fn test_from_channelmanager() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -107,26 +210,28 @@ mod test { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let _chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let invoice = ::utils::create_invoice_from_channelmanager(&nodes[1].node, nodes[1].keys_manager, Currency::BitcoinTestnet, Some(10_000), "test".to_string()).unwrap(); + let invoice = create_invoice_from_channelmanager_and_duration_since_epoch( + &nodes[1].node, nodes[1].keys_manager, Currency::BitcoinTestnet, Some(10_000), "test".to_string(), + Duration::from_secs(1234567)).unwrap(); assert_eq!(invoice.amount_pico_btc(), Some(100_000)); assert_eq!(invoice.min_final_cltv_expiry(), MIN_FINAL_CLTV_EXPIRY as u64); assert_eq!(invoice.description(), InvoiceDescription::Direct(&Description("test".to_string()))); - let amt_msat = invoice.amount_pico_btc().unwrap() / 10; + let payee = Payee::from_node_id(invoice.recover_payee_pub_key()) + .with_features(invoice.features().unwrap().clone()) + .with_route_hints(invoice.route_hints()); + let params = RouteParameters { + payee, + final_value_msat: invoice.amount_milli_satoshis().unwrap(), + final_cltv_expiry_delta: 
invoice.min_final_cltv_expiry() as u32, + }; let first_hops = nodes[0].node.list_usable_channels(); - let last_hops = invoice.route_hints(); - let network_graph = &nodes[0].net_graph_msg_handler.network_graph; + let network_graph = node_cfgs[0].network_graph; let logger = test_utils::TestLogger::new(); - let route = router::get_route( - &nodes[0].node.get_our_node_id(), - network_graph, - &invoice.recover_payee_pub_key(), - Some(invoice.features().unwrap().clone()), - Some(&first_hops.iter().collect::>()), - &last_hops, - amt_msat, - invoice.min_final_cltv_expiry() as u32, - &logger, + let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let route = find_route( + &nodes[0].node.get_our_node_id(), ¶ms, network_graph, + Some(&first_hops.iter().collect::>()), &logger, &scorer, ).unwrap(); let payment_event = { diff --git a/lightning-invoice/tests/ser_de.rs b/lightning-invoice/tests/ser_de.rs index a2cc0e2e2b6..1eaeb313785 100644 --- a/lightning-invoice/tests/ser_de.rs +++ b/lightning-invoice/tests/ser_de.rs @@ -15,7 +15,7 @@ use lightning_invoice::*; use secp256k1::PublicKey; use secp256k1::recovery::{RecoverableSignature, RecoveryId}; use std::collections::HashSet; -use std::time::{Duration, UNIX_EPOCH}; +use std::time::Duration; use std::str::FromStr; fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { @@ -23,7 +23,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { ( "lnbc1pvjluezsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdpl2pkx2ctnv5sxxmmwwd5kgetjypeh2ursdae8g6twvus8g6rfwvs8qun0dfjkxaq9qrsgq357wnc5r2ueh7ck6q93dj32dlqnls087fxdwk8qakdyafkq3yap9us6v52vjjsrvywa6rt52cm9r9zqt8r2t7mlcwspyetp5h2tztugp9lfyql".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( "0001020304050607080900010203040506070809000102030405060708090102" @@ -44,7 +44,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "lnbc2500u1pvjluezsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5xysxxatsyp3k7enxv4jsxqzpu9qrsgquk0rl77nj30yxdy8j9vdx85fkpmdla2087ne0xh8nhedh8w27kyke0lp53ut353s06fv3qfegext0eh0ymjpf39tuven09sam30g4vgpfna3rh".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(250_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( "0001020304050607080900010203040506070809000102030405060708090102" @@ -66,7 +66,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "lnbc2500u1pvjluezsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdpquwpc4curk03c9wlrswe78q4eyqc7d8d0xqzpu9qrsgqhtjpauu9ur7fw2thcl4y9vfvh4m9wlfyz2gem29g5ghe2aak2pm3ps8fdhtceqsaagty2vph7utlgj48u0ged6a337aewvraedendscp573dxr".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(250_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( "0001020304050607080900010203040506070809000102030405060708090102" @@ -88,7 +88,7 @@ fn get_test_tuples() -> Vec<(String, 
SignedRawInvoice, bool, bool)> { "lnbc20m1pvjluezsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqs9qrsgq7ea976txfraylvgzuxs8kgcw23ezlrszfnh8r6qtfpr6cxga50aj6txm9rxrydzd06dfeawfk6swupvz4erwnyutnjq7x39ymw6j38gp7ynn44".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(2_000_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( @@ -109,7 +109,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "lntb20m1pvjluezsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygshp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfpp3x9et2e20v6pu37c5d9vax37wxq72un989qrsgqdj545axuxtnfemtpwkc45hx9d2ft7x04mt8q7y6t0k2dge9e7h8kpy9p34ytyslj3yu569aalz2xdk8xkd7ltxqld94u8h2esmsmacgpghe9k8".to_owned(), InvoiceBuilder::new(Currency::BitcoinTestnet) .amount_milli_satoshis(2_000_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( @@ -131,7 +131,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "lnbc20m1pvjluezsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqsfpp3qjmp7lwpagxun9pygexvgpjdc4jdj85fr9yq20q82gphp2nflc7jtzrcazrra7wwgzxqc8u7754cdlpfrmccae92qgzqvzq2ps8pqqqqqqpqqqqq9qqqvpeuqafqxu92d8lr6fvg0r5gv0heeeqgcrqlnm6jhphu9y00rrhy4grqszsvpcgpy9qqqqqqgqqqqq7qqzq9qrsgqdfjcdk6w3ak5pca9hwfwfh63zrrz06wwfya0ydlzpgzxkn5xagsqz7x9j4jwe7yj7vaf2k9lqsdk45kts2fd0fkr28am0u4w95tt2nsq76cqw0".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(2_000_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( @@ -170,7 +170,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "lnbc20m1pvjluezsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygshp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfppj3a24vwu6r8ejrss3axul8rxldph2q7z99qrsgqz6qsgww34xlatfj6e3sngrwfy3ytkt29d2qttr8qz2mnedfqysuqypgqex4haa2h8fx3wnypranf3pdwyluftwe680jjcfp438u82xqphf75ym".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(2_000_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + 
.duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( @@ -192,7 +192,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "lnbc20m1pvjluezsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygshp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfppqw508d6qejxtdg4y5r3zarvary0c5xw7k9qrsgqt29a0wturnys2hhxpner2e3plp6jyj8qx7548zr2z7ptgjjc7hljm98xhjym0dg52sdrvqamxdezkmqg4gdrvwwnf0kv2jdfnl4xatsqmrnsse".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(2_000_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( @@ -216,7 +216,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "lnbc20m1pvjluezsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygshp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfp4qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q9qrsgq9vlvyj8cqvq6ggvpwd53jncp9nwc47xlrsnenq2zp70fq83qlgesn4u3uyf4tesfkkwwfg3qs54qe426hp3tz7z6sweqdjg05axsrjqp9yrrwc".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(2_000_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( @@ -240,7 +240,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "lnbc9678785340p1pwmna7lpp5gc3xfm08u9qy06djf8dfflhugl6p7lgza6dsjxq454gxhj9t7a0sd8dgfkx7cmtwd68yetpd5s9xar0wfjn5gpc8qhrsdfq24f5ggrxdaezqsnvda3kkum5wfjkzmfqf3jkgem9wgsyuctwdus9xgrcyqcjcgpzgfskx6eqf9hzqnteypzxz7fzypfhg6trddjhygrcyqezcgpzfysywmm5ypxxjemgw3hxjmn8yptk7untd9hxwg3q2d6xjcmtv4ezq7pqxgsxzmnyyqcjqmt0wfjjq6t5v4khxsp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygsxqyjw5qcqp2rzjq0gxwkzc8w6323m55m4jyxcjwmy7stt9hwkwe2qxmy8zpsgg7jcuwz87fcqqeuqqqyqqqqlgqqqqn3qq9q9qrsgqrvgkpnmps664wgkp43l22qsgdw4ve24aca4nymnxddlnp8vh9v2sdxlu5ywdxefsfvm0fq3sesf08uf6q9a2ke0hc9j6z6wlxg5z5kqpu2v9wz".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(967878534) - .timestamp(UNIX_EPOCH + Duration::from_secs(1572468703)) + .duration_since_epoch(Duration::from_secs(1572468703)) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( "462264ede7e14047e9b249da94fefc47f41f7d02ee9b091815a5506bc8abf75f" @@ -272,7 +272,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { 
"lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdeessp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygs9q5sqqqqqqqqqqqqqqqqsgq2a25dxl5hrntdtn6zvydt7d66hyzsyhqs4wdynavys42xgl6sgx9c4g7me86a27t07mdtfry458rtjr0v92cnmswpsjscgt2vcse3sgpz3uapa".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(2_500_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( "0001020304050607080900010203040506070809000102030405060708090102" @@ -293,7 +293,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "LNBC25M1PVJLUEZPP5QQQSYQCYQ5RQWZQFQQQSYQCYQ5RQWZQFQQQSYQCYQ5RQWZQFQYPQDQ5VDHKVEN9V5SXYETPDEESSP5ZYG3ZYG3ZYG3ZYG3ZYG3ZYG3ZYG3ZYG3ZYG3ZYG3ZYG3ZYG3ZYGS9Q5SQQQQQQQQQQQQQQQQSGQ2A25DXL5HRNTDTN6ZVYDT7D66HYZSYHQS4WDYNAVYS42XGL6SGX9C4G7ME86A27T07MDTFRY458RTJR0V92CNMSWPSJSCGT2VCSE3SGPZ3UAPA".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(2_500_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( "0001020304050607080900010203040506070809000102030405060708090102" @@ -314,7 +314,7 @@ fn get_test_tuples() -> Vec<(String, SignedRawInvoice, bool, bool)> { "lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdeessp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygs9q5sqqqqqqqqqqqqqqqqsgq2qrqqqfppnqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqppnqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpp4qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqhpnqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqhp4qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqspnqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsp4qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqnp5qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqnpkqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqz599y53s3ujmcfjp5xrdap68qxymkqphwsexhmhr8wdz5usdzkzrse33chw6dlp3jhuhge9ley7j2ayx36kawe7kmgg8sv5ugdyusdcqzn8z9x".to_owned(), InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(2_500_000_000) - .timestamp(UNIX_EPOCH + Duration::from_secs(1496314658)) + .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) .payment_hash(sha256::Hash::from_hex( "0001020304050607080900010203040506070809000102030405060708090102" diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml index 053766e10f3..185e5d128d5 100644 --- a/lightning-net-tokio/Cargo.toml +++ b/lightning-net-tokio/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "lightning-net-tokio" -version = "0.0.100" +version = "0.0.104" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-bitcoin/rust-lightning/" +repository = "https://github.com/lightningdevkit/rust-lightning/" description = """ Implementation of the rust-lightning network stack using Tokio. For Rust-Lightning clients which wish to make direct connections to Lightning P2P nodes, this is a simple alternative to implementing the required network stack, especially for those already using Tokio. 
@@ -12,7 +12,7 @@ edition = "2018" [dependencies] bitcoin = "0.27" -lightning = { version = "0.0.100", path = "../lightning" } +lightning = { version = "0.0.104", path = "../lightning" } tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] } [dev-dependencies] diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 13a4c0d934f..62c036b9796 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -34,7 +34,7 @@ //! type Logger = dyn lightning::util::logger::Logger + Send + Sync; //! type ChainAccess = dyn lightning::chain::Access + Send + Sync; //! type ChainFilter = dyn lightning::chain::Filter + Send + Sync; -//! type DataPersister = dyn lightning::chain::channelmonitor::Persist + Send + Sync; +//! type DataPersister = dyn lightning::chain::chainmonitor::Persist + Send + Sync; //! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor, Arc, Arc, Arc, Arc>; //! type ChannelManager = Arc>; //! type PeerManager = Arc>; diff --git a/lightning-persister/Cargo.toml b/lightning-persister/Cargo.toml index cc585b9c0e8..27f772cf326 100644 --- a/lightning-persister/Cargo.toml +++ b/lightning-persister/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "lightning-persister" -version = "0.0.100" +version = "0.0.104" authors = ["Valentine Wallace", "Matt Corallo"] license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-bitcoin/rust-lightning/" +repository = "https://github.com/lightningdevkit/rust-lightning/" description = """ Utilities to manage Rust-Lightning channel data persistence and retrieval. """ @@ -13,11 +13,11 @@ unstable = ["lightning/unstable"] [dependencies] bitcoin = "0.27" -lightning = { version = "0.0.100", path = "../lightning" } +lightning = { version = "0.0.104", path = "../lightning" } libc = "0.2" [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.0.100", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.0.104", path = "../lightning", features = ["_test_utils"] } diff --git a/lightning-persister/src/lib.rs b/lightning-persister/src/lib.rs index d68a06d71b4..b853b5796b6 100644 --- a/lightning-persister/src/lib.rs +++ b/lightning-persister/src/lib.rs @@ -17,8 +17,8 @@ use bitcoin::hashes::hex::{FromHex, ToHex}; use crate::util::DiskWriteable; use lightning::chain; use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; -use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr}; -use lightning::chain::channelmonitor; +use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate}; +use lightning::chain::chainmonitor; use lightning::chain::keysinterface::{Sign, KeysInterface}; use lightning::chain::transaction::OutPoint; use lightning::ln::channelmanager::ChannelManager; @@ -158,17 +158,22 @@ impl FilesystemPersister { } } -impl channelmonitor::Persist for FilesystemPersister { - fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { +impl chainmonitor::Persist for FilesystemPersister { + // TODO: We really need a way for the persister to inform the user that its time to crash/shut + // down once these start returning failure. + // A PermanentFailure implies we need to shut down since we're force-closing channels without + // even broadcasting! 
+ + fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> { let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index); util::write_to_file(self.path_to_monitor_data(), filename, monitor) - .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure) + .map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure) } - fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &ChannelMonitorUpdate, monitor: &ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { + fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option, monitor: &ChannelMonitor, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> { let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index); util::write_to_file(self.path_to_monitor_data(), filename, monitor) - .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure) + .map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure) } } @@ -180,13 +185,13 @@ mod tests { use bitcoin::blockdata::block::{Block, BlockHeader}; use bitcoin::hashes::hex::FromHex; use bitcoin::Txid; - use lightning::chain::channelmonitor::{Persist, ChannelMonitorUpdateErr}; + use lightning::chain::ChannelMonitorUpdateErr; + use lightning::chain::chainmonitor::Persist; use lightning::chain::transaction::OutPoint; - use lightning::{check_closed_broadcast, check_added_monitors}; + use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors}; use lightning::ln::features::InitFeatures; use lightning::ln::functional_test_utils::*; - use lightning::ln::msgs::ErrorAction; - use lightning::util::events::{MessageSendEventsProvider, MessageSendEvent}; + use lightning::util::events::{ClosureReason, MessageSendEventsProvider}; use lightning::util::test_utils; use std::fs; #[cfg(target_os = "windows")] @@ -259,6 +264,7 @@ mod tests { // Force close because cooperative close doesn't result in any persisted // updates. nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap(); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -268,6 +274,7 @@ mod tests { let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]}); check_closed_broadcast!(nodes[1], true); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); check_added_monitors!(nodes[1], 1); // Make sure everything is persisted as expected after close. 
@@ -291,7 +298,10 @@ mod tests { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); nodes[1].node.force_close_channel(&chan.2).unwrap(); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); + let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap(); + let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap(); // Set the persister's directory to read-only, which should result in // returning a permanent failure when we then attempt to persist a @@ -305,7 +315,7 @@ mod tests { txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 }; - match persister.persist_new_channel(test_txo, &added_monitors[0].1) { + match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) { Err(ChannelMonitorUpdateErr::PermanentFailure) => {}, _ => panic!("unexpected result from persisting new channel") } @@ -327,7 +337,10 @@ mod tests { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); nodes[1].node.force_close_channel(&chan.2).unwrap(); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); + let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap(); + let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap(); // Create the persister with an invalid directory name and test that the // channel fails to open because the directories fail to be created. There @@ -339,7 +352,7 @@ mod tests { txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 }; - match persister.persist_new_channel(test_txo, &added_monitors[0].1) { + match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) { Err(ChannelMonitorUpdateErr::PermanentFailure) => {}, _ => panic!("unexpected result from persisting new channel") } diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index 1ef4c74f991..55ee177996c 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "lightning" -version = "0.0.100" +version = "0.0.104" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-bitcoin/rust-lightning/" +repository = "https://github.com/lightningdevkit/rust-lightning/" description = """ A Bitcoin Lightning library in Rust. Does most of the hard work, without implying a specific runtime, requiring clients implement basic network logic, chain interactions and disk storage. @@ -11,7 +11,6 @@ Still missing tons of error-handling. See GitHub issues for suggested projects i """ [features] -allow_wallclock_use = [] fuzztarget = ["bitcoin/fuzztarget", "regex"] # Internal test utilities exposed to other repo crates _test_utils = ["hex", "regex", "bitcoin/bitcoinconsensus"] @@ -21,6 +20,7 @@ max_level_error = [] max_level_warn = [] max_level_info = [] max_level_debug = [] +max_level_trace = [] # Allow signing of local transactions that may have been revoked or will be revoked, for functional testing (e.g. justice tx handling). 
# This is unsafe to use in production because it may result in the counterparty publishing taking our funds. unsafe_revoked_tx_signing = [] @@ -39,6 +39,7 @@ secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"] hashbrown = { version = "0.11", optional = true } hex = { version = "0.3", optional = true } regex = { version = "0.1.80", optional = true } +backtrace = { version = "0.3", optional = true } core2 = { version = "0.3.0", optional = true, default-features = false } @@ -52,6 +53,3 @@ secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"] version = "0.27" default-features = false features = ["bitcoinconsensus", "secp-recovery"] - -[package.metadata.docs.rs] -features = ["allow_wallclock_use"] # When https://github.com/rust-lang/rust/issues/43781 complies with our MSVR, we can add nice banners in the docs for the methods behind this feature-gate. diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 8ccfb945543..9a45a88ffe4 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -41,12 +41,12 @@ pub enum ConfirmationTarget { pub trait FeeEstimator { /// Gets estimated satoshis of fee required per 1000 Weight-Units. /// - /// Must be no smaller than 253 (ie 1 satoshi-per-byte rounded up to ensure later round-downs - /// don't put us below 1 satoshi-per-byte). + /// Must return a value no smaller than 253 (ie 1 satoshi-per-byte rounded up to ensure later + /// round-downs don't put us below 1 satoshi-per-byte). /// - /// This translates to: - /// * satoshis-per-byte * 250 - /// * ceil(satoshis-per-kbyte / 4) + /// This method can be implemented with the following unit conversions: + /// * max(satoshis-per-byte * 250, 253) + /// * max(satoshis-per-kbyte / 4, 253) fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32; } diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 101ad665200..05f88329176 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -27,20 +27,185 @@ use bitcoin::blockdata::block::{Block, BlockHeader}; use bitcoin::hash_types::Txid; use chain; -use chain::{Filter, WatchedOutput}; +use chain::{ChannelMonitorUpdateErr, Filter, WatchedOutput}; use chain::chaininterface::{BroadcasterInterface, FeeEstimator}; -use chain::channelmonitor; -use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, Balance, MonitorEvent, Persist, TransactionOutputs}; +use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS}; use chain::transaction::{OutPoint, TransactionData}; use chain::keysinterface::Sign; +use util::atomic_counter::AtomicCounter; use util::logger::Logger; +use util::errors::APIError; use util::events; use util::events::EventHandler; use ln::channelmanager::ChannelDetails; use prelude::*; -use sync::RwLock; +use sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard}; use core::ops::Deref; +use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; + +#[derive(Clone, Copy, Hash, PartialEq, Eq)] +/// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents +/// entirely opaque. +enum UpdateOrigin { + /// An update that was generated by the `ChannelManager` (via our `chain::Watch` + /// implementation). 
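Returning to the reworded `FeeEstimator` documentation above, the conversion it describes is small enough to show directly. A minimal sketch follows; the `MyFeeSource` type and its hard-coded sat/vbyte rates are invented for illustration, only the `max(rate * 250, 253)` arithmetic comes from the doc comment.

use lightning::chain::chaininterface::{ConfirmationTarget, FeeEstimator};

struct MyFeeSource;

impl FeeEstimator for MyFeeSource {
	fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 {
		// Pretend an external estimator returned these rates in sat/vbyte.
		let sat_per_vbyte: u32 = match confirmation_target {
			ConfirmationTarget::Background => 1,
			ConfirmationTarget::Normal => 5,
			ConfirmationTarget::HighPriority => 10,
		};
		// max(sat/vbyte * 250, 253): 1 sat/vbyte maps to 253 (the floor), 5 sat/vbyte to 1250.
		(sat_per_vbyte * 250).max(253)
	}
}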
This corresponds to an actual [`ChannelMonitorUpdate::update_id`] field + /// and [`ChannelMonitor::get_latest_update_id`]. + OffChain(u64), + /// An update that was generated during blockchain processing. The ID here is specific to the + /// generating [`ChainMonitor`] and does *not* correspond to any on-disk IDs. + ChainSync(u64), +} + +/// An opaque identifier describing a specific [`Persist`] method call. +#[derive(Clone, Copy, Hash, PartialEq, Eq)] +pub struct MonitorUpdateId { + contents: UpdateOrigin, +} + +impl MonitorUpdateId { + pub(crate) fn from_monitor_update(update: &ChannelMonitorUpdate) -> Self { + Self { contents: UpdateOrigin::OffChain(update.update_id) } + } + pub(crate) fn from_new_monitor(monitor: &ChannelMonitor) -> Self { + Self { contents: UpdateOrigin::OffChain(monitor.get_latest_update_id()) } + } +} + +/// `Persist` defines behavior for persisting channel monitors: this could mean +/// writing once to disk, and/or uploading to one or more backup services. +/// +/// Each method can return three possible values: +/// * If persistence (including any relevant `fsync()` calls) happens immediately, the +/// implementation should return `Ok(())`, indicating normal channel operation should continue. +/// * If persistence happens asynchronously, implementations should first ensure the +/// [`ChannelMonitor`] or [`ChannelMonitorUpdate`] are written durably to disk, and then return +/// `Err(ChannelMonitorUpdateErr::TemporaryFailure)` while the update continues in the +/// background. Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be +/// called with the corresponding [`MonitorUpdateId`]. +/// +/// Note that unlike the direct [`chain::Watch`] interface, +/// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs. +/// +/// * If persistence fails for some reason, implementations should return +/// `Err(ChannelMonitorUpdateErr::PermanentFailure)`, in which case the channel will likely be +/// closed without broadcasting the latest state. See +/// [`ChannelMonitorUpdateErr::PermanentFailure`] for more details. +pub trait Persist { + /// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is + /// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup. + /// + /// The data can be stored any way you want, but the identifier provided by LDK is the + /// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint + /// and the stored channel data). Note that you **must** persist every new monitor to disk. + /// + /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`], + /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`]. + /// + /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor` + /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors. + /// + /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager + /// [`Writeable::write`]: crate::util::ser::Writeable::write + fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>; + + /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given + /// update. + /// + /// Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the + /// updated monitor itself to disk/backups. 
See the [`Persist`] trait documentation for more + /// details. + /// + /// During blockchain synchronization operations, this may be called with no + /// [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted. + /// Note that after the full [`ChannelMonitor`] is persisted any previous + /// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be + /// applied to the persisted [`ChannelMonitor`] as they were already applied. + /// + /// If an implementer chooses to persist the updates only, they need to make + /// sure that all the updates are applied to the `ChannelMonitors` *before* + /// the set of channel monitors is given to the `ChannelManager` + /// deserialization routine. See [`ChannelMonitor::update_monitor`] for + /// applying a monitor update to a monitor. If full `ChannelMonitors` are + /// persisted, then there is no need to persist individual updates. + /// + /// Note that there could be a performance tradeoff between persisting complete + /// channel monitors on every update vs. persisting only updates and applying + /// them in batches. The size of each monitor grows `O(number of state updates)` + /// whereas updates are small and `O(1)`. + /// + /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`], + /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`]. + /// + /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`, + /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and + /// [`ChannelMonitorUpdateErr`] for requirements when returning errors. + /// + /// [`Writeable::write`]: crate::util::ser::Writeable::write + fn update_persisted_channel(&self, channel_id: OutPoint, update: &Option, data: &ChannelMonitor, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>; +} + +struct MonitorHolder { + monitor: ChannelMonitor, + /// The full set of pending monitor updates for this Channel. + /// + /// Note that this lock must be held during updates to prevent a race where we call + /// update_persisted_channel, the user returns a TemporaryFailure, and then calls + /// channel_monitor_updated immediately, racing our insertion of the pending update into the + /// contained Vec. + /// + /// Beyond the synchronization of updates themselves, we cannot handle user events until after + /// any chain updates have been stored on disk. Thus, we scan this list when returning updates + /// to the ChannelManager, refusing to return any updates for a ChannelMonitor which is still + /// being persisted fully to disk after a chain update. + /// + /// This avoids the possibility of handling, e.g. an on-chain claim, generating a claim monitor + /// event, resulting in the relevant ChannelManager generating a PaymentSent event and dropping + /// the pending payment entry, and then reloading before the monitor is persisted, resulting in + /// the ChannelManager re-adding the same payment entry, before the same block is replayed, + /// resulting in a duplicate PaymentSent event. + pending_monitor_updates: Mutex>, + /// When the user returns a PermanentFailure error from an update_persisted_channel call during + /// block processing, we inform the ChannelManager that the channel should be closed + /// asynchronously. 
In order to ensure no further changes happen before the ChannelManager has + /// processed the closure event, we set this to true and return PermanentFailure for any other + /// chain::Watch events. + channel_perm_failed: AtomicBool, + /// The last block height at which no [`UpdateOrigin::ChainSync`] monitor updates were present + /// in `pending_monitor_updates`. + /// If it's been more than [`LATENCY_GRACE_PERIOD_BLOCKS`] since we started waiting on a chain + /// sync event, we let monitor events return to `ChannelManager` because we cannot hold them up + /// forever or we'll end up with HTLC preimages waiting to feed back into an upstream channel + /// forever, risking funds loss. + last_chain_persist_height: AtomicUsize, +} + +impl MonitorHolder { + fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard>) -> bool { + pending_monitor_updates_lock.iter().any(|update_id| + if let UpdateOrigin::OffChain(_) = update_id.contents { true } else { false }) + } + fn has_pending_chainsync_updates(&self, pending_monitor_updates_lock: &MutexGuard>) -> bool { + pending_monitor_updates_lock.iter().any(|update_id| + if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false }) + } +} + +/// A read-only reference to a current ChannelMonitor. +/// +/// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is +/// released. +pub struct LockedChannelMonitor<'a, ChannelSigner: Sign> { + lock: RwLockReadGuard<'a, HashMap>>, + funding_txo: OutPoint, +} + +impl Deref for LockedChannelMonitor<'_, ChannelSigner> { + type Target = ChannelMonitor; + fn deref(&self) -> &ChannelMonitor { + &self.lock.get(&self.funding_txo).expect("Checked at construction").monitor + } +} /// An implementation of [`chain::Watch`] for monitoring channels. /// @@ -56,15 +221,23 @@ pub struct ChainMonitor, + P::Target: Persist, { - /// The monitors - pub monitors: RwLock>>, + monitors: RwLock>>, + /// When we generate a [`MonitorUpdateId`] for a chain-event monitor persistence, we need a + /// unique ID, which we calculate by simply getting the next value from this counter. Note that + /// the ID is never persisted so it's ok that they reset on restart. + sync_persistence_id: AtomicCounter, chain_source: Option, broadcaster: T, logger: L, fee_estimator: F, persister: P, + /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly + /// from the user and not from a [`ChannelMonitor`]. + pending_monitor_events: Mutex>, + /// The best block height seen, used as a proxy for the passage of time. + highest_chain_height: AtomicUsize, } impl ChainMonitor @@ -72,7 +245,7 @@ where C::Target: chain::Filter, T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - P::Target: channelmonitor::Persist, + P::Target: Persist, { /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view /// of a channel and reacting accordingly based on transactions in the given chain data. See @@ -83,31 +256,75 @@ where C::Target: chain::Filter, /// calls must not exclude any transactions matching the new outputs nor any in-block /// descendants of such transactions. It is not necessary to re-fetch the block to obtain /// updated `txdata`. - fn process_chain_data(&self, header: &BlockHeader, txdata: &TransactionData, process: FN) + /// + /// Calls which represent a new blockchain tip height should set `best_height`. 
+ fn process_chain_data(&self, header: &BlockHeader, best_height: Option, txdata: &TransactionData, process: FN) where FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { let mut dependent_txdata = Vec::new(); - let monitors = self.monitors.read().unwrap(); - for monitor in monitors.values() { - let mut txn_outputs = process(monitor, txdata); - - // Register any new outputs with the chain source for filtering, storing any dependent - // transactions from within the block that previously had not been included in txdata. - if let Some(ref chain_source) = self.chain_source { - let block_hash = header.block_hash(); - for (txid, mut outputs) in txn_outputs.drain(..) { - for (idx, output) in outputs.drain(..) { - // Register any new outputs with the chain source for filtering and recurse - // if it indicates that there are dependent transactions within the block - // that had not been previously included in txdata. - let output = WatchedOutput { - block_hash: Some(block_hash), - outpoint: OutPoint { txid, index: idx as u16 }, - script_pubkey: output.script_pubkey, - }; - if let Some(tx) = chain_source.register_output(output) { - dependent_txdata.push(tx); + { + let monitor_states = self.monitors.write().unwrap(); + if let Some(height) = best_height { + // If the best block height is being updated, update highest_chain_height under the + // monitors write lock. + let old_height = self.highest_chain_height.load(Ordering::Acquire); + let new_height = height as usize; + if new_height > old_height { + self.highest_chain_height.store(new_height, Ordering::Release); + } + } + + for (funding_outpoint, monitor_state) in monitor_states.iter() { + let monitor = &monitor_state.monitor; + let mut txn_outputs; + { + txn_outputs = process(monitor, txdata); + let update_id = MonitorUpdateId { + contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()), + }; + let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); + if let Some(height) = best_height { + if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) { + // If there are not ChainSync persists awaiting completion, go ahead and + // set last_chain_persist_height here - we wouldn't want the first + // TemporaryFailure to always immediately be considered "overly delayed". + monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release); + } + } + + log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor)); + match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) { + Ok(()) => + log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)), + Err(ChannelMonitorUpdateErr::PermanentFailure) => { + monitor_state.channel_perm_failed.store(true, Ordering::Release); + self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateFailed(*funding_outpoint)); + }, + Err(ChannelMonitorUpdateErr::TemporaryFailure) => { + log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor)); + pending_monitor_updates.push(update_id); + }, + } + } + + // Register any new outputs with the chain source for filtering, storing any dependent + // transactions from within the block that previously had not been included in txdata. + if let Some(ref chain_source) = self.chain_source { + let block_hash = header.block_hash(); + for (txid, mut outputs) in txn_outputs.drain(..) 
{ + for (idx, output) in outputs.drain(..) { + // Register any new outputs with the chain source for filtering and recurse + // if it indicates that there are dependent transactions within the block + // that had not been previously included in txdata. + let output = WatchedOutput { + block_hash: Some(block_hash), + outpoint: OutPoint { txid, index: idx as u16 }, + script_pubkey: output.script_pubkey, + }; + if let Some(tx) = chain_source.register_output(output) { + dependent_txdata.push(tx); + } } } } @@ -119,7 +336,7 @@ where C::Target: chain::Filter, dependent_txdata.sort_unstable_by_key(|(index, _tx)| *index); dependent_txdata.dedup_by_key(|(index, _tx)| *index); let txdata: Vec<_> = dependent_txdata.iter().map(|(index, tx)| (*index, tx)).collect(); - self.process_chain_data(header, &txdata, process); + self.process_chain_data(header, None, &txdata, process); // We skip the best height the second go-around } } @@ -133,11 +350,14 @@ where C::Target: chain::Filter, pub fn new(chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P) -> Self { Self { monitors: RwLock::new(HashMap::new()), + sync_persistence_id: AtomicCounter::new(), chain_source, broadcaster, logger, fee_estimator: feeest, persister, + pending_monitor_events: Mutex::new(Vec::new()), + highest_chain_height: AtomicUsize::new(0), } } @@ -150,10 +370,10 @@ where C::Target: chain::Filter, /// /// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for /// inclusion in the return value. - pub fn get_claimable_balances(&self, ignored_channels: &[ChannelDetails]) -> Vec { + pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec { let mut ret = Vec::new(); - let monitors = self.monitors.read().unwrap(); - for (_, monitor) in monitors.iter().filter(|(funding_outpoint, _)| { + let monitor_states = self.monitors.read().unwrap(); + for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| { for chan in ignored_channels { if chan.funding_txo.as_ref() == Some(funding_outpoint) { return false; @@ -161,11 +381,107 @@ where C::Target: chain::Filter, } true }) { - ret.append(&mut monitor.get_claimable_balances()); + ret.append(&mut monitor_state.monitor.get_claimable_balances()); } ret } + /// Gets the [`LockedChannelMonitor`] for a given funding outpoint, returning an `Err` if no + /// such [`ChannelMonitor`] is currently being monitored for. + /// + /// Note that the result holds a mutex over our monitor set, and should not be held + /// indefinitely. + pub fn get_monitor(&self, funding_txo: OutPoint) -> Result, ()> { + let lock = self.monitors.read().unwrap(); + if lock.get(&funding_txo).is_some() { + Ok(LockedChannelMonitor { lock, funding_txo }) + } else { + Err(()) + } + } + + /// Lists the funding outpoint of each [`ChannelMonitor`] being monitored. + /// + /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always + /// monitoring for on-chain state resolutions. + pub fn list_monitors(&self) -> Vec { + self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect() + } + + #[cfg(test)] + pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor { + self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor + } + + /// Indicates the persistence of a [`ChannelMonitor`] has completed after + /// [`ChannelMonitorUpdateErr::TemporaryFailure`] was returned from an update operation. 
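Before the `channel_monitor_updated` details continue below, a short usage sketch of the accessors introduced above (`list_monitors` and `get_monitor`). `chain_monitor` stands in for an already-constructed `ChainMonitor`; the loop is illustrative only.

for funding_txo in chain_monitor.list_monitors() {
	// `get_monitor` holds a read lock on the monitor set, so don't keep the guard around.
	if let Ok(monitor) = chain_monitor.get_monitor(funding_txo) {
		let balances = monitor.get_claimable_balances();
		println!("channel {:?} has {} claimable balance entries", funding_txo, balances.len());
	}
}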
+ /// + /// Thus, the anticipated use is, at a high level: + /// 1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the + /// update to disk and begins updating any remote (e.g. watchtower/backup) copies, + /// returning [`ChannelMonitorUpdateErr::TemporaryFailure`], + /// 2) once all remote copies are updated, you call this function with the + /// `completed_update_id` that completed, and once all pending updates have completed the + /// channel will be re-enabled. + // Note that we re-enable only after `UpdateOrigin::OffChain` updates complete, we don't + // care about `UpdateOrigin::ChainSync` updates for the channel state being updated. We + // only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s. + /// + /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently + /// registered [`ChannelMonitor`]s. + pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> { + let monitors = self.monitors.read().unwrap(); + let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else { + return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) }); + }; + let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap(); + pending_monitor_updates.retain(|update_id| *update_id != completed_update_id); + + match completed_update_id { + MonitorUpdateId { contents: UpdateOrigin::OffChain(_) } => { + // Note that we only check for `UpdateOrigin::OffChain` failures here - if + // we're being told that a `UpdateOrigin::OffChain` monitor update completed, + // we only care about ensuring we don't tell the `ChannelManager` to restore + // the channel to normal operation until all `UpdateOrigin::OffChain` updates + // complete. + // If there's some `UpdateOrigin::ChainSync` update still pending that's okay + // - we can still update our channel state, just as long as we don't return + // `MonitorEvent`s from the monitor back to the `ChannelManager` until they + // complete. + let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates); + if monitor_is_pending_updates || monitor_data.channel_perm_failed.load(Ordering::Acquire) { + // If there are still monitor updates pending (or an old monitor update + // finished after a later one perm-failed), we cannot yet construct an + // UpdateCompleted event. + return Ok(()); + } + self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted { + funding_txo, + monitor_update_id: monitor_data.monitor.get_latest_update_id(), + }); + }, + MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => { + if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) { + monitor_data.last_chain_persist_height.store(self.highest_chain_height.load(Ordering::Acquire), Ordering::Release); + // The next time release_pending_monitor_events is called, any events for this + // ChannelMonitor will be returned. + } + }, + } + Ok(()) + } + + /// This wrapper avoids having to update some of our tests for now as they assume the direct + /// chain::Watch API wherein we mark a monitor fully-updated by just calling + /// channel_monitor_updated once with the highest ID. 
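To make the anticipated flow above concrete, a sketch of the completion side for an asynchronous persister. `chain_monitor`, `funding_txo` and `update_id` are assumed to have been captured when `Persist` returned `Err(ChannelMonitorUpdateErr::TemporaryFailure)`; nothing else here is prescribed by the API.

use lightning::util::errors::APIError;

// Called once the remote/backup copy for one specific update has been written. Note that
// this must be invoked once per pending MonitorUpdateId, not once per channel.
if let Err(APIError::APIMisuseError { err }) =
	chain_monitor.channel_monitor_updated(funding_txo, update_id)
{
	// Only returned when `funding_txo` does not match any registered ChannelMonitor.
	eprintln!("channel_monitor_updated failed: {}", err);
}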
+ #[cfg(any(test, feature = "fuzztarget"))] + pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) { + self.pending_monitor_events.lock().unwrap().push(MonitorEvent::UpdateCompleted { + funding_txo, + monitor_update_id, + }); + } + #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))] pub fn get_and_clear_pending_events(&self) -> Vec { use util::events::EventsProvider; @@ -183,23 +499,23 @@ where T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - P::Target: channelmonitor::Persist, + P::Target: Persist, { fn block_connected(&self, block: &Block, height: u32) { let header = &block.header; let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height); - self.process_chain_data(header, &txdata, |monitor, txdata| { + self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| { monitor.block_connected( header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger) }); } fn block_disconnected(&self, header: &BlockHeader, height: u32) { - let monitors = self.monitors.read().unwrap(); + let monitor_states = self.monitors.read().unwrap(); log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height); - for monitor in monitors.values() { - monitor.block_disconnected( + for monitor_state in monitor_states.values() { + monitor_state.monitor.block_disconnected( header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger); } } @@ -212,11 +528,11 @@ where T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - P::Target: channelmonitor::Persist, + P::Target: Persist, { fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) { log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash()); - self.process_chain_data(header, txdata, |monitor, txdata| { + self.process_chain_data(header, None, txdata, |monitor, txdata| { monitor.transactions_confirmed( header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger) }); @@ -224,15 +540,15 @@ where fn transaction_unconfirmed(&self, txid: &Txid) { log_debug!(self.logger, "Transaction {} reorganized out of chain", txid); - let monitors = self.monitors.read().unwrap(); - for monitor in monitors.values() { - monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger); + let monitor_states = self.monitors.read().unwrap(); + for monitor_state in monitor_states.values() { + monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger); } } fn best_block_updated(&self, header: &BlockHeader, height: u32) { log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height); - self.process_chain_data(header, &[], |monitor, txdata| { + self.process_chain_data(header, Some(height), &[], |monitor, txdata| { // While in practice there shouldn't be any recursive calls when given empty txdata, // it's still possible if a chain::Filter implementation returns a transaction. 
debug_assert!(txdata.is_empty()); @@ -243,9 +559,9 @@ where fn get_relevant_txids(&self) -> Vec { let mut txids = Vec::new(); - let monitors = self.monitors.read().unwrap(); - for monitor in monitors.values() { - txids.append(&mut monitor.get_relevant_txids()); + let monitor_states = self.monitors.read().unwrap(); + for monitor_state in monitor_states.values() { + txids.append(&mut monitor_state.monitor.get_relevant_txids()); } txids.sort_unstable(); @@ -260,7 +576,7 @@ where C::Target: chain::Filter, T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - P::Target: channelmonitor::Persist, + P::Target: Persist, { /// Adds the monitor that watches the channel referred to by the given outpoint. /// @@ -276,20 +592,30 @@ where C::Target: chain::Filter, return Err(ChannelMonitorUpdateErr::PermanentFailure)}, hash_map::Entry::Vacant(e) => e, }; - if let Err(e) = self.persister.persist_new_channel(funding_outpoint, &monitor) { - log_error!(self.logger, "Failed to persist new channel data"); - return Err(e); + log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor)); + let update_id = MonitorUpdateId::from_new_monitor(&monitor); + let mut pending_monitor_updates = Vec::new(); + let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id); + if persist_res.is_err() { + log_error!(self.logger, "Failed to persist new ChannelMonitor for channel {}: {:?}", log_funding_info!(monitor), persist_res); + } else { + log_trace!(self.logger, "Finished persisting new ChannelMonitor for channel {}", log_funding_info!(monitor)); } - { - let funding_txo = monitor.get_funding_txo(); - log_trace!(self.logger, "Got new Channel Monitor for channel {}", log_bytes!(funding_txo.0.to_channel_id()[..])); - - if let Some(ref chain_source) = self.chain_source { - monitor.load_outputs_to_watch(chain_source); - } + if persist_res == Err(ChannelMonitorUpdateErr::PermanentFailure) { + return persist_res; + } else if persist_res.is_err() { + pending_monitor_updates.push(update_id); } - entry.insert(monitor); - Ok(()) + if let Some(ref chain_source) = self.chain_source { + monitor.load_outputs_to_watch(chain_source); + } + entry.insert(MonitorHolder { + monitor, + pending_monitor_updates: Mutex::new(pending_monitor_updates), + channel_perm_failed: AtomicBool::new(false), + last_chain_persist_height: AtomicUsize::new(self.highest_chain_height.load(Ordering::Acquire)), + }); + persist_res } /// Note that we persist the given `ChannelMonitor` update while holding the @@ -309,20 +635,32 @@ where C::Target: chain::Filter, #[cfg(not(any(test, feature = "fuzztarget")))] Err(ChannelMonitorUpdateErr::PermanentFailure) }, - Some(monitor) => { - log_trace!(self.logger, "Updating Channel Monitor for channel {}", log_funding_info!(monitor)); + Some(monitor_state) => { + let monitor = &monitor_state.monitor; + log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor)); let update_res = monitor.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger); - if let Err(e) = &update_res { - log_error!(self.logger, "Failed to update channel monitor: {:?}", e); + if update_res.is_err() { + log_error!(self.logger, "Failed to update ChannelMonitor for channel {}.", log_funding_info!(monitor)); } // Even if updating the monitor returns an error, the monitor's state will // still be changed. So, persist the updated monitor despite the error. 
- let persist_res = self.persister.update_persisted_channel(funding_txo, &update, monitor); - if let Err(ref e) = persist_res { - log_error!(self.logger, "Failed to persist channel monitor update: {:?}", e); + let update_id = MonitorUpdateId::from_monitor_update(&update); + let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); + let persist_res = self.persister.update_persisted_channel(funding_txo, &Some(update), monitor, update_id); + if let Err(e) = persist_res { + if e == ChannelMonitorUpdateErr::TemporaryFailure { + pending_monitor_updates.push(update_id); + } else { + monitor_state.channel_perm_failed.store(true, Ordering::Release); + } + log_error!(self.logger, "Failed to persist ChannelMonitor update for channel {}: {:?}", log_funding_info!(monitor), e); + } else { + log_trace!(self.logger, "Finished persisting ChannelMonitor update for channel {}", log_funding_info!(monitor)); } if update_res.is_err() { Err(ChannelMonitorUpdateErr::PermanentFailure) + } else if monitor_state.channel_perm_failed.load(Ordering::Acquire) { + Err(ChannelMonitorUpdateErr::PermanentFailure) } else { persist_res } @@ -331,9 +669,33 @@ where C::Target: chain::Filter, } fn release_pending_monitor_events(&self) -> Vec { - let mut pending_monitor_events = Vec::new(); - for monitor in self.monitors.read().unwrap().values() { - pending_monitor_events.append(&mut monitor.get_and_clear_pending_monitor_events()); + let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0); + for monitor_state in self.monitors.read().unwrap().values() { + let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap()); + if is_pending_monitor_update && + monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize + > self.highest_chain_height.load(Ordering::Acquire) + { + log_info!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!"); + } else { + if monitor_state.channel_perm_failed.load(Ordering::Acquire) { + // If a `UpdateOrigin::ChainSync` persistence failed with `PermanantFailure`, + // we don't really know if the latest `ChannelMonitor` state is on disk or not. + // We're supposed to hold monitor updates until the latest state is on disk to + // avoid duplicate events, but the user told us persistence is screw-y and may + // not complete. We can't hold events forever because we may learn some payment + // preimage, so instead we just log and hope the user complied with the + // `PermanentFailure` requirements of having at least the local-disk copy + // updated. + log_info!(self.logger, "A Channel Monitor sync returned PermanentFailure. 
Returning monitor events but duplicate events may appear after reload!"); + } + if is_pending_monitor_update { + log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS); + log_error!(self.logger, " To avoid funds-loss, we are allowing monitor updates to be released."); + log_error!(self.logger, " This may cause duplicate payment events to be generated."); + } + pending_monitor_events.append(&mut monitor_state.monitor.get_and_clear_pending_monitor_events()); + } } pending_monitor_events } @@ -344,7 +706,7 @@ impl even T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, - P::Target: channelmonitor::Persist, + P::Target: Persist, { /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity. /// @@ -354,8 +716,8 @@ impl even /// [`SpendableOutputs`]: events::Event::SpendableOutputs fn process_pending_events(&self, handler: H) where H::Target: EventHandler { let mut pending_events = Vec::new(); - for monitor in self.monitors.read().unwrap().values() { - pending_events.append(&mut monitor.get_and_clear_pending_events()); + for monitor_state in self.monitors.read().unwrap().values() { + pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events()); } for event in pending_events.drain(..) { handler.handle_event(&event); @@ -365,10 +727,18 @@ impl even #[cfg(test)] mod tests { - use ::{check_added_monitors, get_local_commitment_txn}; + use bitcoin::BlockHeader; + use ::{check_added_monitors, check_closed_broadcast, check_closed_event}; + use ::{expect_payment_sent, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg}; + use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err}; + use chain::{ChannelMonitorUpdateErr, Confirm, Watch}; + use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS; + use ln::channelmanager::PaymentSendFailure; use ln::features::InitFeatures; use ln::functional_test_utils::*; - use util::events::MessageSendEventsProvider; + use ln::msgs::ChannelMessageHandler; + use util::errors::APIError; + use util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider}; use util::test_utils::{OnRegisterOutput, TxOutReference}; /// Tests that in-block dependent transactions are processed by `block_connected` when not @@ -413,4 +783,180 @@ mod tests { nodes[1].node.get_and_clear_pending_msg_events(); nodes[1].node.get_and_clear_pending_events(); } + + #[test] + fn test_async_ooo_offchain_updates() { + // Test that if we have multiple offchain updates being persisted and they complete + // out-of-order, the ChainMonitor waits until all have completed before informing the + // ChannelManager. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + + // Route two payments to be claimed at the same time. 
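As an aside on the hold-back window in `release_pending_monitor_events` above (separate from the test that follows): events are withheld while `last_chain_persist_height + LATENCY_GRACE_PERIOD_BLOCKS > highest_chain_height`. Taking the grace period as three blocks, per the "after three blocks" comment in the chainsync test further below, the arithmetic works out as in this small sketch.

fn grace_period_example() {
	let last_chain_persist_height: usize = 100;
	let latency_grace_period_blocks: usize = 3;
	// Withheld at heights 100 through 102 (100 + 3 > height), released once height 103 is seen.
	assert!(last_chain_persist_height + latency_grace_period_blocks > 102);
	assert!(last_chain_persist_height + latency_grace_period_blocks <= 103);
}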
+ let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0; + let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0; + + chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear(); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + + nodes[1].node.claim_funds(payment_preimage_1); + check_added_monitors!(nodes[1], 1); + nodes[1].node.claim_funds(payment_preimage_2); + check_added_monitors!(nodes[1], 1); + + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + + let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone(); + assert_eq!(persistences.len(), 1); + let (funding_txo, updates) = persistences.iter().next().unwrap(); + assert_eq!(updates.len(), 2); + + // Note that updates is a HashMap so the ordering here is actually random. This shouldn't + // fail either way but if it fails intermittently it's depending on the ordering of updates. + let mut update_iter = updates.iter(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap(); + assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap(); + + // Now manually walk the commitment signed dance - because we claimed two payments + // back-to-back it doesn't fit into the neat walk commitment_signed_dance does. + + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed); + check_added_monitors!(nodes[0], 1); + let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa); + check_added_monitors!(nodes[1], 1); + let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update); + check_added_monitors!(nodes[1], 1); + let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]); + expect_payment_sent_without_paths!(nodes[0], payment_preimage_2); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed); + check_added_monitors!(nodes[0], 1); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa); + expect_payment_path_successful!(nodes[0]); + check_added_monitors!(nodes[0], 1); + let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update); + check_added_monitors!(nodes[1], 1); + let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, 
nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa); + expect_payment_path_successful!(nodes[0]); + check_added_monitors!(nodes[0], 1); + } + + fn do_chainsync_pauses_events(block_timeout: bool) { + // When a chainsync monitor update occurs, any MonitorUpdates should be held before being + // passed upstream to a `ChannelManager` via `Watch::release_pending_monitor_events`. This + // tests that behavior, as well as some ways it might go wrong. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let channel = create_announced_chan_between_nodes( + &nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + + // Get a route for later and rebalance the channel somewhat + send_payment(&nodes[0], &[&nodes[1]], 10_000_000); + let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + + // First route a payment that we will claim on chain and give the recipient the preimage. + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0; + nodes[1].node.claim_funds(payment_preimage); + nodes[1].node.get_and_clear_pending_msg_events(); + check_added_monitors!(nodes[1], 1); + let remote_txn = get_local_commitment_txn!(nodes[1], channel.2); + assert_eq!(remote_txn.len(), 2); + + // Temp-fail the block connection which will hold the channel-closed event + chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear(); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + + // Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The + // channel is now closed, but the ChannelManager doesn't know that yet. + let new_header = BlockHeader { + version: 2, time: 0, bits: 0, nonce: 0, + prev_blockhash: nodes[0].best_block_info().0, + merkle_root: Default::default() }; + nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header, + &[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1); + assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty()); + nodes[0].chain_monitor.chain_monitor.best_block_updated(&new_header, nodes[0].best_block_info().1 + 1); + assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty()); + + // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass + // the update through to the ChannelMonitor which will refuse it (as the channel is closed). + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + unwrap_send_err!(nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)), + true, APIError::ChannelUnavailable { ref err }, + assert!(err.contains("ChannelMonitor storage failure"))); + check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update + check_closed_broadcast!(nodes[0], true); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() }); + + // However, as the ChainMonitor is still waiting for the original persistence to complete, + // it won't yet release the MonitorEvents. 
+ assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty()); + + if block_timeout { + // After three blocks, pending MontiorEvents should be released either way. + let latest_header = BlockHeader { + version: 2, time: 0, bits: 0, nonce: 0, + prev_blockhash: nodes[0].best_block_info().0, + merkle_root: Default::default() }; + nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS); + } else { + let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone(); + for (funding_outpoint, update_ids) in persistences { + for update_id in update_ids { + nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, update_id).unwrap(); + } + } + } + + expect_payment_sent!(nodes[0], payment_preimage); + } + + #[test] + fn chainsync_pauses_events() { + do_chainsync_pauses_events(false); + do_chainsync_pauses_events(true); + } + + #[test] + fn update_during_chainsync_fails_channel() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + + chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear(); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)); + + connect_blocks(&nodes[0], 1); + // Before processing events, the ChannelManager will still think the Channel is open and + // there won't be any ChannelMonitorUpdates + assert_eq!(nodes[0].node.list_channels().len(), 1); + check_added_monitors!(nodes[0], 0); + // ... however once we get events once, the channel will close, creating a channel-closed + // ChannelMonitorUpdate. + check_closed_broadcast!(nodes[0], true); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }); + check_added_monitors!(nodes[0], 1); + } } diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 413d184b598..8786f47db0d 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -115,84 +115,48 @@ impl Readable for ChannelMonitorUpdate { } } -/// An error enum representing a failure to persist a channel monitor update. -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum ChannelMonitorUpdateErr { - /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of - /// our state failed, but is expected to succeed at some point in the future). - /// - /// Such a failure will "freeze" a channel, preventing us from revoking old states or - /// submitting new commitment transactions to the counterparty. Once the update(s) which failed - /// have been successfully applied, ChannelManager::channel_monitor_updated can be used to - /// restore the channel to an operational state. - /// - /// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If - /// you return a TemporaryFailure you must ensure that it is written to disk safely before - /// writing out the latest ChannelManager state. 
- /// - /// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur - /// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting - /// to claim it on this channel) and those updates must be applied wherever they can be. At - /// least one such updated ChannelMonitor must be persisted otherwise PermanentFailure should - /// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to - /// the channel which would invalidate previous ChannelMonitors are not made when a channel has - /// been "frozen". - /// - /// Note that even if updates made after TemporaryFailure succeed you must still call - /// channel_monitor_updated to ensure you have the latest monitor and re-enable normal channel - /// operation. - /// - /// Note that the update being processed here will not be replayed for you when you call - /// ChannelManager::channel_monitor_updated, so you must store the update itself along - /// with the persisted ChannelMonitor on your own local disk prior to returning a - /// TemporaryFailure. You may, of course, employ a journaling approach, storing only the - /// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at - /// reload-time. - /// - /// For deployments where a copy of ChannelMonitors and other local state are backed up in a - /// remote location (with local copies persisted immediately), it is anticipated that all - /// updates will return TemporaryFailure until the remote copies could be updated. - TemporaryFailure, - /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a - /// different watchtower and cannot update with all watchtowers that were previously informed - /// of this channel). - /// - /// At reception of this error, ChannelManager will force-close the channel and return at - /// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at - /// least one ChannelMonitor copy. Revocation secret MUST NOT be released and offchain channel - /// update must be rejected. - /// - /// This failure may also signal a failure to update the local persisted copy of one of - /// the channel monitor instance. - /// - /// Note that even when you fail a holder commitment transaction update, you must store the - /// update to ensure you can claim from it in case of a duplicate copy of this ChannelMonitor - /// broadcasts it (e.g distributed channel-monitor deployment) - /// - /// In case of distributed watchtowers deployment, the new version must be written to disk, as - /// state may have been stored but rejected due to a block forcing a commitment broadcast. This - /// storage is used to claim outputs of rejected state confirmed onchain by another watchtower, - /// lagging behind on block processing. - PermanentFailure, -} - -/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is -/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this -/// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was -/// corrupted. -/// Contains a developer-readable error message. -#[derive(Clone, Debug)] -pub struct MonitorUpdateError(pub &'static str); - /// An event to be processed by the ChannelManager. #[derive(Clone, PartialEq)] pub enum MonitorEvent { /// A monitor event containing an HTLCUpdate. 
HTLCEvent(HTLCUpdate), - /// A monitor event that the Channel's commitment transaction was broadcasted. - CommitmentTxBroadcasted(OutPoint), + /// A monitor event that the Channel's commitment transaction was confirmed. + CommitmentTxConfirmed(OutPoint), + + /// Indicates a [`ChannelMonitor`] update has completed. See + /// [`ChannelMonitorUpdateErr::TemporaryFailure`] for more information on how this is used. + /// + /// [`ChannelMonitorUpdateErr::TemporaryFailure`]: super::ChannelMonitorUpdateErr::TemporaryFailure + UpdateCompleted { + /// The funding outpoint of the [`ChannelMonitor`] that was updated + funding_txo: OutPoint, + /// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or + /// [`ChannelMonitor::get_latest_update_id`]. + /// + /// Note that this should only be set to a given update's ID if all previous updates for the + /// same [`ChannelMonitor`] have been applied and persisted. + monitor_update_id: u64, + }, + + /// Indicates a [`ChannelMonitor`] update has failed. See + /// [`ChannelMonitorUpdateErr::PermanentFailure`] for more information on how this is used. + /// + /// [`ChannelMonitorUpdateErr::PermanentFailure`]: super::ChannelMonitorUpdateErr::PermanentFailure + UpdateFailed(OutPoint), } +impl_writeable_tlv_based_enum_upgradable!(MonitorEvent, + // Note that UpdateCompleted and UpdateFailed are currently never serialized to disk as they are + // generated only in ChainMonitor + (0, UpdateCompleted) => { + (0, funding_txo, required), + (2, monitor_update_id, required), + }, +; + (2, HTLCEvent), + (4, CommitmentTxConfirmed), + (6, UpdateFailed), +); /// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on /// chain. Used to update the corresponding HTLC in the backward channel. Failing to pass the @@ -253,8 +217,6 @@ pub const ANTI_REORG_DELAY: u32 = 6; /// fail this HTLC, /// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race /// condition with the above), we will fail this HTLC without telling the user we received it, -/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and -/// that HTLC expires within this many blocks, we will simply fail the HTLC instead. /// /// (1) is all about protecting us - we need enough time to update the channel state before we hit /// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage. @@ -262,9 +224,6 @@ pub const ANTI_REORG_DELAY: u32 = 6; /// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately /// in a race condition between the user connecting a block (which would fail it) and the user /// providing us the preimage (which would claim it). -/// -/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may -/// end up force-closing the channel on us to claim it. pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS; // TODO(devrandom) replace this with HolderCommitmentTransaction @@ -685,7 +644,17 @@ pub(crate) struct ChannelMonitorImpl { payment_preimages: HashMap, + // Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated + // during chain data processing. 
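For code outside LDK that implements `chain::Watch` itself, or otherwise inspects `MonitorEvent`s, the rename and the two new variants above change match arms roughly as in the sketch below; the `handle_*` functions are invented placeholders.

use lightning::chain::channelmonitor::MonitorEvent;

match monitor_event {
	MonitorEvent::HTLCEvent(htlc_update) => handle_htlc_update(htlc_update),
	// Renamed from `CommitmentTxBroadcasted` in this patch.
	MonitorEvent::CommitmentTxConfirmed(funding_txo) => handle_closed_channel(funding_txo),
	MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } =>
		handle_update_completed(funding_txo, monitor_update_id),
	MonitorEvent::UpdateFailed(funding_txo) => handle_failed_persistence(funding_txo),
}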
This prevents a race in `ChainMonitor::update_channel` (and + // presumably user implementations thereof as well) where we update the in-memory channel + // object, then before the persistence finishes (as it's all under a read-lock), we return + // pending events to the user or to the relevant `ChannelManager`. Then, on reload, we'll have + // the pre-event state here, but have processed the event in the `ChannelManager`. + // Note that because the `event_lock` in `ChainMonitor` is only taken in + // block/transaction-connected events and *not* during block/transaction-disconnected events, + // we further MUST NOT generate events during block/transaction-disconnection. pending_monitor_events: Vec, + pending_events: Vec, // Used to track on-chain events (i.e., transactions part of channels confirmed on chain) on @@ -718,6 +687,11 @@ pub(crate) struct ChannelMonitorImpl { // remote monitor out-of-order with regards to the block view. holder_tx_signed: bool, + // If a spend of the funding output is seen, we set this to true and reject any further + // updates. This prevents any further changes in the offchain state no matter the order + // of block connection between ChannelMonitors and the ChannelManager. + funding_spend_seen: bool, + funding_spend_confirmed: Option, /// The set of HTLCs which have been either claimed or failed on chain and have reached /// the requisite confirmations on the claim/fail transaction (either ANTI_REORG_DELAY or the @@ -783,6 +757,7 @@ impl PartialEq for ChannelMonitorImpl { self.outputs_to_watch != other.outputs_to_watch || self.lockdown_from_offchain != other.lockdown_from_offchain || self.holder_tx_signed != other.holder_tx_signed || + self.funding_spend_seen != other.funding_spend_seen || self.funding_spend_confirmed != other.funding_spend_confirmed || self.htlcs_resolved_on_chain != other.htlcs_resolved_on_chain { @@ -911,14 +886,19 @@ impl Writeable for ChannelMonitorImpl { writer.write_all(&payment_preimage.0[..])?; } - writer.write_all(&byte_utils::be64_to_array(self.pending_monitor_events.len() as u64))?; + writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev { + MonitorEvent::HTLCEvent(_) => true, + MonitorEvent::CommitmentTxConfirmed(_) => true, + _ => false, + }).count() as u64).to_be_bytes())?; for event in self.pending_monitor_events.iter() { match event { MonitorEvent::HTLCEvent(upd) => { 0u8.write(writer)?; upd.write(writer)?; }, - MonitorEvent::CommitmentTxBroadcasted(_) => 1u8.write(writer)? 
+ MonitorEvent::CommitmentTxConfirmed(_) => 1u8.write(writer)?, + _ => {}, // Covered in the TLV writes below } } @@ -952,6 +932,8 @@ impl Writeable for ChannelMonitorImpl { write_tlv_fields!(writer, { (1, self.funding_spend_confirmed, option), (3, self.htlcs_resolved_on_chain, vec_type), + (5, self.pending_monitor_events, vec_type), + (7, self.funding_spend_seen, required), }); Ok(()) @@ -1050,6 +1032,7 @@ impl ChannelMonitor { lockdown_from_offchain: false, holder_tx_signed: false, + funding_spend_seen: false, funding_spend_confirmed: None, htlcs_resolved_on_chain: Vec::new(), @@ -1061,7 +1044,7 @@ impl ChannelMonitor { } #[cfg(test)] - fn provide_secret(&self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> { + fn provide_secret(&self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> { self.inner.lock().unwrap().provide_secret(idx, secret) } @@ -1083,12 +1066,10 @@ impl ChannelMonitor { #[cfg(test)] fn provide_latest_holder_commitment_tx( - &self, - holder_commitment_tx: HolderCommitmentTransaction, + &self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, - ) -> Result<(), MonitorUpdateError> { - self.inner.lock().unwrap().provide_latest_holder_commitment_tx( - holder_commitment_tx, htlc_outputs) + ) -> Result<(), ()> { + self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs).map_err(|_| ()) } #[cfg(test)] @@ -1129,7 +1110,7 @@ impl ChannelMonitor { broadcaster: &B, fee_estimator: &F, logger: &L, - ) -> Result<(), MonitorUpdateError> + ) -> Result<(), ()> where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -1527,6 +1508,101 @@ impl ChannelMonitor { res } + + /// Gets the set of outbound HTLCs which are pending resolution in this channel. + /// This is used to reconstruct pending outbound payments on restart in the ChannelManager. + pub(crate) fn get_pending_outbound_htlcs(&self) -> HashMap { + let mut res = HashMap::new(); + let us = self.inner.lock().unwrap(); + + macro_rules! walk_htlcs { + ($holder_commitment: expr, $htlc_iter: expr) => { + for (htlc, source) in $htlc_iter { + if us.htlcs_resolved_on_chain.iter().any(|v| Some(v.input_idx) == htlc.transaction_output_index) { + // We should assert that funding_spend_confirmed is_some() here, but we + // have some unit tests which violate HTLC transaction CSVs entirely and + // would fail. + // TODO: Once tests all connect transactions at consensus-valid times, we + // should assert here like we do in `get_claimable_balances`. + } else if htlc.offered == $holder_commitment { + // If the payment was outbound, check if there's an HTLCUpdate + // indicating we have spent this HTLC with a timeout, claiming it back + // and awaiting confirmations on it. + let htlc_update_confd = us.onchain_events_awaiting_threshold_conf.iter().any(|event| { + if let OnchainEvent::HTLCUpdate { input_idx: Some(input_idx), .. } = event.event { + // If the HTLC was timed out, we wait for ANTI_REORG_DELAY blocks + // before considering it "no longer pending" - this matches when we + // provide the ChannelManager an HTLC failure event. + Some(input_idx) == htlc.transaction_output_index && + us.best_block.height() >= event.height + ANTI_REORG_DELAY - 1 + } else if let OnchainEvent::HTLCSpendConfirmation { input_idx, .. } = event.event { + // If the HTLC was fulfilled with a preimage, we consider the HTLC + // immediately non-pending, matching when we provide ChannelManager + // the preimage. 
+ Some(input_idx) == htlc.transaction_output_index + } else { false } + }); + if !htlc_update_confd { + res.insert(source.clone(), htlc.clone()); + } + } + } + } + } + + // We're only concerned with the confirmation count of HTLC transactions, and don't + // actually care how many confirmations a commitment transaction may or may not have. Thus, + // we look for either a FundingSpendConfirmation event or a funding_spend_confirmed. + let confirmed_txid = us.funding_spend_confirmed.or_else(|| { + us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| { + if let OnchainEvent::FundingSpendConfirmation { .. } = event.event { + Some(event.txid) + } else { None } + }) + }); + if let Some(txid) = confirmed_txid { + if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid { + walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().filter_map(|(a, b)| { + if let &Some(ref source) = b { + Some((a, &**source)) + } else { None } + })); + } else if txid == us.current_holder_commitment_tx.txid { + walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().filter_map(|(a, _, c)| { + if let Some(source) = c { Some((a, source)) } else { None } + })); + } else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx { + if txid == prev_commitment.txid { + walk_htlcs!(true, prev_commitment.htlc_outputs.iter().filter_map(|(a, _, c)| { + if let Some(source) = c { Some((a, source)) } else { None } + })); + } + } + } else { + // If we have not seen a commitment transaction on-chain (ie the channel is not yet + // closed), just examine the available counterparty commitment transactions. See docs + // on `fail_unbroadcast_htlcs`, below, for justification. + macro_rules! walk_counterparty_commitment { + ($txid: expr) => { + if let Some(ref latest_outpoints) = us.counterparty_claimable_outpoints.get($txid) { + for &(ref htlc, ref source_option) in latest_outpoints.iter() { + if let &Some(ref source) = source_option { + res.insert((**source).clone(), htlc.clone()); + } + } + } + } + } + if let Some(ref txid) = us.current_counterparty_commitment_txid { + walk_counterparty_commitment!(txid); + } + if let Some(ref txid) = us.prev_counterparty_commitment_txid { + walk_counterparty_commitment!(txid); + } + } + + res + } } /// Compares a broadcasted commitment transaction's HTLCs with those in the latest state, @@ -1609,9 +1685,9 @@ impl ChannelMonitorImpl { /// Inserts a revocation secret into this channel monitor. Prunes old preimages if neither /// needed by holder commitment transactions HTCLs nor by counterparty ones. Unless we haven't already seen /// counterparty commitment transaction's secret, they are de facto pruned (we can use revocation key). - fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> { + fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> { if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) { - return Err(MonitorUpdateError("Previous secret did not match new one")); + return Err("Previous secret did not match new one"); } // Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill @@ -1703,7 +1779,7 @@ impl ChannelMonitorImpl { /// is important that any clones of this channel monitor (including remote clones) by kept /// up-to-date as our holder commitment transaction is updated. /// Panics if set_on_holder_tx_csv has never been called. 
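The `confirmed_txid` lookup above prefers the already reorg-safe `funding_spend_confirmed` value and only falls back to scanning events still awaiting their confirmation threshold. A small sketch of that `or_else`/`find_map` fallback, using invented placeholder types rather than the real `Txid`/`OnchainEvent`:

```rust
// Invented stand-ins for the monitor's state; the real code stores Txids and
// OnchainEvent entries awaiting ANTI_REORG_DELAY confirmations.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Txid(u32);

enum PendingEvent {
    FundingSpendConfirmation { txid: Txid },
    Other,
}

fn confirmed_funding_spend(
    funding_spend_confirmed: Option<Txid>,
    pending: &[PendingEvent],
) -> Option<Txid> {
    // Prefer the value that already passed the reorg-safety threshold; otherwise
    // fall back to a funding-spend confirmation that is still maturing.
    funding_spend_confirmed.or_else(|| {
        pending.iter().find_map(|ev| match ev {
            PendingEvent::FundingSpendConfirmation { txid } => Some(*txid),
            _ => None,
        })
    })
}

fn main() {
    let pending = [PendingEvent::Other, PendingEvent::FundingSpendConfirmation { txid: Txid(7) }];
    assert_eq!(confirmed_funding_spend(None, &pending), Some(Txid(7)));
    assert_eq!(confirmed_funding_spend(Some(Txid(3)), &pending), Some(Txid(3)));
}
```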
- fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>) -> Result<(), MonitorUpdateError> { + fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>) -> Result<(), &'static str> { // block for Rust 1.34 compat let mut new_holder_commitment_tx = { let trusted_tx = holder_commitment_tx.trust(); @@ -1726,7 +1802,7 @@ impl ChannelMonitorImpl { mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx); self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx); if self.holder_tx_signed { - return Err(MonitorUpdateError("Latest holder commitment signed has already been signed, update is rejected")); + return Err("Latest holder commitment signed has already been signed, update is rejected"); } Ok(()) } @@ -1787,10 +1863,10 @@ impl ChannelMonitorImpl { log_info!(logger, "Broadcasting local {}", log_tx!(tx)); broadcaster.broadcast_transaction(tx); } - self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0)); + self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0)); } - pub fn update_monitor(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), MonitorUpdateError> + pub fn update_monitor(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()> where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, @@ -1808,12 +1884,17 @@ impl ChannelMonitorImpl { } else if self.latest_update_id + 1 != updates.update_id { panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!"); } + let mut ret = Ok(()); for update in updates.updates.iter() { match update { ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => { log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info"); if self.lockdown_from_offchain { panic!(); } - self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone())? + if let Err(e) = self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone()) { + log_error!(logger, "Providing latest holder commitment transaction failed/was refused:"); + log_error!(logger, " {}", e); + ret = Err(()); + } } ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_revocation_point } => { log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info"); @@ -1825,7 +1906,11 @@ impl ChannelMonitorImpl { }, ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => { log_trace!(logger, "Updating ChannelMonitor with commitment secret"); - self.provide_secret(*idx, *secret)? 
+ if let Err(e) = self.provide_secret(*idx, *secret) { + log_error!(logger, "Providing latest counterparty commitment secret failed/was refused:"); + log_error!(logger, " {}", e); + ret = Err(()); + } }, ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => { log_trace!(logger, "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast); @@ -1835,7 +1920,7 @@ impl ChannelMonitorImpl { } else if !self.holder_tx_signed { log_error!(logger, "You have a toxic holder commitment transaction avaible in channel monitor, read comment in ChannelMonitor::get_latest_holder_commitment_txn to be informed of manual action to take"); } else { - // If we generated a MonitorEvent::CommitmentTxBroadcasted, the ChannelManager + // If we generated a MonitorEvent::CommitmentTxConfirmed, the ChannelManager // will still give us a ChannelForceClosed event with !should_broadcast, but we // shouldn't print the scary warning above. log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction."); @@ -1850,7 +1935,11 @@ impl ChannelMonitorImpl { } } self.latest_update_id = updates.update_id; - Ok(()) + + if ret.is_ok() && self.funding_spend_seen { + log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); + Err(()) + } else { ret } } pub fn get_latest_update_id(&self) -> u64 { @@ -1952,7 +2041,7 @@ impl ChannelMonitorImpl { tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 { return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user } - let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone()); + let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), self.onchain_tx_handler.channel_transaction_parameters.opt_anchors.is_some()); let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, true, height); claimable_outpoints.push(justice_package); } @@ -2277,7 +2366,9 @@ impl ChannelMonitorImpl { let prevout = &tx.input[0].previous_output; if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 { let mut balance_spendable_csv = None; - log_info!(logger, "Channel closed by funding output spend in txid {}.", log_bytes!(tx.txid())); + log_info!(logger, "Channel {} closed by funding output spend in txid {}.", + log_bytes!(self.funding_info.0.to_channel_id()), tx.txid()); + self.funding_spend_seen = true; if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 { let (mut new_outpoints, new_outputs) = self.check_spend_counterparty_transaction(&tx, height, &logger); if !new_outputs.1.is_empty() { @@ -2357,7 +2448,7 @@ impl ChannelMonitorImpl { let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone()); let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), 
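The rewritten update loop deliberately keeps applying the remaining steps after a failure (so, for example, preimages carried by later steps are still learned) and only reports the error once everything has been attempted; separately, a whole update is refused outright once a funding spend has been seen. A compact sketch of that control flow with invented step and state types:

```rust
// Invented miniature of the update loop: each step can fail, but later steps must
// still be applied; the aggregate result is reported only at the end.
enum Step {
    ProvideSecret { ok: bool },
    PaymentPreimage(u8),
}

struct ToyMonitor {
    funding_spend_seen: bool,
    preimages: Vec<u8>,
}

impl ToyMonitor {
    fn update(&mut self, steps: &[Step]) -> Result<(), ()> {
        let mut ret = Ok(());
        for step in steps {
            match step {
                Step::ProvideSecret { ok } => {
                    if !*ok {
                        // Log and remember the failure, but keep going: later steps may
                        // carry preimages we need to claim HTLCs on chain.
                        ret = Err(());
                    }
                }
                Step::PaymentPreimage(p) => self.preimages.push(*p),
            }
        }
        if ret.is_ok() && self.funding_spend_seen {
            // Once the funding output has been spent, no further off-chain state
            // changes are acceptable, so the update as a whole is refused.
            Err(())
        } else {
            ret
        }
    }
}

fn main() {
    let mut mon = ToyMonitor { funding_spend_seen: true, preimages: Vec::new() };
    let steps = [Step::ProvideSecret { ok: true }, Step::PaymentPreimage(42)];
    assert!(mon.update(&steps).is_err()); // refused, but...
    assert_eq!(mon.preimages, vec![42]);  // ...the preimage was still recorded.
}
```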
self.best_block.height(), false, self.best_block.height()); claimable_outpoints.push(commitment_package); - self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0)); + self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0)); let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript); self.holder_tx_signed = true; // Because we're broadcasting a commitment transaction, we should construct the package @@ -2615,8 +2706,10 @@ impl ChannelMonitorImpl { let revocation_sig_claim = (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && input.witness[1].len() == 33) || (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::AcceptedHTLC) && input.witness[1].len() == 33); let accepted_preimage_claim = input.witness.len() == 5 && HTLCType::scriptlen_to_htlctype(input.witness[4].len()) == Some(HTLCType::AcceptedHTLC); + #[cfg(not(fuzzing))] let accepted_timeout_claim = input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::AcceptedHTLC) && !revocation_sig_claim; let offered_preimage_claim = input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && !revocation_sig_claim; + #[cfg(not(fuzzing))] let offered_timeout_claim = input.witness.len() == 5 && HTLCType::scriptlen_to_htlctype(input.witness[4].len()) == Some(HTLCType::OfferedHTLC); let mut payment_preimage = PaymentPreimage([0; 32]); @@ -2875,53 +2968,6 @@ impl ChannelMonitorImpl { } } -/// `Persist` defines behavior for persisting channel monitors: this could mean -/// writing once to disk, and/or uploading to one or more backup services. -/// -/// Note that for every new monitor, you **must** persist the new `ChannelMonitor` -/// to disk/backups. And, on every update, you **must** persist either the -/// `ChannelMonitorUpdate` or the updated monitor itself. Otherwise, there is risk -/// of situations such as revoking a transaction, then crashing before this -/// revocation can be persisted, then unintentionally broadcasting a revoked -/// transaction and losing money. This is a risk because previous channel states -/// are toxic, so it's important that whatever channel state is persisted is -/// kept up-to-date. -pub trait Persist { - /// Persist a new channel's data. The data can be stored any way you want, but - /// the identifier provided by Rust-Lightning is the channel's outpoint (and - /// it is up to you to maintain a correct mapping between the outpoint and the - /// stored channel data). Note that you **must** persist every new monitor to - /// disk. See the `Persist` trait documentation for more details. - /// - /// See [`ChannelMonitor::write`] for writing out a `ChannelMonitor`, - /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors. - fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>; - - /// Update one channel's data. The provided `ChannelMonitor` has already - /// applied the given update. - /// - /// Note that on every update, you **must** persist either the - /// `ChannelMonitorUpdate` or the updated monitor itself to disk/backups. See - /// the `Persist` trait documentation for more details. 
- /// - /// If an implementer chooses to persist the updates only, they need to make - /// sure that all the updates are applied to the `ChannelMonitors` *before* - /// the set of channel monitors is given to the `ChannelManager` - /// deserialization routine. See [`ChannelMonitor::update_monitor`] for - /// applying a monitor update to a monitor. If full `ChannelMonitors` are - /// persisted, then there is no need to persist individual updates. - /// - /// Note that there could be a performance tradeoff between persisting complete - /// channel monitors on every update vs. persisting only updates and applying - /// them in batches. The size of each monitor grows `O(number of state updates)` - /// whereas updates are small and `O(1)`. - /// - /// See [`ChannelMonitor::write`] for writing out a `ChannelMonitor`, - /// [`ChannelMonitorUpdate::write`] for writing out an update, and - /// [`ChannelMonitorUpdateErr`] for requirements when returning errors. - fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>; -} - impl chain::Listen for (ChannelMonitor, T, F, L) where T::Target: BroadcasterInterface, @@ -3106,14 +3152,15 @@ impl<'a, Signer: Sign, K: KeysInterface> ReadableArgs<&'a K> } let pending_monitor_events_len: u64 = Readable::read(reader)?; - let mut pending_monitor_events = Vec::with_capacity(cmp::min(pending_monitor_events_len as usize, MAX_ALLOC_SIZE / (32 + 8*3))); + let mut pending_monitor_events = Some( + Vec::with_capacity(cmp::min(pending_monitor_events_len as usize, MAX_ALLOC_SIZE / (32 + 8*3)))); for _ in 0..pending_monitor_events_len { let ev = match ::read(reader)? { 0 => MonitorEvent::HTLCEvent(Readable::read(reader)?), - 1 => MonitorEvent::CommitmentTxBroadcasted(funding_info.0), + 1 => MonitorEvent::CommitmentTxConfirmed(funding_info.0), _ => return Err(DecodeError::InvalidValue) }; - pending_monitor_events.push(ev); + pending_monitor_events.as_mut().unwrap().push(ev); } let pending_events_len: u64 = Readable::read(reader)?; @@ -3171,9 +3218,12 @@ impl<'a, Signer: Sign, K: KeysInterface> ReadableArgs<&'a K> let mut funding_spend_confirmed = None; let mut htlcs_resolved_on_chain = Some(Vec::new()); + let mut funding_spend_seen = Some(false); read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, vec_type), + (5, pending_monitor_events, vec_type), + (7, funding_spend_seen, option), }); let mut secp_ctx = Secp256k1::new(); @@ -3213,7 +3263,7 @@ impl<'a, Signer: Sign, K: KeysInterface> ReadableArgs<&'a K> current_holder_commitment_number, payment_preimages, - pending_monitor_events, + pending_monitor_events: pending_monitor_events.unwrap(), pending_events, onchain_events_awaiting_threshold_conf, @@ -3223,6 +3273,7 @@ impl<'a, Signer: Sign, K: KeysInterface> ReadableArgs<&'a K> lockdown_from_offchain, holder_tx_signed, + funding_spend_seen: funding_spend_seen.unwrap(), funding_spend_confirmed, htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(), @@ -3236,6 +3287,7 @@ impl<'a, Signer: Sign, K: KeysInterface> ReadableArgs<&'a K> #[cfg(test)] mod tests { + use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::script::{Script, Builder}; use bitcoin::blockdata::opcodes; use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType}; @@ -3244,24 +3296,128 @@ mod tests { use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::hex::FromHex; - use bitcoin::hash_types::Txid; + use 
bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::network::constants::Network; + use bitcoin::secp256k1::key::{SecretKey,PublicKey}; + use bitcoin::secp256k1::Secp256k1; + use hex; - use chain::BestBlock; + + use super::ChannelMonitorUpdateStep; + use ::{check_added_monitors, check_closed_broadcast, check_closed_event, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err}; + use chain::{BestBlock, Confirm}; use chain::channelmonitor::ChannelMonitor; - use chain::package::{WEIGHT_OFFERED_HTLC, WEIGHT_RECEIVED_HTLC, WEIGHT_REVOKED_OFFERED_HTLC, WEIGHT_REVOKED_RECEIVED_HTLC, WEIGHT_REVOKED_OUTPUT}; + use chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT}; use chain::transaction::OutPoint; + use chain::keysinterface::InMemorySigner; use ln::{PaymentPreimage, PaymentHash}; use ln::chan_utils; use ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; + use ln::channelmanager::PaymentSendFailure; + use ln::features::InitFeatures; + use ln::functional_test_utils::*; use ln::script::ShutdownScript; + use util::errors::APIError; + use util::events::{ClosureReason, MessageSendEventsProvider}; use util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator}; - use bitcoin::secp256k1::key::{SecretKey,PublicKey}; - use bitcoin::secp256k1::Secp256k1; + use util::ser::{ReadableArgs, Writeable}; use sync::{Arc, Mutex}; - use chain::keysinterface::InMemorySigner; + use io; use prelude::*; + fn do_test_funding_spend_refuses_updates(use_local_txn: bool) { + // Previously, monitor updates were allowed freely even after a funding-spend transaction + // confirmed. This would allow a race condition where we could receive a payment (including + // the counterparty revoking their broadcasted state!) and accept it without recourse as + // long as the ChannelMonitor receives the block first, the full commitment update dance + // occurs after the block is connected, and before the ChannelManager receives the block. + // Obviously this is an incredibly contrived race given the counterparty would be risking + // their full channel balance for it, but its worth fixing nonetheless as it makes the + // potential ChannelMonitor states simpler to reason about. + // + // This test checks said behavior, as well as ensuring a ChannelMonitorUpdate with multiple + // updates is handled correctly in such conditions. 
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let channel = create_announced_chan_between_nodes( + &nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes( + &nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + + // Rebalance somewhat + send_payment(&nodes[0], &[&nodes[1]], 10_000_000); + + // First route two payments for testing at the end + let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000).0; + let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000).0; + + let local_txn = get_local_commitment_txn!(nodes[1], channel.2); + assert_eq!(local_txn.len(), 1); + let remote_txn = get_local_commitment_txn!(nodes[0], channel.2); + assert_eq!(remote_txn.len(), 3); // Commitment and two HTLC-Timeouts + check_spends!(remote_txn[1], remote_txn[0]); + check_spends!(remote_txn[2], remote_txn[0]); + let broadcast_tx = if use_local_txn { &local_txn[0] } else { &remote_txn[0] }; + + // Connect a commitment transaction, but only to the ChainMonitor/ChannelMonitor. The + // channel is now closed, but the ChannelManager doesn't know that yet. + let new_header = BlockHeader { + version: 2, time: 0, bits: 0, nonce: 0, + prev_blockhash: nodes[0].best_block_info().0, + merkle_root: Default::default() }; + let conf_height = nodes[0].best_block_info().1 + 1; + nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header, + &[(0, broadcast_tx)], conf_height); + + let (_, pre_update_monitor) = <(BlockHash, ChannelMonitor)>::read( + &mut io::Cursor::new(&get_monitor!(nodes[1], channel.2).encode()), + &nodes[1].keys_manager.backing).unwrap(); + + // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass + // the update through to the ChannelMonitor which will refuse it (as the channel is closed). + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000); + unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), + true, APIError::ChannelUnavailable { ref err }, + assert!(err.contains("ChannelMonitor storage failure"))); + check_added_monitors!(nodes[1], 2); // After the failure we generate a close-channel monitor update + check_closed_broadcast!(nodes[1], true); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() }); + + // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update + // and provides the claim preimages for the two pending HTLCs. The first update generates + // an error, but the point of this test is to ensure the later updates are still applied. + let monitor_updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap(); + let mut replay_update = monitor_updates.get(&channel.2).unwrap().iter().rev().skip(1).next().unwrap().clone(); + assert_eq!(replay_update.updates.len(), 1); + if let ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. 
} = replay_update.updates[0] { + } else { panic!(); } + replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_1 }); + replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_2 }); + + let broadcaster = TestBroadcaster::new(Arc::clone(&nodes[1].blocks)); + assert!( + pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger) + .is_err()); + // Even though we error'd on the first update, we should still have generated an HTLC claim + // transaction + let txn_broadcasted = broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert!(txn_broadcasted.len() >= 2); + let htlc_txn = txn_broadcasted.iter().filter(|tx| { + assert_eq!(tx.input.len(), 1); + tx.input[0].previous_output.txid == broadcast_tx.txid() + }).collect::>(); + assert_eq!(htlc_txn.len(), 2); + check_spends!(htlc_txn[0], broadcast_tx); + check_spends!(htlc_txn[1], broadcast_tx); + } + #[test] + fn test_funding_spend_refuses_updates() { + do_test_funding_spend_refuses_updates(true); + do_test_funding_spend_refuses_updates(false); + } + #[test] fn test_prune_preimages() { let secp_ctx = Secp256k1::new(); @@ -3345,6 +3501,7 @@ mod tests { selected_contest_delay: 67, }), funding_outpoint: Some(funding_outpoint), + opt_anchors: None, }; // Prune with one old state and a holder commitment tx holding a few overlaps with the // old state. @@ -3407,28 +3564,27 @@ mod tests { let secp_ctx = Secp256k1::new(); let privkey = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap(); let pubkey = PublicKey::from_secret_key(&secp_ctx, &privkey); - let mut sum_actual_sigs = 0; macro_rules! 
sign_input { - ($sighash_parts: expr, $idx: expr, $amount: expr, $weight: expr, $sum_actual_sigs: expr) => { + ($sighash_parts: expr, $idx: expr, $amount: expr, $weight: expr, $sum_actual_sigs: expr, $opt_anchors: expr) => { let htlc = HTLCOutputInCommitment { - offered: if *$weight == WEIGHT_REVOKED_OFFERED_HTLC || *$weight == WEIGHT_OFFERED_HTLC { true } else { false }, + offered: if *$weight == weight_revoked_offered_htlc($opt_anchors) || *$weight == weight_offered_htlc($opt_anchors) { true } else { false }, amount_msat: 0, cltv_expiry: 2 << 16, payment_hash: PaymentHash([1; 32]), transaction_output_index: Some($idx as u32), }; - let redeem_script = if *$weight == WEIGHT_REVOKED_OUTPUT { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &pubkey, &pubkey, &pubkey) }; + let redeem_script = if *$weight == WEIGHT_REVOKED_OUTPUT { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, $opt_anchors, &pubkey, &pubkey, &pubkey) }; let sighash = hash_to_message!(&$sighash_parts.signature_hash($idx, &redeem_script, $amount, SigHashType::All)[..]); let sig = secp_ctx.sign(&sighash, &privkey); $sighash_parts.access_witness($idx).push(sig.serialize_der().to_vec()); $sighash_parts.access_witness($idx)[0].push(SigHashType::All as u8); - sum_actual_sigs += $sighash_parts.access_witness($idx)[0].len(); + $sum_actual_sigs += $sighash_parts.access_witness($idx)[0].len(); if *$weight == WEIGHT_REVOKED_OUTPUT { $sighash_parts.access_witness($idx).push(vec!(1)); - } else if *$weight == WEIGHT_REVOKED_OFFERED_HTLC || *$weight == WEIGHT_REVOKED_RECEIVED_HTLC { + } else if *$weight == weight_revoked_offered_htlc($opt_anchors) || *$weight == weight_revoked_received_htlc($opt_anchors) { $sighash_parts.access_witness($idx).push(pubkey.clone().serialize().to_vec()); - } else if *$weight == WEIGHT_RECEIVED_HTLC { + } else if *$weight == weight_received_htlc($opt_anchors) { $sighash_parts.access_witness($idx).push(vec![0]); } else { $sighash_parts.access_witness($idx).push(PaymentPreimage([1; 32]).0.to_vec()); @@ -3444,83 +3600,98 @@ mod tests { let txid = Txid::from_hex("56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d").unwrap(); // Justice tx with 1 to_holder, 2 revoked offered HTLCs, 1 revoked received HTLCs - let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() }; - for i in 0..4 { - claim_tx.input.push(TxIn { - previous_output: BitcoinOutPoint { - txid, - vout: i, - }, - script_sig: Script::new(), - sequence: 0xfffffffd, - witness: Vec::new(), + for &opt_anchors in [false, true].iter() { + let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() }; + let mut sum_actual_sigs = 0; + for i in 0..4 { + claim_tx.input.push(TxIn { + previous_output: BitcoinOutPoint { + txid, + vout: i, + }, + script_sig: Script::new(), + sequence: 0xfffffffd, + witness: Vec::new(), + }); + } + claim_tx.output.push(TxOut { + script_pubkey: script_pubkey.clone(), + value: 0, }); - } - claim_tx.output.push(TxOut { - script_pubkey: script_pubkey.clone(), - value: 0, - }); - let base_weight = claim_tx.get_weight(); - let inputs_weight = vec![WEIGHT_REVOKED_OUTPUT, WEIGHT_REVOKED_OFFERED_HTLC, WEIGHT_REVOKED_OFFERED_HTLC, WEIGHT_REVOKED_RECEIVED_HTLC]; - let mut inputs_total_weight = 2; // count segwit flags - { - let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx); - for 
(idx, inp) in inputs_weight.iter().enumerate() { - sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs); - inputs_total_weight += inp; + let base_weight = claim_tx.get_weight(); + let inputs_weight = vec![WEIGHT_REVOKED_OUTPUT, weight_revoked_offered_htlc(opt_anchors), weight_revoked_offered_htlc(opt_anchors), weight_revoked_received_htlc(opt_anchors)]; + let mut inputs_total_weight = 2; // count segwit flags + { + let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx); + for (idx, inp) in inputs_weight.iter().enumerate() { + sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs, opt_anchors); + inputs_total_weight += inp; + } } + assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs)); } - assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs)); // Claim tx with 1 offered HTLCs, 3 received HTLCs - claim_tx.input.clear(); - sum_actual_sigs = 0; - for i in 0..4 { + for &opt_anchors in [false, true].iter() { + let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() }; + let mut sum_actual_sigs = 0; + for i in 0..4 { + claim_tx.input.push(TxIn { + previous_output: BitcoinOutPoint { + txid, + vout: i, + }, + script_sig: Script::new(), + sequence: 0xfffffffd, + witness: Vec::new(), + }); + } + claim_tx.output.push(TxOut { + script_pubkey: script_pubkey.clone(), + value: 0, + }); + let base_weight = claim_tx.get_weight(); + let inputs_weight = vec![weight_offered_htlc(opt_anchors), weight_received_htlc(opt_anchors), weight_received_htlc(opt_anchors), weight_received_htlc(opt_anchors)]; + let mut inputs_total_weight = 2; // count segwit flags + { + let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx); + for (idx, inp) in inputs_weight.iter().enumerate() { + sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs, opt_anchors); + inputs_total_weight += inp; + } + } + assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs)); + } + + // Justice tx with 1 revoked HTLC-Success tx output + for &opt_anchors in [false, true].iter() { + let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() }; + let mut sum_actual_sigs = 0; claim_tx.input.push(TxIn { previous_output: BitcoinOutPoint { txid, - vout: i, + vout: 0, }, script_sig: Script::new(), sequence: 0xfffffffd, witness: Vec::new(), }); - } - let base_weight = claim_tx.get_weight(); - let inputs_weight = vec![WEIGHT_OFFERED_HTLC, WEIGHT_RECEIVED_HTLC, WEIGHT_RECEIVED_HTLC, WEIGHT_RECEIVED_HTLC]; - let mut inputs_total_weight = 2; // count segwit flags - { - let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx); - for (idx, inp) in inputs_weight.iter().enumerate() { - sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs); - inputs_total_weight += inp; - } - } - assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs)); - - // Justice tx with 1 revoked HTLC-Success tx output - claim_tx.input.clear(); - sum_actual_sigs = 0; - claim_tx.input.push(TxIn { - previous_output: BitcoinOutPoint { - txid, - vout: 0, - }, - script_sig: Script::new(), - sequence: 0xfffffffd, - witness: Vec::new(), - }); - let base_weight = claim_tx.get_weight(); - let inputs_weight = vec![WEIGHT_REVOKED_OUTPUT]; 
- let mut inputs_total_weight = 2; // count segwit flags - { - let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx); - for (idx, inp) in inputs_weight.iter().enumerate() { - sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs); - inputs_total_weight += inp; + claim_tx.output.push(TxOut { + script_pubkey: script_pubkey.clone(), + value: 0, + }); + let base_weight = claim_tx.get_weight(); + let inputs_weight = vec![WEIGHT_REVOKED_OUTPUT]; + let mut inputs_total_weight = 2; // count segwit flags + { + let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx); + for (idx, inp) in inputs_weight.iter().enumerate() { + sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs, opt_anchors); + inputs_total_weight += inp; + } } + assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_isg */ (73 * inputs_weight.len() - sum_actual_sigs)); } - assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_isg */ (73 * inputs_weight.len() - sum_actual_sigs)); } // Further testing is done in the ChannelManager integration tests. diff --git a/lightning/src/chain/keysinterface.rs b/lightning/src/chain/keysinterface.rs index 2da9e575047..2ca57dee225 100644 --- a/lightning/src/chain/keysinterface.rs +++ b/lightning/src/chain/keysinterface.rs @@ -43,6 +43,12 @@ use core::sync::atomic::{AtomicUsize, Ordering}; use io::{self, Error}; use ln::msgs::{DecodeError, MAX_VALUE_MSAT}; +/// Used as initial key material, to be expanded into multiple secret keys (but not to be used +/// directly). This is used within LDK to encrypt/decrypt inbound payment data. +/// (C-not exported) as we just use [u8; 32] directly +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub struct KeyMaterial(pub [u8; 32]); + /// Information about a spendable output to a P2WSH script. See /// SpendableOutputDescriptor::DelayedPaymentOutput for more details on how to spend this. #[derive(Clone, Debug, PartialEq)] @@ -397,6 +403,11 @@ pub trait KeysInterface { /// this trait to parse the invoice and make sure they're signing what they expect, rather than /// blindly signing the hash. fn sign_invoice(&self, invoice_preimage: Vec) -> Result; + + /// Get secret key material as bytes for use in encrypting and decrypting inbound payment data. + /// + /// This method must return the same value each time it is called. + fn get_inbound_payment_key_material(&self) -> KeyMaterial; } #[derive(Clone)] @@ -505,6 +516,12 @@ impl InMemorySigner { self.channel_parameters.as_ref().unwrap() } + /// Whether anchors should be used. + /// Will panic if ready_channel wasn't called. + pub fn opt_anchors(&self) -> bool { + self.get_channel_parameters().opt_anchors.is_some() + } + /// Sign the single input of spend_tx at index `input_idx` which spends the output /// described by descriptor, returning the witness stack for the input. 
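The assertions in this test reconcile predicted weights, which budget a worst-case 73-byte signature push per input (a maximum-length DER signature plus the sighash-type byte), against the weight of the transaction as actually signed. A tiny arithmetic sketch of that correction term, with signature lengths chosen only for illustration:

```rust
fn main() {
    // Predicted package weights assume every signature push is the maximum 73 bytes.
    let num_inputs = 4usize;
    let predicted_sig_bytes = 73 * num_inputs;

    // Suppose the signatures actually produced serialized to these lengths
    // (roughly 71-73 bytes each, depending on how the DER integers encode).
    let actual_sig_lens = [72usize, 71, 72, 72];
    let sum_actual_sigs: usize = actual_sig_lens.iter().sum();

    // This is the slack the test adds back when comparing the predicted weight
    // against claim_tx.get_weight() after signing.
    assert_eq!(predicted_sig_bytes - sum_actual_sigs, 5);
}
```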
/// @@ -523,6 +540,9 @@ impl InMemorySigner { let witness_script = bitcoin::Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey(); let sighash = hash_to_message!(&bip143::SigHashCache::new(spend_tx).signature_hash(input_idx, &witness_script, descriptor.output.value, SigHashType::All)[..]); let remotesig = secp_ctx.sign(&sighash, &self.payment_key); + let payment_script = bitcoin::Address::p2wpkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Bitcoin).unwrap().script_pubkey(); + + if payment_script != descriptor.output.script_pubkey { return Err(()); } let mut witness = Vec::with_capacity(2); witness.push(remotesig.serialize_der().to_vec()); @@ -553,6 +573,9 @@ impl InMemorySigner { let witness_script = chan_utils::get_revokeable_redeemscript(&descriptor.revocation_pubkey, descriptor.to_self_delay, &delayed_payment_pubkey); let sighash = hash_to_message!(&bip143::SigHashCache::new(spend_tx).signature_hash(input_idx, &witness_script, descriptor.output.value, SigHashType::All)[..]); let local_delayedsig = secp_ctx.sign(&sighash, &delayed_payment_key); + let payment_script = bitcoin::Address::p2wsh(&witness_script, Network::Bitcoin).script_pubkey(); + + if descriptor.output.script_pubkey != payment_script { return Err(()); } let mut witness = Vec::with_capacity(3); witness.push(local_delayedsig.serialize_der().to_vec()); @@ -593,9 +616,10 @@ impl BaseSign for InMemorySigner { let mut htlc_sigs = Vec::with_capacity(commitment_tx.htlcs().len()); for htlc in commitment_tx.htlcs() { - let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_tx.feerate_per_kw(), self.holder_selected_contest_delay(), htlc, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); - let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys); - let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, SigHashType::All)[..]); + let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_tx.feerate_per_kw(), self.holder_selected_contest_delay(), htlc, self.opt_anchors(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key); + let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys); + let htlc_sighashtype = if self.opt_anchors() { SigHashType::SinglePlusAnyoneCanPay } else { SigHashType::All }; + let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype)[..]); let holder_htlc_key = chan_utils::derive_private_key(&secp_ctx, &keys.per_commitment_point, &self.htlc_base_key).map_err(|_| ())?; htlc_sigs.push(secp_ctx.sign(&htlc_sighash, &holder_htlc_key)); } @@ -648,7 +672,7 @@ impl BaseSign for InMemorySigner { let witness_script = { let counterparty_htlcpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.counterparty_pubkeys().htlc_basepoint).map_err(|_| ())?; let holder_htlcpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.pubkeys().htlc_basepoint).map_err(|_| ())?; - chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &counterparty_htlcpubkey, &holder_htlcpubkey, &revocation_pubkey) + chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, self.opt_anchors(), &counterparty_htlcpubkey, &holder_htlcpubkey, &revocation_pubkey) }; let mut sighash_parts = bip143::SigHashCache::new(justice_tx); let sighash = 
hash_to_message!(&sighash_parts.signature_hash(input, &witness_script, amount, SigHashType::All)[..]); @@ -660,7 +684,7 @@ impl BaseSign for InMemorySigner { let witness_script = if let Ok(revocation_pubkey) = chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &self.pubkeys().revocation_basepoint) { if let Ok(counterparty_htlcpubkey) = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.counterparty_pubkeys().htlc_basepoint) { if let Ok(htlcpubkey) = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.pubkeys().htlc_basepoint) { - chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &counterparty_htlcpubkey, &htlcpubkey, &revocation_pubkey) + chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, self.opt_anchors(), &counterparty_htlcpubkey, &htlcpubkey, &revocation_pubkey) } else { return Err(()) } } else { return Err(()) } } else { return Err(()) }; @@ -760,6 +784,7 @@ impl Readable for InMemorySigner { pub struct KeysManager { secp_ctx: Secp256k1, node_secret: SecretKey, + inbound_payment_key: KeyMaterial, destination_script: Script, shutdown_pubkey: PublicKey, channel_master_key: ExtendedPrivKey, @@ -815,6 +840,9 @@ impl KeysManager { }; let channel_master_key = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(3).unwrap()).expect("Your RNG is busted"); let rand_bytes_master_key = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(4).unwrap()).expect("Your RNG is busted"); + let inbound_payment_key: SecretKey = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(5).unwrap()).expect("Your RNG is busted").private_key.key; + let mut inbound_pmt_key_bytes = [0; 32]; + inbound_pmt_key_bytes.copy_from_slice(&inbound_payment_key[..]); let mut rand_bytes_unique_start = Sha256::engine(); rand_bytes_unique_start.input(&byte_utils::be64_to_array(starting_time_secs)); @@ -824,6 +852,7 @@ impl KeysManager { let mut res = KeysManager { secp_ctx, node_secret, + inbound_payment_key: KeyMaterial(inbound_pmt_key_bytes), destination_script, shutdown_pubkey, @@ -960,7 +989,8 @@ impl KeysManager { input, output: outputs, }; - transaction_utils::maybe_add_change_output(&mut spend_tx, input_value, witness_weight, feerate_sat_per_1000_weight, change_destination_script)?; + let expected_max_weight = + transaction_utils::maybe_add_change_output(&mut spend_tx, input_value, witness_weight, feerate_sat_per_1000_weight, change_destination_script)?; let mut keys_cache: Option<(InMemorySigner, [u8; 32])> = None; let mut input_idx = 0; @@ -1005,6 +1035,8 @@ impl KeysManager { assert_eq!(pubkey.key, self.shutdown_pubkey); } let witness_script = bitcoin::Address::p2pkh(&pubkey, Network::Testnet).script_pubkey(); + let payment_script = bitcoin::Address::p2wpkh(&pubkey, Network::Testnet).expect("uncompressed key found").script_pubkey(); + assert_eq!(payment_script, output.script_pubkey); let sighash = hash_to_message!(&bip143::SigHashCache::new(&spend_tx).signature_hash(input_idx, &witness_script, output.value, SigHashType::All)[..]); let sig = secp_ctx.sign(&sighash, &secret.private_key.key); spend_tx.input[input_idx].witness.push(sig.serialize_der().to_vec()); @@ -1014,6 +1046,12 @@ impl KeysManager { } input_idx += 1; } + + debug_assert!(expected_max_weight >= spend_tx.get_weight()); + // Note that witnesses with a signature vary somewhat in size, so allow + // `expected_max_weight` to overshoot by up to 3 bytes per input. 
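Several of the signing paths above now re-derive the output script they expect from the descriptor's key and refuse to sign if it does not match the descriptor's `script_pubkey`, guarding against being handed a descriptor for an output the signer does not actually control. A standalone sketch of that check using the same rust-bitcoin calls; the `Descriptor` struct here is a made-up stand-in carrying only the field the check needs:

```rust
use bitcoin::blockdata::transaction::TxOut;
use bitcoin::network::constants::Network;

// Made-up stand-in; the real code reads the script from a
// StaticPaymentOutputDescriptor / DelayedPaymentOutputDescriptor.
struct Descriptor {
    output: TxOut,
}

// Refuse to sign unless the descriptor's output really pays to the P2WPKH script
// derived from the key we are about to sign with. The network argument only
// affects the address encoding, not the resulting script_pubkey.
fn check_spendable_output(descriptor: &Descriptor, key: &bitcoin::PublicKey) -> Result<(), ()> {
    let payment_script = bitcoin::Address::p2wpkh(key, Network::Bitcoin)
        .map_err(|_| ())? // p2wpkh is only defined for compressed keys
        .script_pubkey();
    if payment_script != descriptor.output.script_pubkey {
        return Err(());
    }
    Ok(())
}
```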
+ debug_assert!(expected_max_weight <= spend_tx.get_weight() + descriptors.len() * 3); + Ok(spend_tx) } } @@ -1025,6 +1063,10 @@ impl KeysInterface for KeysManager { self.node_secret.clone() } + fn get_inbound_payment_key_material(&self) -> KeyMaterial { + self.inbound_payment_key.clone() + } + fn get_destination_script(&self) -> Script { self.destination_script.clone() } diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index cec09459233..25e5a97d288 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -16,7 +16,7 @@ use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::network::constants::Network; -use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, MonitorEvent}; +use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent}; use chain::keysinterface::Sign; use chain::transaction::{OutPoint, TransactionData}; @@ -175,6 +175,71 @@ pub trait Confirm { fn get_relevant_txids(&self) -> Vec; } +/// An error enum representing a failure to persist a channel monitor update. +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum ChannelMonitorUpdateErr { + /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of + /// our state failed, but is expected to succeed at some point in the future). + /// + /// Such a failure will "freeze" a channel, preventing us from revoking old states or + /// submitting new commitment transactions to the counterparty. Once the update(s) that failed + /// have been successfully applied, a [`MonitorEvent::UpdateCompleted`] event should be returned + /// via [`Watch::release_pending_monitor_events`] which will then restore the channel to an + /// operational state. + /// + /// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If + /// you return a TemporaryFailure you must ensure that it is written to disk safely before + /// writing out the latest ChannelManager state. + /// + /// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur + /// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting + /// to claim it on this channel) and those updates must be applied wherever they can be. At + /// least one such updated ChannelMonitor must be persisted otherwise PermanentFailure should + /// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to + /// the channel which would invalidate previous ChannelMonitors are not made when a channel has + /// been "frozen". + /// + /// Note that even if updates made after TemporaryFailure succeed you must still provide a + /// [`MonitorEvent::UpdateCompleted`] to ensure you have the latest monitor and re-enable + /// normal channel operation. Note that this is normally generated through a call to + /// [`ChainMonitor::channel_monitor_updated`]. + /// + /// Note that the update being processed here will not be replayed for you when you return a + /// [`MonitorEvent::UpdateCompleted`] event via [`Watch::release_pending_monitor_events`], so + /// you must store the update itself on your own local disk prior to returning a + /// TemporaryFailure. You may, of course, employ a journaling approach, storing only the + /// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at + /// reload-time. 
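The `TemporaryFailure` documentation above requires that the failed update itself be durably stored locally before the error is returned, since the `ChannelManager` will never regenerate it. A minimal sketch of the journaling approach it mentions, with an invented error enum and file layout (error handling and fsync are omitted; this is not LDK's persister API):

```rust
use std::fs::OpenOptions;
use std::io::Write;

// Invented error type mirroring the two documented outcomes.
enum UpdateErr {
    TemporaryFailure,
    PermanentFailure,
}

// Append the serialized update to a local journal before admitting that the
// (slower) remote backup has not completed yet. On reload, the journal would be
// replayed on top of the last fully-persisted monitor.
fn persist_update(journal_path: &str, update_bytes: &[u8], remote_backup_done: bool) -> Result<(), UpdateErr> {
    let mut journal = OpenOptions::new()
        .create(true)
        .append(true)
        .open(journal_path)
        .map_err(|_| UpdateErr::PermanentFailure)?;
    // Length-prefix each entry so the journal can be replayed record by record.
    journal.write_all(&(update_bytes.len() as u64).to_be_bytes())
        .and_then(|_| journal.write_all(update_bytes))
        .map_err(|_| UpdateErr::PermanentFailure)?;
    if remote_backup_done {
        Ok(())
    } else {
        // The channel stays frozen until the caller later reports completion
        // (MonitorEvent::UpdateCompleted in the real API).
        Err(UpdateErr::TemporaryFailure)
    }
}
```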
+ /// + /// For deployments where a copy of ChannelMonitors and other local state are backed up in a + /// remote location (with local copies persisted immediately), it is anticipated that all + /// updates will return TemporaryFailure until the remote copies could be updated. + /// + /// [`ChainMonitor::channel_monitor_updated`]: chainmonitor::ChainMonitor::channel_monitor_updated + TemporaryFailure, + /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a + /// different watchtower and cannot update with all watchtowers that were previously informed + /// of this channel). + /// + /// At reception of this error, ChannelManager will force-close the channel and return at + /// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at + /// least one ChannelMonitor copy. Revocation secret MUST NOT be released and offchain channel + /// update must be rejected. + /// + /// This failure may also signal a failure to update the local persisted copy of one of + /// the channel monitor instance. + /// + /// Note that even when you fail a holder commitment transaction update, you must store the + /// update to ensure you can claim from it in case of a duplicate copy of this ChannelMonitor + /// broadcasts it (e.g distributed channel-monitor deployment) + /// + /// In case of distributed watchtowers deployment, the new version must be written to disk, as + /// state may have been stored but rejected due to a block forcing a commitment broadcast. This + /// storage is used to claim outputs of rejected state confirmed onchain by another watchtower, + /// lagging behind on block processing. + PermanentFailure, +} + /// The `Watch` trait defines behavior for watching on-chain activity pertaining to channels as /// blocks are connected and disconnected. /// @@ -193,9 +258,7 @@ pub trait Confirm { /// funds in the channel. See [`ChannelMonitorUpdateErr`] for more details about how to handle /// multiple instances. /// -/// [`ChannelMonitor`]: channelmonitor::ChannelMonitor -/// [`ChannelMonitorUpdateErr`]: channelmonitor::ChannelMonitorUpdateErr -/// [`PermanentFailure`]: channelmonitor::ChannelMonitorUpdateErr::PermanentFailure +/// [`PermanentFailure`]: ChannelMonitorUpdateErr::PermanentFailure pub trait Watch { /// Watches a channel identified by `funding_txo` using `monitor`. /// @@ -203,6 +266,9 @@ pub trait Watch { /// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means /// calling [`block_connected`] and [`block_disconnected`] on the monitor. /// + /// Note: this interface MUST error with `ChannelMonitorUpdateErr::PermanentFailure` if + /// the given `funding_txo` has previously been registered via `watch_channel`. + /// /// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch /// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected /// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected @@ -214,11 +280,17 @@ pub trait Watch { /// [`ChannelMonitorUpdateErr`] for invariants around returning an error. /// /// [`update_monitor`]: channelmonitor::ChannelMonitor::update_monitor - /// [`ChannelMonitorUpdateErr`]: channelmonitor::ChannelMonitorUpdateErr fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>; /// Returns any monitor events since the last call. Subsequent calls must only return new /// events. 
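The new note on `watch_channel` makes rejecting duplicate registrations a hard requirement rather than an implementation detail. A toy illustration of the required behaviour, with placeholder types standing in for `OutPoint` and a real `Watch` implementation:

```rust
use std::collections::HashSet;
use std::sync::Mutex;

// Illustrative outpoint key; the real code uses chain::transaction::OutPoint.
type FundingTxo = ([u8; 32], u16);

enum UpdateErr {
    PermanentFailure,
}

// Re-registering the same funding outpoint must be rejected rather than silently
// replacing the existing monitor.
struct ToyWatcher {
    monitors: Mutex<HashSet<FundingTxo>>,
}

impl ToyWatcher {
    fn watch_channel(&self, funding_txo: FundingTxo) -> Result<(), UpdateErr> {
        let mut monitors = self.monitors.lock().unwrap();
        if !monitors.insert(funding_txo) {
            // Already registered: the trait documentation requires PermanentFailure here.
            return Err(UpdateErr::PermanentFailure);
        }
        Ok(())
    }
}

fn main() {
    let watcher = ToyWatcher { monitors: Mutex::new(HashSet::new()) };
    let funding_txo = ([0u8; 32], 1u16);
    assert!(watcher.watch_channel(funding_txo).is_ok());
    assert!(watcher.watch_channel(funding_txo).is_err()); // second registration refused
}
```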
+ /// + /// Note that after any block- or transaction-connection calls to a [`ChannelMonitor`], no + /// further events may be returned here until the [`ChannelMonitor`] has been fully persisted + /// to disk. + /// + /// For details on asynchronous [`ChannelMonitor`] updating and returning + /// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`]. fn release_pending_monitor_events(&self) -> Vec; } @@ -239,7 +311,7 @@ pub trait Watch { /// processed later. Then, in order to block until the data has been processed, any [`Watch`] /// invocation that has called the `Filter` must return [`TemporaryFailure`]. /// -/// [`TemporaryFailure`]: channelmonitor::ChannelMonitorUpdateErr::TemporaryFailure +/// [`TemporaryFailure`]: ChannelMonitorUpdateErr::TemporaryFailure /// [BIP 157]: https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki /// [BIP 158]: https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki pub trait Filter { diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index b4f5438adfb..ee6dc9c5c0b 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -387,8 +387,9 @@ impl OnchainTxHandler { // didn't receive confirmation of it before, or not enough reorg-safe depth on top of it). let new_timer = Some(cached_request.get_height_timer(cur_height)); if cached_request.is_malleable() { - let predicted_weight = cached_request.package_weight(&self.destination_script); - if let Some((output_value, new_feerate)) = cached_request.compute_package_output(predicted_weight, fee_estimator, logger) { + let predicted_weight = cached_request.package_weight(&self.destination_script, self.channel_transaction_parameters.opt_anchors.is_some()); + if let Some((output_value, new_feerate)) = + cached_request.compute_package_output(predicted_weight, self.destination_script.dust_value().as_sat(), fee_estimator, logger) { assert!(new_feerate != 0); let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap(); @@ -785,6 +786,10 @@ impl OnchainTxHandler { htlc_tx } + pub(crate) fn opt_anchors(&self) -> bool { + self.channel_transaction_parameters.opt_anchors.is_some() + } + #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))] pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option) -> Option { let latest_had_sigs = self.holder_htlc_sigs.is_some(); diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index a86add4b9e5..1cc63a8c860 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -40,14 +40,34 @@ use core::ops::Deref; const MAX_ALLOC_SIZE: usize = 64*1024; -// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script -pub(crate) const WEIGHT_REVOKED_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 133; -// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script -pub(crate) const WEIGHT_REVOKED_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 139; -// number_of_witness_elements + sig_length + counterpartyhtlc_sig + preimage_length + preimage + witness_script_length + witness_script -pub(crate) const WEIGHT_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 32 + 1 + 133; -// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + 
witness_script -pub(crate) const WEIGHT_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 139; +pub(crate) fn weight_revoked_offered_htlc(opt_anchors: bool) -> u64 { + // number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script + const WEIGHT_REVOKED_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 133; + const WEIGHT_REVOKED_OFFERED_HTLC_ANCHORS: u64 = WEIGHT_REVOKED_OFFERED_HTLC + 3; // + OP_1 + OP_CSV + OP_DROP + if opt_anchors { WEIGHT_REVOKED_OFFERED_HTLC_ANCHORS } else { WEIGHT_REVOKED_OFFERED_HTLC } +} + +pub(crate) fn weight_revoked_received_htlc(opt_anchors: bool) -> u64 { + // number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script + const WEIGHT_REVOKED_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 33 + 1 + 139; + const WEIGHT_REVOKED_RECEIVED_HTLC_ANCHORS: u64 = WEIGHT_REVOKED_RECEIVED_HTLC + 3; // + OP_1 + OP_CSV + OP_DROP + if opt_anchors { WEIGHT_REVOKED_RECEIVED_HTLC_ANCHORS } else { WEIGHT_REVOKED_RECEIVED_HTLC } +} + +pub(crate) fn weight_offered_htlc(opt_anchors: bool) -> u64 { + // number_of_witness_elements + sig_length + counterpartyhtlc_sig + preimage_length + preimage + witness_script_length + witness_script + const WEIGHT_OFFERED_HTLC: u64 = 1 + 1 + 73 + 1 + 32 + 1 + 133; + const WEIGHT_OFFERED_HTLC_ANCHORS: u64 = WEIGHT_OFFERED_HTLC + 3; // + OP_1 + OP_CSV + OP_DROP + if opt_anchors { WEIGHT_OFFERED_HTLC_ANCHORS } else { WEIGHT_OFFERED_HTLC } +} + +pub(crate) fn weight_received_htlc(opt_anchors: bool) -> u64 { + // number_of_witness_elements + sig_length + counterpartyhtlc_sig + empty_vec_length + empty_vec + witness_script_length + witness_script + const WEIGHT_RECEIVED_HTLC: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 139; + const WEIGHT_RECEIVED_HTLC_ANCHORS: u64 = WEIGHT_RECEIVED_HTLC + 3; // + OP_1 + OP_CSV + OP_DROP + if opt_anchors { WEIGHT_RECEIVED_HTLC_ANCHORS } else { WEIGHT_RECEIVED_HTLC } +} + // number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script pub(crate) const WEIGHT_REVOKED_OUTPUT: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 77; @@ -118,8 +138,8 @@ pub(crate) struct RevokedHTLCOutput { } impl RevokedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment) -> Self { - let weight = if htlc.offered { WEIGHT_REVOKED_OFFERED_HTLC } else { WEIGHT_REVOKED_RECEIVED_HTLC }; + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment, opt_anchors: bool) -> Self { + let weight = if htlc.offered { weight_revoked_offered_htlc(opt_anchors) } else { weight_revoked_received_htlc(opt_anchors) }; RevokedHTLCOutput { per_commitment_point, counterparty_delayed_payment_base_key, @@ -292,12 +312,12 @@ impl PackageSolvingData { }; amt } - fn weight(&self) -> usize { + fn weight(&self, opt_anchors: bool) -> usize { let weight = match self { PackageSolvingData::RevokedOutput(ref outp) => { outp.weight as usize }, PackageSolvingData::RevokedHTLCOutput(ref outp) => { outp.weight as usize }, - PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { WEIGHT_OFFERED_HTLC as usize }, - PackageSolvingData::CounterpartyReceivedHTLCOutput(..) 
=> { WEIGHT_RECEIVED_HTLC as usize }, + PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { weight_offered_htlc(opt_anchors) as usize }, + PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { weight_received_htlc(opt_anchors) as usize }, // Note: Currently, weights of holder outputs spending witnesses aren't used // as we can't malleate spending package to increase their feerate. This // should change with the remaining anchor output patchset. @@ -341,7 +361,7 @@ impl PackageSolvingData { }, PackageSolvingData::RevokedHTLCOutput(ref outp) => { if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) { - let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key); + let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, onchain_handler.opt_anchors(), &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key); //TODO: should we panic on signer failure ? if let Ok(sig) = onchain_handler.signer.sign_justice_revoked_htlc(&bumped_tx, i, outp.amount, &outp.per_commitment_key, &outp.htlc, &onchain_handler.secp_ctx) { bumped_tx.input[i].witness.push(sig.serialize_der().to_vec()); @@ -353,7 +373,7 @@ impl PackageSolvingData { }, PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => { if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) { - let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key); + let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, onchain_handler.opt_anchors(), &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key); if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) { bumped_tx.input[i].witness.push(sig.serialize_der().to_vec()); @@ -365,7 +385,7 @@ impl PackageSolvingData { }, PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => { if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) { - let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key); + let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, onchain_handler.opt_anchors(), &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key); bumped_tx.lock_time = outp.htlc.cltv_expiry; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to 
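The reworked `compute_package_output` no longer lets a fee bump consume the entire claimable amount; the output is clamped to the destination script's dust limit instead, and the feerate actually used is returned alongside the value so later bumps know what was paid. The core of that clamp is just the following arithmetic (the function name and call site are illustrative):

```rust
use std::cmp;

// Clamp a claim's output value so a large fee can, at worst, leave a dust-limit
// sized output rather than burning everything to fees.
fn claim_output_value(input_amount_sat: u64, fee_sat: u64, dust_limit_sat: u64) -> u64 {
    cmp::max(input_amount_sat as i64 - fee_sat as i64, dust_limit_sat as i64) as u64
}

fn main() {
    // Normal case: the fee is simply deducted.
    assert_eq!(claim_output_value(10_000, 1_000, 546), 9_000);
    // Fee would exceed the inputs: fall back to a dust-limit output.
    assert_eq!(claim_output_value(10_000, 12_000, 546), 546);
}
```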
avoid breaking hash computation if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) { @@ -567,13 +587,13 @@ impl PackageTemplate { self.inputs.iter().map(|(_, outp)| outp.absolute_tx_timelock(self.height_original)) .max().expect("There must always be at least one output to spend in a PackageTemplate") } - pub(crate) fn package_weight(&self, destination_script: &Script) -> usize { + pub(crate) fn package_weight(&self, destination_script: &Script, opt_anchors: bool) -> usize { let mut inputs_weight = 0; let mut witnesses_weight = 2; // count segwit flags for (_, outp) in self.inputs.iter() { // previous_out_point: 36 bytes ; var_int: 1 byte ; sequence: 4 bytes inputs_weight += 41 * WITNESS_SCALE_FACTOR; - witnesses_weight += outp.weight(); + witnesses_weight += outp.weight(opt_anchors); } // version: 4 bytes ; count_tx_in: 1 byte ; count_tx_out: 1 byte ; lock_time: 4 bytes let transaction_weight = 10 * WITNESS_SCALE_FACTOR; @@ -636,26 +656,25 @@ impl PackageTemplate { } current_height + LOW_FREQUENCY_BUMP_INTERVAL } - /// Returns value in satoshis to be included as package outgoing output amount and feerate with which package finalization should be done. - pub(crate) fn compute_package_output(&self, predicted_weight: usize, fee_estimator: &F, logger: &L) -> Option<(u64, u64)> + + /// Returns value in satoshis to be included as package outgoing output amount and feerate + /// which was used to generate the value. Will not return less than `dust_limit_sats` for the + /// value. + pub(crate) fn compute_package_output(&self, predicted_weight: usize, dust_limit_sats: u64, fee_estimator: &F, logger: &L) -> Option<(u64, u64)> where F::Target: FeeEstimator, L::Target: Logger, { debug_assert!(self.malleability == PackageMalleability::Malleable, "The package output is fixed for non-malleable packages"); let input_amounts = self.package_amount(); + assert!(dust_limit_sats as i64 > 0, "Output script must be broadcastable/have a 'real' dust limit."); // If old feerate is 0, first iteration of this claim, use normal fee calculation if self.feerate_previous != 0 { if let Some((new_fee, feerate)) = feerate_bump(predicted_weight, input_amounts, self.feerate_previous, fee_estimator, logger) { - // If new computed fee is superior at the whole claimable amount burn all in fees - if new_fee > input_amounts { - return Some((0, feerate)); - } else { - return Some((input_amounts - new_fee, feerate)); - } + return Some((cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64, feerate)); } } else { if let Some((new_fee, feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) { - return Some((input_amounts - new_fee, feerate)); + return Some((cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64, feerate)); } } None @@ -822,7 +841,7 @@ fn feerate_bump(predicted_weight: usize, input_amounts: u64, #[cfg(test)] mod tests { - use chain::package::{CounterpartyReceivedHTLCOutput, HolderHTLCOutput, PackageTemplate, PackageSolvingData, RevokedOutput, WEIGHT_REVOKED_OUTPUT}; + use chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderHTLCOutput, PackageTemplate, PackageSolvingData, RevokedOutput, WEIGHT_REVOKED_OUTPUT, weight_offered_htlc, weight_received_htlc}; use chain::Txid; use ln::chan_utils::HTLCOutputInCommitment; use ln::{PaymentPreimage, PaymentHash}; @@ -858,6 
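
The new compute_package_output signature floors the claim output at the destination script's dust limit instead of burning the whole input amount to fees when a bumped fee exceeds it. A sketch of that clamp, with a hypothetical 546-sat dust limit:

// Illustrative only: mirrors the max(input - fee, dust_limit) clamp added above.
fn claim_output_value(input_amounts: u64, new_fee: u64, dust_limit_sats: u64) -> u64 {
    core::cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64
}

fn main() {
    // Normal case: the fee simply comes out of the claimed value.
    assert_eq!(claim_output_value(10_000, 2_500, 546), 7_500);
    // Fee would exceed the inputs: previously everything went to fees (output of 0);
    // now the output is held at the dust limit so the claim stays broadcastable.
    assert_eq!(claim_output_value(1_000, 2_500, 546), 546);
}
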
+877,19 @@ mod tests { } } + macro_rules! dumb_counterparty_offered_output { + ($secp_ctx: expr, $amt: expr) => { + { + let dumb_scalar = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap(); + let dumb_point = PublicKey::from_secret_key(&$secp_ctx, &dumb_scalar); + let hash = PaymentHash([1; 32]); + let preimage = PaymentPreimage([2;32]); + let htlc = HTLCOutputInCommitment { offered: false, amount_msat: $amt, cltv_expiry: 1000, payment_hash: hash, transaction_output_index: None }; + PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(dumb_point, dumb_point, dumb_point, preimage, htlc)) + } + } + } + macro_rules! dumb_htlc_output { () => { { @@ -1022,11 +1054,32 @@ mod tests { fn test_package_weight() { let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(); let secp_ctx = Secp256k1::new(); - let revk_outp = dumb_revk_output!(secp_ctx); - let package = PackageTemplate::build_package(txid, 0, revk_outp, 0, true, 100); - // (nVersion (4) + nLocktime (4) + count_tx_in (1) + prevout (36) + sequence (4) + script_length (1) + count_tx_out (1) + value (8) + var_int (1)) * WITNESS_SCALE_FACTOR - // + witness marker (2) + WEIGHT_REVOKED_OUTPUT - assert_eq!(package.package_weight(&Script::new()), (4 + 4 + 1 + 36 + 4 + 1 + 1 + 8 + 1) * WITNESS_SCALE_FACTOR + 2 + WEIGHT_REVOKED_OUTPUT as usize); + // (nVersion (4) + nLocktime (4) + count_tx_in (1) + prevout (36) + sequence (4) + script_length (1) + count_tx_out (1) + value (8) + var_int (1)) * WITNESS_SCALE_FACTOR + witness marker (2) + let weight_sans_output = (4 + 4 + 1 + 36 + 4 + 1 + 1 + 8 + 1) * WITNESS_SCALE_FACTOR + 2; + + { + let revk_outp = dumb_revk_output!(secp_ctx); + let package = PackageTemplate::build_package(txid, 0, revk_outp, 0, true, 100); + for &opt_anchors in [false, true].iter() { + assert_eq!(package.package_weight(&Script::new(), opt_anchors), weight_sans_output + WEIGHT_REVOKED_OUTPUT as usize); + } + } + + { + let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000); + let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100); + for &opt_anchors in [false, true].iter() { + assert_eq!(package.package_weight(&Script::new(), opt_anchors), weight_sans_output + weight_received_htlc(opt_anchors) as usize); + } + } + + { + let counterparty_outp = dumb_counterparty_offered_output!(secp_ctx, 1_000_000); + let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100); + for &opt_anchors in [false, true].iter() { + assert_eq!(package.package_weight(&Script::new(), opt_anchors), weight_sans_output + weight_offered_htlc(opt_anchors) as usize); + } + } } } diff --git a/lightning/src/debug_sync.rs b/lightning/src/debug_sync.rs new file mode 100644 index 00000000000..7ee5ee521bc --- /dev/null +++ b/lightning/src/debug_sync.rs @@ -0,0 +1,213 @@ +pub use ::alloc::sync::Arc; +use core::ops::{Deref, DerefMut}; +use core::time::Duration; + +use std::collections::HashSet; +use std::cell::RefCell; + +use std::sync::atomic::{AtomicUsize, Ordering}; + +use std::sync::Mutex as StdMutex; +use std::sync::MutexGuard as StdMutexGuard; +use std::sync::RwLock as StdRwLock; +use std::sync::RwLockReadGuard as StdRwLockReadGuard; +use std::sync::RwLockWriteGuard as StdRwLockWriteGuard; +use std::sync::Condvar as StdCondvar; + +#[cfg(feature = "backtrace")] +use backtrace::Backtrace; + +pub type LockResult = Result; + +pub struct 
Condvar { + inner: StdCondvar, +} + +impl Condvar { + pub fn new() -> Condvar { + Condvar { inner: StdCondvar::new() } + } + + pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult> { + let mutex: &'a Mutex = guard.mutex; + self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ()) + } + + #[allow(unused)] + pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> { + let mutex = guard.mutex; + self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ()) + } + + pub fn notify_all(&self) { self.inner.notify_all(); } +} + +thread_local! { + /// We track the set of locks currently held by a reference to their `MutexMetadata` + static MUTEXES_HELD: RefCell>> = RefCell::new(HashSet::new()); +} +static MUTEX_IDX: AtomicUsize = AtomicUsize::new(0); + +/// Metadata about a single mutex, by id, the set of things locked-before it, and the backtrace of +/// when the Mutex itself was constructed. +struct MutexMetadata { + mutex_idx: u64, + locked_before: StdMutex>>, + #[cfg(feature = "backtrace")] + mutex_construction_bt: Backtrace, +} +impl PartialEq for MutexMetadata { + fn eq(&self, o: &MutexMetadata) -> bool { self.mutex_idx == o.mutex_idx } +} +impl Eq for MutexMetadata {} +impl std::hash::Hash for MutexMetadata { + fn hash(&self, hasher: &mut H) { hasher.write_u64(self.mutex_idx); } +} + +pub struct Mutex { + inner: StdMutex, + deps: Arc, +} + +#[must_use = "if unused the Mutex will immediately unlock"] +pub struct MutexGuard<'a, T: Sized + 'a> { + mutex: &'a Mutex, + lock: StdMutexGuard<'a, T>, +} + +impl<'a, T: Sized> MutexGuard<'a, T> { + fn into_inner(self) -> StdMutexGuard<'a, T> { + // Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509. + unsafe { + let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock); + std::mem::forget(self); + v + } + } +} + +impl Drop for MutexGuard<'_, T> { + fn drop(&mut self) { + MUTEXES_HELD.with(|held| { + held.borrow_mut().remove(&self.mutex.deps); + }); + } +} + +impl Deref for MutexGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + &self.lock.deref() + } +} + +impl DerefMut for MutexGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + self.lock.deref_mut() + } +} + +impl Mutex { + pub fn new(inner: T) -> Mutex { + Mutex { + inner: StdMutex::new(inner), + deps: Arc::new(MutexMetadata { + locked_before: StdMutex::new(HashSet::new()), + mutex_idx: MUTEX_IDX.fetch_add(1, Ordering::Relaxed) as u64, + #[cfg(feature = "backtrace")] + mutex_construction_bt: Backtrace::new(), + }), + } + } + + pub fn lock<'a>(&'a self) -> LockResult> { + MUTEXES_HELD.with(|held| { + // For each mutex which is currently locked, check that no mutex's locked-before + // set includes the mutex we're about to lock, which would imply a lockorder + // inversion. + for locked in held.borrow().iter() { + for locked_dep in locked.locked_before.lock().unwrap().iter() { + if *locked_dep == self.deps { + #[cfg(feature = "backtrace")] + panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.mutex_construction_bt); + #[cfg(not(feature = "backtrace"))] + panic!("Tried to violate existing lockorder. 
Build with the backtrace feature for more info."); + } + } + // Insert any already-held mutexes in our locked-before set. + self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked)); + } + held.borrow_mut().insert(Arc::clone(&self.deps)); + }); + self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ()) + } + + pub fn try_lock<'a>(&'a self) -> LockResult> { + let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ()); + if res.is_ok() { + MUTEXES_HELD.with(|held| { + // Since a try-lock will simply fail if the lock is held already, we do not + // consider try-locks to ever generate lockorder inversions. However, if a try-lock + // succeeds, we do consider it to have created lockorder dependencies. + for locked in held.borrow().iter() { + self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked)); + } + held.borrow_mut().insert(Arc::clone(&self.deps)); + }); + } + res + } +} + +pub struct RwLock { + inner: StdRwLock +} + +pub struct RwLockReadGuard<'a, T: ?Sized + 'a> { + lock: StdRwLockReadGuard<'a, T>, +} + +pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> { + lock: StdRwLockWriteGuard<'a, T>, +} + +impl Deref for RwLockReadGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + &self.lock.deref() + } +} + +impl Deref for RwLockWriteGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + &self.lock.deref() + } +} + +impl DerefMut for RwLockWriteGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + self.lock.deref_mut() + } +} + +impl RwLock { + pub fn new(inner: T) -> RwLock { + RwLock { inner: StdRwLock::new(inner) } + } + + pub fn read<'a>(&'a self) -> LockResult> { + self.inner.read().map(|lock| RwLockReadGuard { lock }).map_err(|_| ()) + } + + pub fn write<'a>(&'a self) -> LockResult> { + self.inner.write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ()) + } + + pub fn try_write<'a>(&'a self) -> LockResult> { + self.inner.try_write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ()) + } +} diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs index e6ecd1f3563..ca18d7bb5cc 100644 --- a/lightning/src/lib.rs +++ b/lightning/src/lib.rs @@ -18,7 +18,7 @@ //! generated/etc. This makes it a good candidate for tight integration into an existing wallet //! instead of having a rather-separate lightning appendage to a wallet. -#![cfg_attr(not(any(feature = "fuzztarget", feature = "_test_utils")), deny(missing_docs))] +#![cfg_attr(not(any(test, feature = "fuzztarget", feature = "_test_utils")), deny(missing_docs))] #![cfg_attr(not(any(test, feature = "fuzztarget", feature = "_test_utils")), forbid(unsafe_code))] #![deny(broken_intra_doc_links)] @@ -143,8 +143,16 @@ mod prelude { pub use alloc::string::ToString; } +#[cfg(all(feature = "std", test))] +mod debug_sync; +#[cfg(all(feature = "backtrace", feature = "std", test))] +extern crate backtrace; + #[cfg(feature = "std")] mod sync { + #[cfg(test)] + pub use debug_sync::*; + #[cfg(not(test))] pub use ::std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard}; } diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 994e1c276c4..8f66de53555 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -42,8 +42,21 @@ use chain; pub(crate) const MAX_HTLCS: u16 = 483; -pub(super) const HTLC_SUCCESS_TX_WEIGHT: u64 = 703; -pub(super) const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663; +/// Gets the weight for an HTLC-Success transaction. 
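
The debug Mutex above records, for every acquisition, which other locks were already held by the thread, and panics as soon as two locks are ever taken in opposite orders. A minimal illustration of the hazard it is built to catch, written against plain std Mutexes (with std this merely risks a deadlock at runtime; under the debug wrapper the later, order-violating acquisition panics):

// Standalone illustration of a lockorder inversion, not using the new module.
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let a = Arc::new(Mutex::new(0u32));
    let b = Arc::new(Mutex::new(0u32));

    let (a2, b2) = (Arc::clone(&a), Arc::clone(&b));
    let t = thread::spawn(move || {
        // This thread locks A then B, establishing an A-before-B order...
        let _ga = a2.lock().unwrap();
        let _gb = b2.lock().unwrap();
    });
    t.join().unwrap();

    // ...while this thread locks B then A: the inversion the tracking detects.
    let _gb = b.lock().unwrap();
    let _ga = a.lock().unwrap();
}
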
+#[inline] +pub fn htlc_success_tx_weight(opt_anchors: bool) -> u64 { + const HTLC_SUCCESS_TX_WEIGHT: u64 = 703; + const HTLC_SUCCESS_ANCHOR_TX_WEIGHT: u64 = 706; + if opt_anchors { HTLC_SUCCESS_ANCHOR_TX_WEIGHT } else { HTLC_SUCCESS_TX_WEIGHT } +} + +/// Gets the weight for an HTLC-Timeout transaction. +#[inline] +pub fn htlc_timeout_tx_weight(opt_anchors: bool) -> u64 { + const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663; + const HTLC_TIMEOUT_ANCHOR_TX_WEIGHT: u64 = 666; + if opt_anchors { HTLC_TIMEOUT_ANCHOR_TX_WEIGHT } else { HTLC_TIMEOUT_TX_WEIGHT } +} #[derive(PartialEq)] pub(crate) enum HTLCType { @@ -482,10 +495,10 @@ impl_writeable_tlv_based!(HTLCOutputInCommitment, { }); #[inline] -pub(crate) fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommitment, broadcaster_htlc_key: &PublicKey, countersignatory_htlc_key: &PublicKey, revocation_key: &PublicKey) -> Script { +pub(crate) fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommitment, opt_anchors: bool, broadcaster_htlc_key: &PublicKey, countersignatory_htlc_key: &PublicKey, revocation_key: &PublicKey) -> Script { let payment_hash160 = Ripemd160::hash(&htlc.payment_hash.0[..]).into_inner(); if htlc.offered { - Builder::new().push_opcode(opcodes::all::OP_DUP) + let mut bldr = Builder::new().push_opcode(opcodes::all::OP_DUP) .push_opcode(opcodes::all::OP_HASH160) .push_slice(&PubkeyHash::hash(&revocation_key.serialize())[..]) .push_opcode(opcodes::all::OP_EQUAL) @@ -509,11 +522,16 @@ pub(crate) fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommit .push_slice(&payment_hash160) .push_opcode(opcodes::all::OP_EQUALVERIFY) .push_opcode(opcodes::all::OP_CHECKSIG) - .push_opcode(opcodes::all::OP_ENDIF) - .push_opcode(opcodes::all::OP_ENDIF) - .into_script() + .push_opcode(opcodes::all::OP_ENDIF); + if opt_anchors { + bldr = bldr.push_opcode(opcodes::all::OP_PUSHNUM_1) + .push_opcode(opcodes::all::OP_CSV) + .push_opcode(opcodes::all::OP_DROP); + } + bldr.push_opcode(opcodes::all::OP_ENDIF) + .into_script() } else { - Builder::new().push_opcode(opcodes::all::OP_DUP) + let mut bldr = Builder::new().push_opcode(opcodes::all::OP_DUP) .push_opcode(opcodes::all::OP_HASH160) .push_slice(&PubkeyHash::hash(&revocation_key.serialize())[..]) .push_opcode(opcodes::all::OP_EQUAL) @@ -540,17 +558,22 @@ pub(crate) fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommit .push_opcode(opcodes::all::OP_CLTV) .push_opcode(opcodes::all::OP_DROP) .push_opcode(opcodes::all::OP_CHECKSIG) - .push_opcode(opcodes::all::OP_ENDIF) - .push_opcode(opcodes::all::OP_ENDIF) - .into_script() + .push_opcode(opcodes::all::OP_ENDIF); + if opt_anchors { + bldr = bldr.push_opcode(opcodes::all::OP_PUSHNUM_1) + .push_opcode(opcodes::all::OP_CSV) + .push_opcode(opcodes::all::OP_DROP); + } + bldr.push_opcode(opcodes::all::OP_ENDIF) + .into_script() } } /// Gets the witness redeemscript for an HTLC output in a commitment transaction. Note that htlc /// does not need to have its previous_output_index filled. 
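
These weights feed directly into the second-stage fee: HTLC-Success/Timeout transactions are pre-signed, so the fee is fixed as feerate_per_kw * weight / 1000 and deducted from the single output. A small sketch of that arithmetic (constants copied from the diff, helper names illustrative):

// Fee arithmetic sketch only.
fn htlc_timeout_tx_weight(opt_anchors: bool) -> u64 { if opt_anchors { 666 } else { 663 } }
fn htlc_success_tx_weight(opt_anchors: bool) -> u64 { if opt_anchors { 706 } else { 703 } }

fn second_stage_fee(feerate_per_kw: u64, offered: bool, opt_anchors: bool) -> u64 {
    let weight = if offered { htlc_timeout_tx_weight(opt_anchors) } else { htlc_success_tx_weight(opt_anchors) };
    feerate_per_kw * weight / 1000
}

fn main() {
    // At 1000 sat per 1000 weight units the fee equals the weight in sats;
    // the anchor variants cost 3 sats more at this feerate.
    assert_eq!(second_stage_fee(1000, true, false), 663);
    assert_eq!(second_stage_fee(1000, true, true), 666);
}
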
#[inline] -pub fn get_htlc_redeemscript(htlc: &HTLCOutputInCommitment, keys: &TxCreationKeys) -> Script { - get_htlc_redeemscript_with_explicit_keys(htlc, &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key) +pub fn get_htlc_redeemscript(htlc: &HTLCOutputInCommitment, opt_anchors: bool, keys: &TxCreationKeys) -> Script { + get_htlc_redeemscript_with_explicit_keys(htlc, opt_anchors, &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key) } /// Gets the redeemscript for a funding output from the two funding public keys. @@ -576,7 +599,7 @@ pub fn make_funding_redeemscript(broadcaster: &PublicKey, countersignatory: &Pub /// /// Panics if htlc.transaction_output_index.is_none() (as such HTLCs do not appear in the /// commitment transaction). -pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, contest_delay: u16, htlc: &HTLCOutputInCommitment, broadcaster_delayed_payment_key: &PublicKey, revocation_key: &PublicKey) -> Transaction { +pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, contest_delay: u16, htlc: &HTLCOutputInCommitment, opt_anchors: bool, broadcaster_delayed_payment_key: &PublicKey, revocation_key: &PublicKey) -> Transaction { let mut txins: Vec = Vec::new(); txins.push(TxIn { previous_output: OutPoint { @@ -584,15 +607,16 @@ pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, conte vout: htlc.transaction_output_index.expect("Can't build an HTLC transaction for a dust output"), }, script_sig: Script::new(), - sequence: 0, + sequence: if opt_anchors { 1 } else { 0 }, witness: Vec::new(), }); - let total_fee = if htlc.offered { - feerate_per_kw as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000 - } else { - feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 - }; + let weight = if htlc.offered { + htlc_timeout_tx_weight(opt_anchors) + } else { + htlc_success_tx_weight(opt_anchors) + }; + let total_fee = feerate_per_kw as u64 * weight / 1000; let mut txouts: Vec = Vec::new(); txouts.push(TxOut { @@ -608,6 +632,17 @@ pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, conte } } +/// Gets the witnessScript for the to_remote output when anchors are enabled. +#[inline] +pub(crate) fn get_to_countersignatory_with_anchors_redeemscript(payment_point: &PublicKey) -> Script { + Builder::new() + .push_slice(&payment_point.serialize()[..]) + .push_opcode(opcodes::all::OP_CHECKSIGVERIFY) + .push_int(1) + .push_opcode(opcodes::all::OP_CSV) + .into_script() +} + /// Gets the witnessScript for an anchor output from the funding public key. /// The witness in the spending input must be: /// @@ -615,7 +650,7 @@ pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, conte /// <> /// (empty vector required to satisfy compliance with MINIMALIF-standard rule) #[inline] -pub(crate) fn get_anchor_redeemscript(funding_pubkey: &PublicKey) -> Script { +pub fn get_anchor_redeemscript(funding_pubkey: &PublicKey) -> Script { Builder::new().push_slice(&funding_pubkey.serialize()[..]) .push_opcode(opcodes::all::OP_CHECKSIG) .push_opcode(opcodes::all::OP_IFDUP) @@ -645,6 +680,8 @@ pub struct ChannelTransactionParameters { pub counterparty_parameters: Option, /// The late-bound funding outpoint pub funding_outpoint: Option, + /// Are anchors used for this channel. Boolean is serialization backwards-compatible + pub opt_anchors: Option<()> } /// Late-bound per-channel counterparty data used to build transactions. 
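
With anchors, the counterparty's main balance is no longer paid to a plain P2WPKH but to the P2WSH of the witnessScript above (payment_point OP_CHECKSIGVERIFY OP_1 OP_CSV), adding the 1-block relative lock the anchor-outputs design requires so that only the anchor outputs themselves are spendable immediately. A sketch of building that script, assuming the rust-bitcoin Builder API as a dependency:

// Sketch mirroring get_to_countersignatory_with_anchors_redeemscript above.
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::secp256k1::PublicKey;

fn to_remote_anchors_witness_script(payment_point: &PublicKey) -> Script {
    Builder::new()
        .push_slice(&payment_point.serialize()[..]) // counterparty's payment key
        .push_opcode(opcodes::all::OP_CHECKSIGVERIFY)
        .push_int(1)                                // relative delay of one block
        .push_opcode(opcodes::all::OP_CSV)
        .into_script()
}

// The commitment transaction then commits to this script's P2WSH:
// to_remote_anchors_witness_script(&payment_point).to_v0_p2wsh()
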
@@ -698,6 +735,7 @@ impl_writeable_tlv_based!(ChannelTransactionParameters, { (4, is_outbound_from_holder, required), (6, counterparty_parameters, option), (8, funding_outpoint, option), + (10, opt_anchors, option), }); /// Static channel fields used to build transactions given per-commitment fields, organized by @@ -750,6 +788,11 @@ impl<'a> DirectedChannelTransactionParameters<'a> { pub fn funding_outpoint(&self) -> OutPoint { self.inner.funding_outpoint.unwrap().into_bitcoin_outpoint() } + + /// Whether to use anchors for this channel + pub fn opt_anchors(&self) -> bool { + self.inner.opt_anchors.is_some() + } } /// Information needed to build and sign a holder's commitment transaction. @@ -813,7 +856,8 @@ impl HolderCommitmentTransaction { holder_selected_contest_delay: 0, is_outbound_from_holder: false, counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: channel_pubkeys.clone(), selected_contest_delay: 0 }), - funding_outpoint: Some(chain::transaction::OutPoint { txid: Default::default(), index: 0 }) + funding_outpoint: Some(chain::transaction::OutPoint { txid: Default::default(), index: 0 }), + opt_anchors: None }; let mut htlcs_with_aux: Vec<(_, ())> = Vec::new(); let inner = CommitmentTransaction::new_with_auxiliary_htlc_data(0, 0, 0, false, dummy_key.clone(), dummy_key.clone(), keys, 0, &mut htlcs_with_aux, &channel_parameters.as_counterparty_broadcastable()); @@ -895,6 +939,7 @@ impl BuiltCommitmentTransaction { /// /// This class can be used inside a signer implementation to generate a signature given the relevant /// secret key. +#[derive(Clone, Hash, PartialEq)] pub struct ClosingTransaction { to_holder_value_sat: u64, to_counterparty_value_sat: u64, @@ -1130,7 +1175,11 @@ impl CommitmentTransaction { let mut txouts: Vec<(TxOut, Option<&mut HTLCOutputInCommitment>)> = Vec::new(); if to_countersignatory_value_sat > 0 { - let script = script_for_p2wpkh(&countersignatory_pubkeys.payment_point); + let script = if opt_anchors { + get_to_countersignatory_with_anchors_redeemscript(&countersignatory_pubkeys.payment_point).to_v0_p2wsh() + } else { + get_p2wpkh_redeemscript(&countersignatory_pubkeys.payment_point) + }; txouts.push(( TxOut { script_pubkey: script.clone(), @@ -1181,7 +1230,7 @@ impl CommitmentTransaction { let mut htlcs = Vec::with_capacity(htlcs_with_aux.len()); for (htlc, _) in htlcs_with_aux { - let script = chan_utils::get_htlc_redeemscript(&htlc, &keys); + let script = chan_utils::get_htlc_redeemscript(&htlc, opt_anchors, &keys); let txout = TxOut { script_pubkey: script.to_v0_p2wsh(), value: htlc.amount_msat / 1000, @@ -1335,10 +1384,17 @@ impl<'a> TrustedCommitmentTransaction<'a> { &self.inner.keys } + /// Should anchors be used. + pub fn opt_anchors(&self) -> bool { + self.opt_anchors.is_some() + } + /// Get a signature for each HTLC which was included in the commitment transaction (ie for /// which HTLCOutputInCommitment::transaction_output_index.is_some()). /// /// The returned Vec has one entry for each HTLC, and in the same order. + /// + /// This function is only valid in the holder commitment context, it always uses SigHashType::All. 
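
The new opt_anchors field uses Option<()> rather than bool so the flag can ride on an optional TLV entry: presence means true, absence (the value older serializations produce) means false, and the accessor is just is_some(). A trivial sketch of the pattern (type name illustrative):

// Unit Option as a serialization-friendly boolean flag.
struct ParamsSketch {
    opt_anchors: Option<()>,
}

impl ParamsSketch {
    fn opt_anchors(&self) -> bool { self.opt_anchors.is_some() }
}

fn main() {
    assert!(!ParamsSketch { opt_anchors: None }.opt_anchors());
    assert!(ParamsSketch { opt_anchors: Some(()) }.opt_anchors());
}
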
pub fn get_htlc_sigs(&self, htlc_base_key: &SecretKey, channel_parameters: &DirectedChannelTransactionParameters, secp_ctx: &Secp256k1) -> Result, ()> { let inner = self.inner; let keys = &inner.keys; @@ -1348,9 +1404,9 @@ impl<'a> TrustedCommitmentTransaction<'a> { for this_htlc in inner.htlcs.iter() { assert!(this_htlc.transaction_output_index.is_some()); - let htlc_tx = build_htlc_transaction(&txid, inner.feerate_per_kw, channel_parameters.contest_delay(), &this_htlc, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); + let htlc_tx = build_htlc_transaction(&txid, inner.feerate_per_kw, channel_parameters.contest_delay(), &this_htlc, self.opt_anchors(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key); - let htlc_redeemscript = get_htlc_redeemscript_with_explicit_keys(&this_htlc, &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key); + let htlc_redeemscript = get_htlc_redeemscript_with_explicit_keys(&this_htlc, self.opt_anchors(), &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key); let sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, this_htlc.amount_msat / 1000, SigHashType::All)[..]); ret.push(secp_ctx.sign(&sighash, &holder_htlc_key)); @@ -1370,16 +1426,18 @@ impl<'a> TrustedCommitmentTransaction<'a> { // Further, we should never be provided the preimage for an HTLC-Timeout transaction. if this_htlc.offered && preimage.is_some() { unreachable!(); } - let mut htlc_tx = build_htlc_transaction(&txid, inner.feerate_per_kw, channel_parameters.contest_delay(), &this_htlc, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); + let mut htlc_tx = build_htlc_transaction(&txid, inner.feerate_per_kw, channel_parameters.contest_delay(), &this_htlc, self.opt_anchors(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key); - let htlc_redeemscript = get_htlc_redeemscript_with_explicit_keys(&this_htlc, &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key); + let htlc_redeemscript = get_htlc_redeemscript_with_explicit_keys(&this_htlc, self.opt_anchors(), &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key); + + let sighashtype = if self.opt_anchors() { SigHashType::SinglePlusAnyoneCanPay } else { SigHashType::All }; // First push the multisig dummy, note that due to BIP147 (NULLDUMMY) it must be a zero-length element. 
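
Under anchors the counterparty signs second-stage HTLC transactions with SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so the holder can later attach additional inputs and outputs for fee-bumping, while the holder's own signature stays SIGHASH_ALL; that is exactly the sighashtype selection above. A sketch of the flag selection only, using the standard sighash byte encodings:

// Sketch of the sighash flag selection assumed above.
enum SigHashByte {
    All = 0x01,
    SinglePlusAnyoneCanPay = 0x83,
}

fn counterparty_htlc_sighash(opt_anchors: bool) -> SigHashByte {
    // With anchors the counterparty's signature leaves room to add fee inputs
    // later; the holder's own signature always commits to the whole tx (ALL).
    if opt_anchors { SigHashByte::SinglePlusAnyoneCanPay } else { SigHashByte::All }
}

fn main() {
    assert_eq!(counterparty_htlc_sighash(true) as u8, 0x83);
    assert_eq!(counterparty_htlc_sighash(false) as u8, 0x01);
}
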
htlc_tx.input[0].witness.push(Vec::new()); htlc_tx.input[0].witness.push(counterparty_signature.serialize_der().to_vec()); htlc_tx.input[0].witness.push(signature.serialize_der().to_vec()); - htlc_tx.input[0].witness[1].push(SigHashType::All as u8); + htlc_tx.input[0].witness[1].push(sighashtype as u8); htlc_tx.input[0].witness[2].push(SigHashType::All as u8); if this_htlc.offered { @@ -1424,7 +1482,7 @@ pub fn get_commitment_transaction_number_obscure_factor( | ((res[31] as u64) << 0 * 8) } -fn script_for_p2wpkh(key: &PublicKey) -> Script { +fn get_p2wpkh_redeemscript(key: &PublicKey) -> Script { Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0) .push_slice(&WPubkeyHash::hash(&key.serialize())[..]) .into_script() @@ -1435,12 +1493,13 @@ mod tests { use super::CounterpartyCommitmentSecrets; use ::{hex, chain}; use prelude::*; - use ln::chan_utils::{CommitmentTransaction, TxCreationKeys, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, HTLCOutputInCommitment}; + use ln::chan_utils::{get_htlc_redeemscript, get_to_countersignatory_with_anchors_redeemscript, get_p2wpkh_redeemscript, CommitmentTransaction, TxCreationKeys, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, HTLCOutputInCommitment}; use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1}; use util::test_utils; use chain::keysinterface::{KeysInterface, BaseSign}; use bitcoin::Network; use ln::PaymentHash; + use bitcoin::hashes::hex::ToHex; #[test] fn test_anchors() { @@ -1458,12 +1517,13 @@ mod tests { let holder_pubkeys = signer.pubkeys(); let counterparty_pubkeys = counterparty_signer.pubkeys(); let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint).unwrap(); - let channel_parameters = ChannelTransactionParameters { + let mut channel_parameters = ChannelTransactionParameters { holder_pubkeys: holder_pubkeys.clone(), holder_selected_contest_delay: 0, is_outbound_from_holder: false, counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: counterparty_pubkeys.clone(), selected_contest_delay: 0 }), - funding_outpoint: Some(chain::transaction::OutPoint { txid: Default::default(), index: 0 }) + funding_outpoint: Some(chain::transaction::OutPoint { txid: Default::default(), index: 0 }), + opt_anchors: None }; let mut htlcs_with_aux: Vec<(_, ())> = Vec::new(); @@ -1478,6 +1538,7 @@ mod tests { &mut htlcs_with_aux, &channel_parameters.as_holder_broadcastable() ); assert_eq!(tx.built.transaction.output.len(), 2); + assert_eq!(tx.built.transaction.output[1].script_pubkey, get_p2wpkh_redeemscript(&counterparty_pubkeys.payment_point)); // Generate broadcaster and counterparty outputs as well as two anchors let tx = CommitmentTransaction::new_with_auxiliary_htlc_data( @@ -1489,6 +1550,7 @@ mod tests { &mut htlcs_with_aux, &channel_parameters.as_holder_broadcastable() ); assert_eq!(tx.built.transaction.output.len(), 4); + assert_eq!(tx.built.transaction.output[3].script_pubkey, get_to_countersignatory_with_anchors_redeemscript(&counterparty_pubkeys.payment_point).to_v0_p2wsh()); // Generate broadcaster output and anchor let tx = CommitmentTransaction::new_with_auxiliary_htlc_data( @@ -1512,25 +1574,58 @@ mod tests { ); assert_eq!(tx.built.transaction.output.len(), 2); - // Generate broadcaster output, an HTLC output and two anchors - let payment_hash = PaymentHash([42; 32]); - let htlc_info = HTLCOutputInCommitment { + let 
received_htlc = HTLCOutputInCommitment { offered: false, - amount_msat: 1000000, + amount_msat: 400000, cltv_expiry: 100, - payment_hash, + payment_hash: PaymentHash([42; 32]), transaction_output_index: None, }; + let offered_htlc = HTLCOutputInCommitment { + offered: true, + amount_msat: 600000, + cltv_expiry: 100, + payment_hash: PaymentHash([43; 32]), + transaction_output_index: None, + }; + + // Generate broadcaster output and received and offered HTLC outputs, w/o anchors + let tx = CommitmentTransaction::new_with_auxiliary_htlc_data( + 0, 3000, 0, + false, + holder_pubkeys.funding_pubkey, + counterparty_pubkeys.funding_pubkey, + keys.clone(), 1, + &mut vec![(received_htlc.clone(), ()), (offered_htlc.clone(), ())], + &channel_parameters.as_holder_broadcastable() + ); + assert_eq!(tx.built.transaction.output.len(), 3); + assert_eq!(tx.built.transaction.output[0].script_pubkey, get_htlc_redeemscript(&received_htlc, false, &keys).to_v0_p2wsh()); + assert_eq!(tx.built.transaction.output[1].script_pubkey, get_htlc_redeemscript(&offered_htlc, false, &keys).to_v0_p2wsh()); + assert_eq!(get_htlc_redeemscript(&received_htlc, false, &keys).to_v0_p2wsh().to_hex(), + "002085cf52e41ba7c099a39df504e7b61f6de122971ceb53b06731876eaeb85e8dc5"); + assert_eq!(get_htlc_redeemscript(&offered_htlc, false, &keys).to_v0_p2wsh().to_hex(), + "002049f0736bb335c61a04d2623a24df878a7592a3c51fa7258d41b2c85318265e73"); + + // Generate broadcaster output and received and offered HTLC outputs, with anchors + channel_parameters.opt_anchors = Some(()); let tx = CommitmentTransaction::new_with_auxiliary_htlc_data( 0, 3000, 0, true, holder_pubkeys.funding_pubkey, counterparty_pubkeys.funding_pubkey, keys.clone(), 1, - &mut vec![(htlc_info, ())], &channel_parameters.as_holder_broadcastable() + &mut vec![(received_htlc.clone(), ()), (offered_htlc.clone(), ())], + &channel_parameters.as_holder_broadcastable() ); - assert_eq!(tx.built.transaction.output.len(), 4); + assert_eq!(tx.built.transaction.output.len(), 5); + assert_eq!(tx.built.transaction.output[2].script_pubkey, get_htlc_redeemscript(&received_htlc, true, &keys).to_v0_p2wsh()); + assert_eq!(tx.built.transaction.output[3].script_pubkey, get_htlc_redeemscript(&offered_htlc, true, &keys).to_v0_p2wsh()); + assert_eq!(get_htlc_redeemscript(&received_htlc, true, &keys).to_v0_p2wsh().to_hex(), + "002067114123af3f95405bae4fd930fc95de03e3c86baaee8b2dd29b43dd26cf613c"); + assert_eq!(get_htlc_redeemscript(&offered_htlc, true, &keys).to_v0_p2wsh().to_hex(), + "0020a06e3b0d4fcf704f2b9c41e16a70099e39989466c3142b8573a1154542f28f57"); } #[test] diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 9a5d2eaa905..236b1e498f4 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -16,26 +16,20 @@ use bitcoin::blockdata::block::{Block, BlockHeader}; use bitcoin::blockdata::constants::genesis_block; use bitcoin::hash_types::BlockHash; use bitcoin::network::constants::Network; -use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr}; +use chain::channelmonitor::ChannelMonitor; use chain::transaction::OutPoint; -use chain::Listen; -use chain::Watch; -use ln::{PaymentPreimage, PaymentHash}; +use chain::{ChannelMonitorUpdateErr, Listen, Watch}; use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure}; -use ln::features::{InitFeatures, InvoiceFeatures}; +use ln::features::InitFeatures; use ln::msgs; -use 
ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler}; -use routing::router::get_route; +use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; use util::config::UserConfig; use util::enforcing_trait_impls::EnforcingSigner; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason}; use util::errors::APIError; use util::ser::{ReadableArgs, Writeable}; use util::test_utils::TestBroadcaster; -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; - use ln::functional_test_utils::*; use util::test_utils; @@ -44,25 +38,17 @@ use io; use prelude::*; use sync::{Arc, Mutex}; -// If persister_fail is true, we have the persister return a PermanentFailure -// instead of the higher-level ChainMonitor. -fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) { +#[test] +fn test_simple_monitor_permanent_update_fail() { // Test that we handle a simple permanent monitor update failure - let mut chanmon_cfgs = create_chanmon_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - let (_, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(&nodes[1]); - - match persister_fail { - true => chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)), - false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure)) - } - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable {..}, {}); check_added_monitors!(nodes[0], 2); @@ -78,9 +64,10 @@ fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) { }; // TODO: Once we hit the chain with the failure transaction we should check that we get a - // PaymentFailed event + // PaymentPathFailed event assert_eq!(nodes[0].node.list_channels().len(), 0); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() }); } #[test] @@ -118,8 +105,7 @@ fn test_monitor_and_persister_update_fail() { blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 200); 200])), }; let chain_mon = { - let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap(); - let monitor = monitors.get(&outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); let mut w = test_utils::TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, ChannelMonitor)>::read( @@ -147,7 +133,7 @@ fn test_monitor_and_persister_update_fail() { // because the update is bogus, 
ultimately the error that's returned // should be a PermanentFailure. if let Err(ChannelMonitorUpdateErr::PermanentFailure) = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); } - logger.assert_log_contains("lightning::chain::chainmonitor".to_string(), "Failed to persist channel monitor update: TemporaryFailure".to_string(), 1); + logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Failed to persist ChannelMonitor update for channel [0-9a-f]*: TemporaryFailure").unwrap(), 1); if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); } } else { assert!(false); } } else { assert!(false); }; @@ -157,36 +143,20 @@ fn test_monitor_and_persister_update_fail() { assert_eq!(events.len(), 1); } -#[test] -fn test_simple_monitor_permanent_update_fail() { - do_test_simple_monitor_permanent_update_fail(false); - - // Test behavior when the persister returns a PermanentFailure. - do_test_simple_monitor_permanent_update_fail(true); -} - -// If persister_fail is true, we have the persister return a TemporaryFailure instead of the -// higher-level ChainMonitor. -fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail: bool) { +fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // Test that we can recover from a simple temporary monitor update failure optionally with // a disconnect in between - let mut chanmon_cfgs = create_chanmon_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); - let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(&nodes[1]); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); - match persister_fail { - true => chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)), - false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)) - } + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateFailed, {}); check_added_monitors!(nodes[0], 1); } @@ -201,12 +171,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } - match persister_fail { - true => chanmon_cfgs[0].persister.set_update_ret(Ok(())), - false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())) - } - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + 
chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -238,14 +205,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); // Now set it to failed again... - let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]); + let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); { - match persister_fail { - true => chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)), - false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)) - } - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {}); check_added_monitors!(nodes[0], 1); } @@ -266,19 +228,16 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail check_closed_broadcast!(nodes[0], true); // TODO: Once we hit the chain with the failure transaction we should check that we get a - // PaymentFailed event + // PaymentPathFailed event assert_eq!(nodes[0].node.list_channels().len(), 0); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); } #[test] fn test_simple_monitor_temporary_update_fail() { - do_test_simple_monitor_temporary_update_fail(false, false); - do_test_simple_monitor_temporary_update_fail(true, false); - - // Test behavior when the persister returns a TemporaryFailure. 
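
These tests now inject monitor-persistence failures through the test persister (set_update_ret) instead of the chain-monitor wrapper. A hypothetical sketch of how such a switchable persister can be structured; names and the error enum are stand-ins, only the stash-and-return idea is taken from the tests above:

use std::sync::Mutex;

#[derive(Clone, Copy, Debug, PartialEq)]
enum UpdateErr { TemporaryFailure, PermanentFailure }

struct SwitchablePersister {
    update_ret: Mutex<Result<(), UpdateErr>>,
}

impl SwitchablePersister {
    fn new() -> Self { Self { update_ret: Mutex::new(Ok(())) } }
    fn set_update_ret(&self, ret: Result<(), UpdateErr>) {
        *self.update_ret.lock().unwrap() = ret;
    }
    // Called by the monitor-persistence path in the test harness.
    fn persist_update(&self) -> Result<(), UpdateErr> {
        *self.update_ret.lock().unwrap()
    }
}

fn main() {
    let persister = SwitchablePersister::new();
    persister.set_update_ret(Err(UpdateErr::TemporaryFailure));
    assert_eq!(persister.persist_update(), Err(UpdateErr::TemporaryFailure));
    persister.set_update_ret(Ok(()));
    assert!(persister.persist_update().is_ok());
}
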
- do_test_simple_monitor_temporary_update_fail(false, true); - do_test_simple_monitor_temporary_update_fail(true, true); + do_test_simple_monitor_temporary_update_fail(false); + do_test_simple_monitor_temporary_update_fail(true); } fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { @@ -306,16 +265,13 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); - let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); // Now try to send a second payment which will fail to send - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {}); check_added_monitors!(nodes[0], 1); } @@ -344,8 +300,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentSent { ref payment_preimage } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => { assert_eq!(*payment_preimage, payment_preimage_1); + assert_eq!(*payment_hash, payment_hash_1); }, _ => panic!("Unexpected event"), } @@ -367,9 +324,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } // Now fix monitor updating... - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); macro_rules! disconnect_reconnect_peers { () => { { @@ -436,8 +393,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentSent { ref payment_preimage } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash, .. 
} => { assert_eq!(*payment_preimage, payment_preimage_1); + assert_eq!(*payment_hash, payment_hash_1); }, _ => panic!("Unexpected event"), } @@ -589,6 +547,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); expect_pending_htlcs_forwardable!(nodes[1]); @@ -647,12 +606,9 @@ fn test_monitor_update_fail_cs() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); - let (payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); + let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -660,16 +616,16 @@ fn test_monitor_update_fail_cs() { let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -691,7 +647,7 @@ fn test_monitor_update_fail_cs() { assert!(updates.update_fee.is_none()); assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed); 
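
The PaymentSent event now carries the payment_hash alongside the preimage, so the tests assert both. Since the hash is simply SHA256 of the preimage, a handler can sanity-check the pair; a sketch assuming the bitcoin hashes crate:

// Checks the invariant the updated PaymentSent assertions rely on:
// payment_hash is the SHA256 of payment_preimage.
use bitcoin::hashes::{sha256::Hash as Sha256, Hash};

fn preimage_matches_hash(payment_preimage: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
    Sha256::hash(&payment_preimage[..]).into_inner() == *payment_hash
}

fn main() {
    let preimage = [42u8; 32];
    let hash = Sha256::hash(&preimage[..]).into_inner();
    assert!(preimage_matches_hash(&preimage, &hash));
}
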
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -701,9 +657,9 @@ fn test_monitor_update_fail_cs() { _ => panic!("Unexpected event"), } - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -742,12 +698,9 @@ fn test_monitor_update_fail_no_rebroadcast() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); - let (payment_preimage_1, our_payment_hash, payment_secret_1) = get_payment_preimage_hash!(nodes[1]); + let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash, &Some(payment_secret_1)).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -756,7 +709,7 @@ fn test_monitor_update_fail_no_rebroadcast() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -764,9 +717,9 @@ fn test_monitor_update_fail_no_rebroadcast() { assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); check_added_monitors!(nodes[1], 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); 
check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -792,22 +745,17 @@ fn test_monitor_update_raa_while_paused() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); send_payment(&nodes[0], &[&nodes[1]], 5000000); - let (payment_preimage_1, our_payment_hash_1, our_payment_secret_1) = get_payment_preimage_hash!(nodes[1]); + let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash_1, &Some(our_payment_secret_1)).unwrap(); check_added_monitors!(nodes[0], 1); } let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - let (payment_preimage_2, our_payment_hash_2, our_payment_secret_2) = get_payment_preimage_hash!(nodes[0]); + let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000); { - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[1].node.send_payment(&route, our_payment_hash_2, &Some(our_payment_secret_2)).unwrap(); check_added_monitors!(nodes[1], 1); } @@ -818,7 +766,7 @@ fn test_monitor_update_raa_while_paused() { check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -830,9 +778,9 @@ fn test_monitor_update_raa_while_paused() { nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1); check_added_monitors!(nodes[0], 1); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -870,7 +818,6 @@ 
fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // Rebalance a bit so that we can send backwards from 2 to 1. send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); @@ -896,10 +843,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // While the second channel is AwaitingRAA, forward a second payment to get it into the // holding cell. - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[2]); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -913,7 +858,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Now fail monitor updating. - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -923,15 +868,13 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Forward a third payment which will also be added to the holding cell, despite the channel // being paused waiting a monitor update. 
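
The "holding cell" referenced here is the queue a channel uses while it is blocked on a pending monitor update or an outstanding revoke_and_ack: new HTLCs are not put on the wire immediately but queued and flushed, in order, once the channel unblocks. A purely conceptual sketch (not LDK's actual structures):

#[derive(Debug, PartialEq)]
struct PendingHtlc { amount_msat: u64 }

struct ChannelSketch {
    awaiting_monitor_or_raa: bool,
    holding_cell: Vec<PendingHtlc>,
}

impl ChannelSketch {
    fn queue_or_send(&mut self, htlc: PendingHtlc) -> Option<PendingHtlc> {
        if self.awaiting_monitor_or_raa {
            self.holding_cell.push(htlc); // held back, not yet on the wire
            None
        } else {
            Some(htlc) // would go out as update_add_htlc immediately
        }
    }
    fn unblock(&mut self) -> Vec<PendingHtlc> {
        self.awaiting_monitor_or_raa = false;
        std::mem::take(&mut self.holding_cell) // flush in original order
    }
}

fn main() {
    let mut chan = ChannelSketch { awaiting_monitor_or_raa: true, holding_cell: Vec::new() };
    assert!(chan.queue_or_send(PendingHtlc { amount_msat: 1_000_000 }).is_none());
    assert_eq!(chan.unblock(), vec![PendingHtlc { amount_msat: 1_000_000 }]);
}
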
- let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[2]); + let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap(); check_added_monitors!(nodes[0], 1); } - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); // We succeed in updating the monitor for the first channel + chanmon_cfgs[1].persister.set_update_ret(Ok(())); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); @@ -945,9 +888,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { // Try to route another payment backwards from 2 to make sure 1 holds off on responding - let (payment_preimage_4, payment_hash_4, payment_secret_4) = get_payment_preimage_hash!(nodes[0]); - let net_graph_msg_handler = &nodes[2].net_graph_msg_handler; - let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); nodes[2].node.send_payment(&route, payment_hash_4, &Some(payment_secret_4)).unwrap(); check_added_monitors!(nodes[2], 1); @@ -963,9 +904,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. 
- *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); @@ -1146,12 +1087,12 @@ fn test_monitor_update_fail_reestablish() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - assert!(nodes[2].node.claim_funds(our_payment_preimage)); + assert!(nodes[2].node.claim_funds(payment_preimage)); check_added_monitors!(nodes[2], 1); let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -1165,7 +1106,7 @@ fn test_monitor_update_fail_reestablish() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); @@ -1202,9 +1143,9 @@ fn test_monitor_update_fail_reestablish() { get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()) .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1215,13 +1156,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { 
payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage), - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage); } #[test] @@ -1235,9 +1170,8 @@ fn raa_no_response_awaiting_raa_state() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); - let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]); @@ -1247,8 +1181,6 @@ fn raa_no_response_awaiting_raa_state() { // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS // generation during RAA while in monitor-update-failed state. { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap(); check_added_monitors!(nodes[0], 1); nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); @@ -1276,7 +1208,7 @@ fn raa_no_response_awaiting_raa_state() { // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA, // then restore channel monitor updates. - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1288,9 +1220,9 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1); check_added_monitors!(nodes[1], 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); // nodes[1] should be AwaitingRAA here! 
check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1301,8 +1233,6 @@ fn raa_no_response_awaiting_raa_state() { // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync // commitment transaction states) whereas here we can explicitly check for it. { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap(); check_added_monitors!(nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1359,7 +1289,6 @@ fn claim_while_disconnected_monitor_update_fail() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); // Forward a payment for B to claim let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -1381,7 +1310,7 @@ fn claim_while_disconnected_monitor_update_fail() { // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); @@ -1391,10 +1320,8 @@ fn claim_while_disconnected_monitor_update_fail() { // Send a second payment from A to B, resulting in a commitment update that gets swallowed with // the monitor still failed - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -1410,9 +1337,9 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. 
- *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1459,15 +1386,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(*payment_preimage, payment_preimage_1); - }, - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage_1); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } @@ -1483,19 +1402,16 @@ fn monitor_failed_no_reestablish_response() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); // Route the payment and deliver the initial commitment_signed (with a monitor update failure // on receipt). - let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap(); check_added_monitors!(nodes[0], 1); } - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -1521,9 +1437,9 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); 
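Nearly every hunk in chanmon_update_fail_tests.rs follows the same mechanical rewrite: monitor-update failures are injected through the per-node TestPersister held in chanmon_cfgs rather than by poking nodes[i].chain_monitor.update_ret directly, and completion is signalled to the ChainMonitor itself instead of going through ChannelManager::channel_monitor_updated. A minimal sketch of the pattern, assuming the chanmon_cfgs/nodes fixtures and macros from functional_test_utils that this file already uses (surrounding setup elided):

    // Make the next ChannelMonitor persistence fail with a transient error; the
    // channel pauses instead of sending its RAA/commitment_signed responses.
    chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));

    // ... deliver update_add_htlc / commitment_signed while updates are blocked ...

    // Let persistence succeed again, then tell the ChainMonitor the pending
    // update completed; note latest_monitor_update_id now yields a 3-tuple and
    // the outpoint is passed by value rather than by reference.
    chanmon_cfgs[1].persister.set_update_ret(Ok(()));
    let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id
        .lock().unwrap().get(&channel_id).unwrap().clone();
    nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
    check_added_monitors!(nodes[1], 0);

The route boilerplate collapses the same way: a single get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000) call replaces the per-test TestLogger plus explicit get_route invocation, returning (route, payment_hash, payment_preimage, payment_secret).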
check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1559,14 +1475,11 @@ fn first_message_on_recv_ordering() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); // Route the first payment outbound, holding the last RAA for B until we are set up so that we // can deliver it and fail the monitor update. - let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -1588,10 +1501,8 @@ fn first_message_on_recv_ordering() { let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // Route the second payment, generating an update_add_htlc/commitment_signed - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -1600,7 +1511,7 @@ fn first_message_on_recv_ordering() { let payment_event = SendEvent::from_event(events.pop().unwrap()); assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // Deliver the final RAA for the first payment, which does not require a response. 
RAAs // generally require a commitment_signed, so the fact that we're expecting an opposite response @@ -1619,9 +1530,9 @@ fn first_message_on_recv_ordering() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1657,14 +1568,13 @@ fn test_monitor_update_fail_claim() { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // Rebalance a bit so that we can send backwards from 3 to 2. send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); assert!(nodes[1].node.claim_funds(payment_preimage_1)); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); @@ -1674,18 +1584,15 @@ fn test_monitor_update_fail_claim() { // already-signed commitment transaction and will instead wait for it to resolve before // forwarding the payment onwards. - let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]); - let route; + let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000); { - let net_graph_msg_handler = &nodes[2].net_graph_msg_handler; - route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[2], 1); } // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be // paused, so forward shouldn't succeed until we call channel_monitor_updated(). - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1708,8 +1615,8 @@ fn test_monitor_update_fail_claim() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
- let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1770,7 +1677,6 @@ fn test_monitor_update_on_pending_forwards() { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // Rebalance a bit so that we can send backwards from 3 to 1. send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); @@ -1785,10 +1691,8 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); { - let net_graph_msg_handler = &nodes[2].net_graph_msg_handler; - let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[2], 1); } @@ -1799,15 +1703,15 @@ fn test_monitor_update_on_pending_forwards() { nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1817,7 +1721,7 @@ fn test_monitor_update_on_pending_forwards() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); - if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. 
} = events[0] { + if let Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } = events[0] { assert_eq!(payment_hash, payment_hash_1); assert!(rejected_by_dest); } else { panic!("Unexpected event!"); } @@ -1842,16 +1746,13 @@ fn monitor_update_claim_fail_no_response() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let logger = test_utils::TestLogger::new(); // Forward a payment for B to claim let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -1862,16 +1763,16 @@ fn monitor_update_claim_fail_no_response() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); assert!(nodes[1].node.claim_funds(payment_preimage_1)); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1883,15 +1784,7 @@ fn monitor_update_claim_fail_no_response() { let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(*payment_preimage, payment_preimage_1); - }, - _ => panic!("Unexpected event"), - } + 
expect_payment_sent!(nodes[0], payment_preimage_1); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } @@ -1915,21 +1808,21 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); let events = nodes[0].node.get_and_clear_pending_events(); @@ -1959,9 +1852,9 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { @@ -1985,6 +1878,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: send_payment(&nodes[0], &[&nodes[1]], 8000000); close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); } #[test] @@ -2007,10 +1902,8 @@ fn test_path_paused_mpp() { let (chan_2_ann, _, chan_2_id, _) = 
create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()); let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let logger = test_utils::TestLogger::new(); - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]); - let mut route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3: let path = route.paths[0].clone(); @@ -2024,19 +1917,19 @@ fn test_path_paused_mpp() { // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second // (for the path 0 -> 2 -> 3) fails. - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - *nodes[0].chain_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[0].persister.set_next_update_ret(Some(Err(ChannelMonitorUpdateErr::TemporaryFailure))); // Now check that we get the right return value, indicating that the first path succeeded but // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as // some paths succeeded, preventing retry. - if let Err(PaymentSendFailure::PartialFailure(results)) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { + if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { assert_eq!(results.len(), 2); if let Ok(()) = results[0] {} else { panic!(); } if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); } } else { panic!(); } check_added_monitors!(nodes[0], 2); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); // Pass the first HTLC of the payment along to nodes[3]. let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -2045,8 +1938,8 @@ fn test_path_paused_mpp() { // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. 
- let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None); @@ -2074,9 +1967,7 @@ fn test_pending_update_fee_ack_on_reconnect() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); send_payment(&nodes[0], &[&nodes[1]], 100_000_00); - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]); - let route = get_route(&nodes[1].node.get_our_node_id(), &nodes[1].net_graph_msg_handler.network_graph, - &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap(); + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000); nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); check_added_monitors!(nodes[1], 1); let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2257,7 +2148,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000, InitFeatures::known(), InitFeatures::known()).2; - let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(&nodes[1]); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000); let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]); // Do a really complicated dance to get an HTLC into the holding cell, with MonitorUpdateFailed @@ -2281,11 +2172,6 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // will not be freed from the holding cell. 
let (payment_preimage_0, _, _) = route_payment(&nodes[1], &[&nodes[0]], 100000); - let route = { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), None, None, &Vec::new(), 100000, TEST_FINAL_CLTV, nodes[0].logger).unwrap() - }; - nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap(); check_added_monitors!(nodes[0], 1); let send = SendEvent::from_node(&nodes[0]); @@ -2294,7 +2180,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[0], 0); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); assert!(nodes[0].node.claim_funds(payment_preimage_0)); check_added_monitors!(nodes[0], 1); @@ -2314,7 +2200,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { if reload_a { let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap(); persister = test_utils::TestPersister::new(); let keys_manager = &chanmon_cfgs[0].keys_manager; @@ -2387,9 +2273,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // If we finish updating the monitor, we should free the holding cell right away (this did // not occur prior to #756). - *nodes[0].chain_monitor.update_ret.lock().unwrap() = None; - let (funding_txo, mon_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&funding_txo, mon_id); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id); // New outbound messages should be generated immediately upon a call to // get_and_clear_pending_msg_events (but not before). @@ -2410,7 +2296,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[1], payment_preimage_0); + expect_payment_sent_without_paths!(nodes[1], payment_preimage_0); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); updates.commitment_signed @@ -2429,7 +2315,18 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); - expect_pending_htlcs_forwardable!(nodes[1]); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + match events[1] { + Event::PaymentPathSuccessful { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.process_pending_htlc_forwards(); expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); @@ -2469,9 +2366,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f if htlc_status == HTLCStatusAtDupClaim::HoldingCell { // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be // awaiting a remote revoke_and_ack from nodes[0]. - let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, - &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap(); + let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -2516,9 +2411,10 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent_without_paths!(nodes[0], payment_preimage); if htlc_status == HTLCStatusAtDupClaim::Cleared { commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false); + expect_payment_path_successful!(nodes[0]); } } else { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2542,10 +2438,11 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent_without_paths!(nodes[0], payment_preimage); } if htlc_status != HTLCStatusAtDupClaim::Cleared { commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false); + expect_payment_path_successful!(nodes[0]); } } @@ -2573,8 +2470,8 @@ fn test_temporary_error_during_shutdown() { let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.close_channel(&channel_id).unwrap(); nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); @@ -2585,18 +2482,18 @@ fn test_temporary_error_during_shutdown() { 
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = None; - *nodes[1].chain_monitor.update_ret.lock().unwrap() = None; + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id())); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = None; - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); @@ -2610,6 +2507,8 @@ fn test_temporary_error_during_shutdown() { assert_eq!(txn_a, txn_b); assert_eq!(txn_a.len(), 1); check_spends!(txn_a[0], funding_tx); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); } #[test] @@ -2625,11 +2524,12 @@ fn test_permanent_error_during_sending_shutdown() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)); assert!(nodes[0].node.close_channel(&channel_id).is_ok()); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 2); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() }); } #[test] @@ -2645,13 +2545,14 @@ fn test_permanent_error_during_handling_shutdown() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)); assert!(nodes[0].node.close_channel(&channel_id).is_ok()); let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &shutdown); 
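The shutdown-path tests in the surrounding hunks also gain explicit closure-event assertions. As the changes suggest, a clean cooperative close is now checked with check_closed_event! and ClosureReason::CooperativeClosure on both nodes, while a PermanentFailure from the persister force-closes the channel and surfaces a ProcessingError. A short sketch reusing the same fixtures:

    // Cooperative close: each side should emit exactly one closure event.
    check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
    check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);

    // A PermanentFailure instead force-closes and reports the storage failure.
    chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
    assert!(nodes[0].node.close_channel(&channel_id).is_ok());
    check_closed_broadcast!(nodes[0], true);
    check_added_monitors!(nodes[0], 2);
    check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });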
check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 2); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() }); } #[test] @@ -2667,24 +2568,24 @@ fn double_temp_error() { let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // `claim_funds` results in a ChannelMonitorUpdate. assert!(nodes[1].node.claim_funds(payment_preimage_1)); check_added_monitors!(nodes[1], 1); - let (funding_tx, latest_update_1) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, // which had some asserts that prevented it from being called twice. assert!(nodes[1].node.claim_funds(payment_preimage_2)); check_added_monitors!(nodes[1], 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); - let (_, latest_update_2) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&funding_tx, latest_update_1); + let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); - nodes[1].node.channel_monitor_updated(&funding_tx, latest_update_2); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2); // Complete the first HTLC. 
let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -2705,7 +2606,7 @@ fn double_temp_error() { assert_eq!(node_id, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1); check_added_monitors!(nodes[0], 0); - expect_payment_sent!(nodes[0], payment_preimage_1); + expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1); check_added_monitors!(nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); @@ -2743,6 +2644,7 @@ fn double_temp_error() { }; nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2); check_added_monitors!(nodes[0], 0); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 858e307b77e..03633cec188 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -23,18 +23,19 @@ use bitcoin::secp256k1::{Secp256k1,Signature}; use bitcoin::secp256k1; use ln::{PaymentPreimage, PaymentHash}; -use ln::features::{ChannelFeatures, InitFeatures}; +use ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures}; use ln::msgs; use ln::msgs::{DecodeError, OptionalField, DataLossProtect}; -use ln::script::ShutdownScript; +use ln::script::{self, ShutdownScript}; use ln::channelmanager::{CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT}; -use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction}; +use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction}; use ln::chan_utils; use chain::BestBlock; use chain::chaininterface::{FeeEstimator,ConfirmationTarget}; -use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER}; +use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS}; use chain::transaction::{OutPoint, TransactionData}; use chain::keysinterface::{Sign, KeysInterface}; +use util::events::ClosureReason; use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter}; use util::logger::Logger; use util::errors::APIError; @@ -44,7 +45,6 @@ use util::scid_utils::scid_from_parts; use io; use prelude::*; use core::{cmp,mem,fmt}; -use core::convert::TryFrom; use core::ops::Deref; #[cfg(any(test, feature = "fuzztarget", debug_assertions))] use sync::Mutex; @@ -62,7 +62,7 @@ pub struct ChannelValueStat { pub counterparty_dust_limit_msat: u64, } -#[derive(Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq)] enum 
FeeUpdateState { // Inbound states mirroring InboundHTLCState RemoteAnnounced, @@ -293,6 +293,19 @@ struct HTLCStats { pending_htlcs_value_msat: u64, on_counterparty_tx_dust_exposure_msat: u64, on_holder_tx_dust_exposure_msat: u64, + holding_cell_msat: u64, + on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included +} + +/// An enum gathering stats on commitment transaction, either local or remote. +struct CommitmentStats<'a> { + tx: CommitmentTransaction, // the transaction info + feerate_per_kw: u32, // the feerate included to build the transaction + total_fee_sat: u64, // the total fee included in the transaction + num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included) + htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction + local_balance_msat: u64, // local balance before fees but considering dust limits + remote_balance_msat: u64, // remote balance before fees but considering dust limits } /// Used when calculating whether we or the remote can afford an additional HTLC. @@ -340,6 +353,29 @@ pub enum UpdateFulfillCommitFetch { DuplicateClaim {}, } +/// The return value of `revoke_and_ack` on success, primarily updates to other channels or HTLC +/// state. +pub(super) struct RAAUpdates { + pub commitment_update: Option<msgs::CommitmentUpdate>, + pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, + pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + pub finalized_claimed_htlcs: Vec<HTLCSource>, + pub monitor_update: ChannelMonitorUpdate, + pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>, +} + +/// The return value of `monitor_updating_restored` +pub(super) struct MonitorRestoreUpdates { + pub raa: Option<msgs::RevokeAndACK>, + pub commitment_update: Option<msgs::CommitmentUpdate>, + pub order: RAACommitmentOrder, + pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, + pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + pub finalized_claimed_htlcs: Vec<HTLCSource>, + pub funding_broadcastable: Option<Transaction>, + pub funding_locked: Option<msgs::FundingLocked>, +} + /// If the majority of the channels funds are to the fundee and the initiator holds only just /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the /// initiator controls the feerate, if they then go to increase the channel fee, they may have no @@ -351,11 +387,32 @@ pub enum UpdateFulfillCommitFetch { /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new /// HTLCs for days we may need this to suffice for feerate increases across days, but that may /// leave the channel less usable as we hold a bigger reserve. -#[cfg(fuzzing)] +#[cfg(any(fuzzing, test))] pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2; -#[cfg(not(fuzzing))] +#[cfg(not(any(fuzzing, test)))] const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2; +/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the +/// channel creation on an inbound channel, we simply force-close and move on. +/// This constant is the one suggested in BOLT 2. +pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016; + +/// In case of a concurrent update_add_htlc proposed by our counterparty, we might +/// not have enough balance value remaining to cover the onchain cost of this new +/// HTLC weight. If this happens, our counterparty fails the reception of our +/// commitment_signed including this new HTLC due to infringement on the channel +/// reserve.
+/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of +/// size 2. However, if the number of concurrent update_add_htlc is higher, this still +/// leads to a channel force-close. Ultimately, this is an issue coming from the +/// design of LN state machines, allowing asynchronous updates. +pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2; + +/// When a channel is opened, we check that the funding amount is enough to pay for relevant +/// commitment transaction fees, with at least this many HTLCs present on the commitment +/// transaction (not counting the value of the HTLCs themselves). +pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4; + // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking // has been completed, and then turn into a Channel to get compiler-time enforcement of things like // calling channel_id() before we're set up or things like get_outbound_funding_signed on an @@ -407,6 +464,7 @@ pub(super) struct Channel { monitor_pending_commitment_signed: bool, monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + monitor_pending_finalized_fulfills: Vec, // pending_update_fee is filled when sending and receiving update_fee. // @@ -423,9 +481,14 @@ pub(super) struct Channel { holding_cell_update_fee: Option, next_holder_htlc_id: u64, next_counterparty_htlc_id: u64, - update_time_counter: u32, feerate_per_kw: u32, + /// The timestamp set on our latest `channel_update` message for this channel. It is updated + /// when the channel is updated in ways which may impact the `channel_update` message or when a + /// new block is received, ensuring it's always at least moderately close to the current real + /// time. + update_time_counter: u32, + #[cfg(debug_assertions)] /// Max to_local and to_remote outputs in a locally-generated commitment transaction holder_max_commitment_tx_output: Mutex<(u64, u64)>, @@ -452,20 +515,36 @@ pub(super) struct Channel { funding_tx_confirmed_in: Option, funding_tx_confirmation_height: u32, short_channel_id: Option, + /// Either the height at which this channel was created or the height at which it was last + /// serialized if it was serialized by versions prior to 0.0.103. + /// We use this to close if funding is never broadcasted. + channel_creation_height: u32, counterparty_dust_limit_satoshis: u64, + #[cfg(test)] pub(super) holder_dust_limit_satoshis: u64, #[cfg(not(test))] holder_dust_limit_satoshis: u64, + #[cfg(test)] pub(super) counterparty_max_htlc_value_in_flight_msat: u64, #[cfg(not(test))] counterparty_max_htlc_value_in_flight_msat: u64, - //get_holder_max_htlc_value_in_flight_msat(): u64, + + #[cfg(test)] + pub(super) holder_max_htlc_value_in_flight_msat: u64, + #[cfg(not(test))] + holder_max_htlc_value_in_flight_msat: u64, + /// minimum channel reserve for self to maintain - set by them. counterparty_selected_channel_reserve_satoshis: Option, - // get_holder_selected_channel_reserve_satoshis(channel_value_sats: u64): u64 + + #[cfg(test)] + pub(super) holder_selected_channel_reserve_satoshis: u64, + #[cfg(not(test))] + holder_selected_channel_reserve_satoshis: u64, + counterparty_htlc_minimum_msat: u64, holder_htlc_minimum_msat: u64, #[cfg(test)] @@ -527,6 +606,9 @@ pub(super) struct Channel { // is fine, but as a sanity check in our failure to generate the second claim, we check here // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC. 
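// A minimal, self-contained sketch (not the code added by this diff) of how the new
// FUNDING_CONF_DEADLINE_BLOCKS constant and the channel_creation_height field introduced in
// this hunk fit together: an inbound channel whose funding transaction is still unconfirmed
// this many blocks after creation is simply force-closed. Function and parameter names below
// are illustrative only.

const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// Returns true if the funding transaction is still unconfirmed and the BOLT 2 suggested
/// deadline (2016 blocks after channel creation) has passed, i.e. we should force-close and
/// forget the channel.
fn funding_deadline_passed(
	funding_tx_confirmation_height: u32, // 0 while the funding tx is unconfirmed
	channel_creation_height: u32,
	current_chain_height: u32,
) -> bool {
	funding_tx_confirmation_height == 0
		&& current_chain_height >= channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS
}

fn main() {
	// Created at block 700_000 and still unconfirmed at 702_016: past the deadline.
	assert!(funding_deadline_passed(0, 700_000, 702_016));
	// A confirmed funding transaction is never force-closed by this rule.
	assert!(!funding_deadline_passed(700_010, 700_000, 800_000));
}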
historical_inbound_htlc_fulfills: HashSet, + + /// This channel's type, as negotiated during channel open + channel_type: ChannelTypeFeatures, } #[cfg(any(test, feature = "fuzztarget"))] @@ -540,10 +622,12 @@ struct CommitmentTxInfoCached { pub const OUR_MAX_HTLCS: u16 = 50; //TODO -#[cfg(not(test))] -const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; -#[cfg(test)] -pub const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; +pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 { + const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; + const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124; + if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT } +} + #[cfg(not(test))] const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; #[cfg(test)] @@ -555,21 +639,24 @@ pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330; /// it's 2^24. pub const MAX_FUNDING_SATOSHIS: u64 = 1 << 24; -/// Maximum counterparty `dust_limit_satoshis` allowed. 2 * standard dust threshold on p2wsh output -/// Scales up on Bitcoin Core's proceeding policy with dust outputs. A typical p2wsh output is 43 -/// bytes to which Core's `GetDustThreshold()` sums up a minimal spend of 67 bytes (even if -/// a p2wsh witnessScript might be *effectively* smaller), `dustRelayFee` is set to 3000sat/kb, thus -/// 110 * 3000 / 1000 = 330. Per-protocol rules, all time-sensitive outputs are p2wsh, a value of -/// 330 sats is the lower bound desired to ensure good propagation of transactions. We give a bit -/// of margin to our counterparty and pick up 660 satoshis as an accepted `dust_limit_satoshis` -/// upper bound to avoid negotiation conflicts with other implementations. -pub const MAX_DUST_LIMIT_SATOSHIS: u64 = 2 * 330; - -/// A typical p2wsh output is 43 bytes to which Core's `GetDustThreshold()` sums up a minimal -/// spend of 67 bytes (even if a p2wsh witnessScript might be *effectively* smaller), `dustRelayFee` -/// is set to 3000sat/kb, thus 110 * 3000 / 1000 = 330. Per-protocol rules, all time-sensitive outputs -/// are p2wsh, a value of 330 sats is the lower bound desired to ensure good propagation of transactions. -pub const MIN_DUST_LIMIT_SATOSHIS: u64 = 330; +/// The maximum network dust limit for standard script formats. This currently represents the +/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire +/// transaction non-standard and thus refuses to relay it. +/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many +/// implementations use this value for their dust limit today. +pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546; + +/// The maximum channel dust limit we will accept from our counterparty. +pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS; + +/// The dust limit is used for both the commitment transaction outputs as well as the closing +/// transactions. For cooperative closing transactions, we require segwit outputs, though accept +/// *any* segwit scripts, which are allowed to be up to 42 bytes in length. +/// In order to avoid having to concern ourselves with standardness during the closing process, we +/// simply require our counterparty to use a dust limit which will leave any segwit output +/// standard. +/// See https://github.com/lightningnetwork/lightning-rfc/issues/905 for more details. +pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354; /// Used to return a simple Error back to ChannelManager. 
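// A standalone sketch of the commitment-transaction fee math refactored in this hunk. The
// constants (724/1124 weight units base, 172 per non-dust HTLC) are taken from the diff; the
// helper names mirror the new commitment_tx_base_weight()/commit_tx_fee_sat() functions but
// this is an illustration, not the library code itself.

fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
	if opt_anchors { 1124 } else { 724 }
}

const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

/// Fee (in satoshis) of a commitment transaction carrying `num_nondust_htlcs` HTLC outputs
/// at `feerate_per_kw` (satoshis per 1000 weight units).
fn commit_tx_fee_sat(feerate_per_kw: u32, num_nondust_htlcs: usize, opt_anchors: bool) -> u64 {
	feerate_per_kw as u64
		* (commitment_tx_base_weight(opt_anchors)
			+ num_nondust_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)
		/ 1000
}

fn main() {
	// At 253 sat/kw with 2 non-dust HTLCs: non-anchor vs anchor commitment weights.
	assert_eq!(commit_tx_fee_sat(253, 2, false), 270); // 253 * (724 + 344) / 1000
	assert_eq!(commit_tx_fee_sat(253, 2, true), 371);  // 253 * (1124 + 344) / 1000
}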
Will get converted to a /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our @@ -611,16 +698,29 @@ impl Channel { /// required by us. /// /// Guaranteed to return a value no larger than channel_value_satoshis + /// + /// This is used both for new channels and to figure out what reserve value we sent to peers + /// for channels serialized before we included our selected reserve value in the serialized + /// data explicitly. pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 { let (q, _) = channel_value_satoshis.overflowing_div(100); cmp::min(channel_value_satoshis, cmp::max(q, 1000)) //TODO } + pub(crate) fn opt_anchors(&self) -> bool { + self.channel_transaction_parameters.opt_anchors.is_some() + } + // Constructors: - pub fn new_outbound(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig) -> Result, APIError> + pub fn new_outbound( + fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures, + channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig, current_chain_height: u32 + ) -> Result, APIError> where K::Target: KeysInterface, F::Target: FeeEstimator, { + let opt_anchors = false; // TODO - should be based on features + let holder_selected_contest_delay = config.own_channel_config.our_to_self_delay; let holder_signer = keys_provider.get_channel_signer(false, channel_value_satoshis); let pubkeys = holder_signer.pubkeys().clone(); @@ -636,12 +736,18 @@ impl Channel { return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)}); } let holder_selected_channel_reserve_satoshis = Channel::::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis); - if holder_selected_channel_reserve_satoshis < MIN_DUST_LIMIT_SATOSHIS { + if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) }); } let feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; + let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors); + if value_to_self_msat < commitment_tx_fee { + return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); + } + let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes()); @@ -672,7 +778,7 @@ impl Channel { cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - value_to_self_msat: channel_value_satoshis * 1000 - push_msat, + value_to_self_msat, pending_inbound_htlcs: Vec::new(), pending_outbound_htlcs: Vec::new(), @@ -690,6 +796,7 @@ impl Channel { monitor_pending_commitment_signed: false, monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), + monitor_pending_finalized_fulfills: Vec::new(), #[cfg(debug_assertions)] holder_max_commitment_tx_output: 
Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), @@ -704,12 +811,15 @@ impl Channel { funding_tx_confirmed_in: None, funding_tx_confirmation_height: 0, short_channel_id: None, + channel_creation_height: current_chain_height, feerate_per_kw: feerate, counterparty_dust_limit_satoshis: 0, - holder_dust_limit_satoshis: MIN_DUST_LIMIT_SATOSHIS, + holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, counterparty_max_htlc_value_in_flight_msat: 0, + holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis), counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel + holder_selected_channel_reserve_satoshis, counterparty_htlc_minimum_msat: 0, holder_htlc_minimum_msat: if config.own_channel_config.our_htlc_minimum_msat == 0 { 1 } else { config.own_channel_config.our_htlc_minimum_msat }, counterparty_max_accepted_htlcs: 0, @@ -722,7 +832,8 @@ impl Channel { holder_selected_contest_delay: config.own_channel_config.our_to_self_delay, is_outbound_from_holder: true, counterparty_parameters: None, - funding_outpoint: None + funding_outpoint: None, + opt_anchors: if opt_anchors { Some(()) } else { None }, }, funding_transaction: None, @@ -748,6 +859,11 @@ impl Channel { #[cfg(any(test, feature = "fuzztarget"))] historical_inbound_htlc_fulfills: HashSet::new(), + + // We currently only actually support one channel type, so don't retry with new types + // on error messages. When we support more we'll need fallback support (assuming we + // want to support old types). + channel_type: ChannelTypeFeatures::only_static_remote_key(), }) } @@ -755,8 +871,12 @@ impl Channel { where F::Target: FeeEstimator { let lower_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background); - if feerate_per_kw < lower_limit { - return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit))); + // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing + // occasional issues with feerate disagreements between an initiator that wants a feerate + // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250 + // sat/kw before the comparison here. + if feerate_per_kw + 250 < lower_limit { + return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit))); } // We only bound the fee updates on the upper side to prevent completely absurd feerates, // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee. @@ -772,10 +892,33 @@ impl Channel { /// Creates a new channel from a remote sides' request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! 
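// A minimal sketch of the relaxed feerate sanity check introduced above. The 250 sat/kw
// slack corresponds to one full sat/vbyte of fee-estimator rounding; the plain String error
// stands in for LDK's ChannelError purely for illustration.

/// Reject a proposed feerate only if it is more than one sat/vbyte (250 sat/kw) below our
/// own background estimate, tolerating estimators that round up to whole sat/vbyte values.
fn sanity_check_feerate(feerate_per_kw: u32, background_feerate_per_kw: u32) -> Result<(), String> {
	if feerate_per_kw + 250 < background_feerate_per_kw {
		return Err(format!(
			"Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)",
			feerate_per_kw, background_feerate_per_kw));
	}
	Ok(())
}

fn main() {
	// A 1.1 sat/vbyte initiator (275 sat/kw) vs a receiver whose estimator rounded up to
	// 2 sat/vbyte (500 sat/kw): accepted with the 250 sat/kw slack, rejected well below it.
	assert!(sanity_check_feerate(275, 500).is_ok());
	assert!(sanity_check_feerate(200, 500).is_err());
}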
- pub fn new_from_req(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u64, config: &UserConfig) -> Result, ChannelError> + pub fn new_from_req( + fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures, + msg: &msgs::OpenChannel, user_id: u64, config: &UserConfig, current_chain_height: u32, logger: &L + ) -> Result, ChannelError> where K::Target: KeysInterface, - F::Target: FeeEstimator + F::Target: FeeEstimator, + L::Target: Logger, { + let opt_anchors = false; // TODO - should be based on features + + // First check the channel type is known, failing before we do anything else if we don't + // support this channel type. + let channel_type = if let Some(channel_type) = &msg.channel_type { + if channel_type.supports_any_optional_bits() { + return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned())); + } + if *channel_type != ChannelTypeFeatures::only_static_remote_key() { + return Err(ChannelError::Close("Channel Type was not understood".to_owned())); + } + channel_type.clone() + } else { + ChannelTypeFeatures::from_counterparty_init(&their_features) + }; + if !channel_type.supports_static_remote_key() { + return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned())); + } + let holder_signer = keys_provider.get_channel_signer(true, msg.funding_satoshis); let pubkeys = holder_signer.pubkeys().clone(); let counterparty_pubkeys = ChannelPublicKeys { @@ -805,9 +948,6 @@ impl Channel { if msg.dust_limit_satoshis > msg.funding_satoshis { return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. 
Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis))); } - if msg.dust_limit_satoshis > msg.channel_reserve_satoshis { - return Err(ChannelError::Close(format!("Bogus; channel reserve ({}) is less than dust limit ({})", msg.channel_reserve_satoshis, msg.dust_limit_satoshis))); - } let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; if msg.htlc_minimum_msat >= full_channel_value_msat { return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); @@ -841,11 +981,11 @@ impl Channel { if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs { return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs))); } - if msg.dust_limit_satoshis < MIN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_DUST_LIMIT_SATOSHIS))); + if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); } - if msg.dust_limit_satoshis > MAX_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_DUST_LIMIT_SATOSHIS))); + if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); } // Convert things into internal flags and prep our state: @@ -859,31 +999,31 @@ impl Channel { // we either accept their preference or the preferences match local_config.announced_channel = announce; - let background_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background); - let holder_selected_channel_reserve_satoshis = Channel::::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis); - if holder_selected_channel_reserve_satoshis < MIN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_DUST_LIMIT_SATOSHIS))); + if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); } - if msg.channel_reserve_satoshis < MIN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is smaller than our dust limit ({})", msg.channel_reserve_satoshis, MIN_DUST_LIMIT_SATOSHIS))); + if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). 
We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.", + msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS); } if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis { return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis))); } // check if the funder's amount for the initial commitment tx is sufficient - // for full fee payment + // for full fee payment plus a few HTLCs to ensure the channel will be useful. let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat; - let lower_limit = background_feerate as u64 * COMMITMENT_TX_BASE_WEIGHT; - if funders_amount_msat < lower_limit { - return Err(ChannelError::Close(format!("Insufficient funding amount ({}) for initial commitment. Must be at least {}", funders_amount_msat, lower_limit))); + let commitment_tx_fee = Self::commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000; + if funders_amount_msat / 1000 < commitment_tx_fee { + return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee))); } - let to_local_msat = msg.push_msat; - let to_remote_msat = funders_amount_msat - background_feerate as u64 * COMMITMENT_TX_BASE_WEIGHT; - if to_local_msat <= msg.channel_reserve_satoshis * 1000 && to_remote_msat <= holder_selected_channel_reserve_satoshis * 1000 { - return Err(ChannelError::Close("Insufficient funding amount for initial commitment".to_owned())); + let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee; + // While it's reasonable for us to not meet the channel reserve initially (if they don't + // want to push much to us), our counterparty should always have more than our reserve. + if to_remote_satoshis < holder_selected_channel_reserve_satoshis { + return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned())); } let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { @@ -893,10 +1033,10 @@ impl Channel { if script.len() == 0 { None } else { - match ShutdownScript::try_from((script.clone(), their_features)) { - Ok(shutdown_script) => Some(shutdown_script.into_inner()), - Err(_) => return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))), + if !script::is_bolt2_compliant(&script, their_features) { + return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))) } + Some(script.clone()) } }, // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). 
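// A self-contained sketch of the new accept-side affordability check shown above: the funder
// must be able to pay the commitment fee assuming MIN_AFFORDABLE_HTLC_COUNT HTLCs *and* still
// exceed the reserve we ask them to maintain. The constants and the fee formula come from
// this diff; the function and parameter names are illustrative stand-ins.

const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
	let base_weight: u64 = if opt_anchors { 1124 } else { 724 };
	feerate_per_kw as u64 * (base_weight + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
}

fn check_funder_can_afford(
	funding_satoshis: u64, push_msat: u64, feerate_per_kw: u32,
	holder_selected_channel_reserve_satoshis: u64, opt_anchors: bool,
) -> Result<(), String> {
	let funders_amount_msat = funding_satoshis * 1000 - push_msat;
	let commitment_tx_fee = commit_tx_fee_sat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors);
	if funders_amount_msat / 1000 < commitment_tx_fee {
		return Err("Funding amount can't even pay the initial commitment transaction fee".to_owned());
	}
	let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
	if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
		return Err("Insufficient funding amount for initial reserve".to_owned());
	}
	Ok(())
}

fn main() {
	// 100k sat channel, nothing pushed, 1000 sat reserve: easily affordable at 253 sat/kw.
	assert!(check_funder_can_afford(100_000, 0, 253, 1_000, false).is_ok());
	// A tiny channel where the funder pushes almost everything to us fails the check.
	assert!(check_funder_can_afford(2_000, 1_900_000, 253, 1_000, false).is_err());
}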
Peer looks buggy, we fail the channel @@ -953,6 +1093,7 @@ impl Channel { monitor_pending_commitment_signed: false, monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), + monitor_pending_finalized_fulfills: Vec::new(), #[cfg(debug_assertions)] holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), @@ -967,13 +1108,16 @@ impl Channel { funding_tx_confirmed_in: None, funding_tx_confirmation_height: 0, short_channel_id: None, + channel_creation_height: current_chain_height, feerate_per_kw: msg.feerate_per_kw, channel_value_satoshis: msg.funding_satoshis, counterparty_dust_limit_satoshis: msg.dust_limit_satoshis, - holder_dust_limit_satoshis: MIN_DUST_LIMIT_SATOSHIS, + holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000), + holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis), counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis), + holder_selected_channel_reserve_satoshis, counterparty_htlc_minimum_msat: msg.htlc_minimum_msat, holder_htlc_minimum_msat: if config.own_channel_config.our_htlc_minimum_msat == 0 { 1 } else { config.own_channel_config.our_htlc_minimum_msat }, counterparty_max_accepted_htlcs: msg.max_accepted_htlcs, @@ -989,7 +1133,8 @@ impl Channel { selected_contest_delay: msg.to_self_delay, pubkeys: counterparty_pubkeys, }), - funding_outpoint: None + funding_outpoint: None, + opt_anchors: if opt_anchors { Some(()) } else { None }, }, funding_transaction: None, @@ -1015,6 +1160,8 @@ impl Channel { #[cfg(any(test, feature = "fuzztarget"))] historical_inbound_htlc_fulfills: HashSet::new(), + + channel_type, }; Ok(chan) @@ -1033,12 +1180,10 @@ impl Channel { /// have not yet committed it. Such HTLCs will only be included in transactions which are being /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both /// which peer generated this transaction and "to whom" this transaction flows. - /// Returns (the transaction info, the number of HTLC outputs which were present in the - /// transaction, the list of HTLCs which were not ignored when building the transaction). - /// Note that below-dust HTLCs are included in the fourth return value, but not the third, and - /// sources are provided only for outbound HTLCs in the fourth return value. 
#[inline] - fn build_commitment_transaction(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> (CommitmentTransaction, u32, usize, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger { + fn build_commitment_transaction(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats + where L::Target: Logger + { let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new(); let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len(); let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs); @@ -1082,7 +1227,7 @@ impl Channel { ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => { if $outbound == local { // "offered HTLC output" let htlc_in_tx = get_htlc_in_commitment!($htlc, true); - if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + (feerate_per_kw as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) { + if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + (feerate_per_kw as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) { log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); included_non_dust_htlcs.push((htlc_in_tx, $source)); } else { @@ -1091,7 +1236,7 @@ impl Channel { } } else { let htlc_in_tx = get_htlc_in_commitment!($htlc, false); - if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + (feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) { + if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + (feerate_per_kw as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) { log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); included_non_dust_htlcs.push((htlc_in_tx, $source)); } else { @@ -1157,13 +1302,13 @@ impl Channel { } } - let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset; + let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset; assert!(value_to_self_msat >= 0); // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to // "violate" their reserve value by couting those against it. Thus, we have to convert // everything to i64 before subtracting as otherwise we can overflow. 
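// A small sketch of the "trimmed to dust" rule applied in the hunk above when deciding
// whether an HTLC gets its own output on the commitment transaction. The 663 weight units
// used here is the BOLT 3 HTLC-timeout weight that htlc_timeout_tx_weight() returns in the
// non-anchor case; the function itself is illustrative, not the library code.

/// An offered (outbound) HTLC is kept on the commitment tx only if, after paying for its
/// second-stage HTLC-timeout transaction, it would still exceed the broadcaster's dust limit.
fn offered_htlc_is_nondust(
	amount_msat: u64, broadcaster_dust_limit_satoshis: u64, feerate_per_kw: u32,
) -> bool {
	const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;
	amount_msat / 1000
		>= broadcaster_dust_limit_satoshis + feerate_per_kw as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000
}

fn main() {
	// At 253 sat/kw with a 354 sat dust limit the threshold is 354 + 167 = 521 sat.
	assert!(!offered_htlc_is_nondust(500_000, 354, 253)); // a 500 sat HTLC is trimmed to dust
	assert!(offered_htlc_is_nondust(600_000, 354, 253));  // a 600 sat HTLC gets an output
}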
- let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset; + let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset; assert!(value_to_remote_msat >= 0); #[cfg(debug_assertions)] @@ -1177,15 +1322,16 @@ impl Channel { }; debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64); broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64); - debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= Channel::::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) as i64); + debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64); broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64); } - let total_fee = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (included_non_dust_htlcs.len() as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000; + let total_fee_sat = Channel::::commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.channel_transaction_parameters.opt_anchors.is_some()); + let anchors_val = if self.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64; let (value_to_self, value_to_remote) = if self.is_outbound() { - (value_to_self_msat / 1000 - total_fee as i64, value_to_remote_msat / 1000) + (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000) } else { - (value_to_self_msat / 1000, value_to_remote_msat / 1000 - total_fee as i64) + (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64) }; let mut value_to_a = if local { value_to_self } else { value_to_remote }; @@ -1216,7 +1362,7 @@ impl Channel { let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number, value_to_a as u64, value_to_b as u64, - false, + self.channel_transaction_parameters.opt_anchors.is_some(), funding_pubkey_a, funding_pubkey_b, keys.clone(), @@ -1229,7 +1375,19 @@ impl Channel { htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap()); htlcs_included.append(&mut included_dust_htlcs); - (tx, feerate_per_kw, num_nondust_htlcs, htlcs_included) + // For the stats, trimmed-to-0 the value in msats accordingly + value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat }; + value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat }; + + CommitmentStats { + tx, + feerate_per_kw, + total_fee_sat, + num_nondust_htlcs, + htlcs_included, + local_balance_msat: value_to_self_msat as u64, + remote_balance_msat: value_to_remote_msat as u64, + } } #[inline] @@ -1580,12 +1738,8 @@ impl Channel { if msg.channel_reserve_satoshis > self.channel_value_satoshis { return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). 
Must not be greater than ({})", msg.channel_reserve_satoshis, self.channel_value_satoshis))); } - if msg.channel_reserve_satoshis < self.holder_dust_limit_satoshis { - return Err(ChannelError::Close(format!("Peer never wants payout outputs? channel_reserve_satoshis was ({}). dust_limit is ({})", msg.channel_reserve_satoshis, self.holder_dust_limit_satoshis))); - } - let remote_reserve = Channel::::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis); - if msg.dust_limit_satoshis > remote_reserve { - return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, remote_reserve))); + if msg.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis))); } let full_channel_value_msat = (self.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; if msg.htlc_minimum_msat >= full_channel_value_msat { @@ -1615,11 +1769,11 @@ impl Channel { if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs { return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs))); } - if msg.dust_limit_satoshis < MIN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_DUST_LIMIT_SATOSHIS))); + if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); } - if msg.dust_limit_satoshis > MAX_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_DUST_LIMIT_SATOSHIS))); + if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); } if msg.minimum_depth > config.peer_channel_config_limits.max_minimum_depth { return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", config.peer_channel_config_limits.max_minimum_depth, msg.minimum_depth))); @@ -1638,10 +1792,10 @@ impl Channel { if script.len() == 0 { None } else { - match ShutdownScript::try_from((script.clone(), their_features)) { - Ok(shutdown_script) => Some(shutdown_script.into_inner()), - Err(_) => return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))), + if !script::is_bolt2_compliant(&script, their_features) { + return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); } + Some(script.clone()) } }, // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). 
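// A compact sketch of the dust-limit bounds now enforced on accept_channel in the hunk
// above, using the new MIN_CHAN_DUST_LIMIT_SATOSHIS / MAX_CHAN_DUST_LIMIT_SATOSHIS constants
// from this diff plus the requirement that the counterparty's dust limit not exceed the
// reserve we selected. Function and parameter names are illustrative only.

const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = 546;

fn check_counterparty_dust_limit(
	dust_limit_satoshis: u64, holder_selected_channel_reserve_satoshis: u64,
) -> Result<(), String> {
	if dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
		return Err(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})",
			dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS));
	}
	if dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
		return Err(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})",
			dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS));
	}
	if dust_limit_satoshis > holder_selected_channel_reserve_satoshis {
		return Err(format!("Dust limit ({}) is bigger than our channel reserve ({})",
			dust_limit_satoshis, holder_selected_channel_reserve_satoshis));
	}
	Ok(())
}

fn main() {
	assert!(check_counterparty_dust_limit(546, 1_000).is_ok());
	assert!(check_counterparty_dust_limit(330, 1_000).is_err()); // below the new 354 sat floor
	assert!(check_counterparty_dust_limit(600, 1_000).is_err()); // above the 546 sat ceiling
}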
Peer looks buggy, we fail the channel @@ -1683,7 +1837,7 @@ impl Channel { let funding_script = self.get_funding_redeemscript(); let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?; - let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger).0; + let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); @@ -1697,7 +1851,7 @@ impl Channel { } let counterparty_keys = self.build_remote_transaction_keys()?; - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).0; + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); @@ -1808,7 +1962,7 @@ impl Channel { let funding_script = self.get_funding_redeemscript(); let counterparty_keys = self.build_remote_transaction_keys()?; - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).0; + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); @@ -1816,7 +1970,7 @@ impl Channel { log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?; - let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).0; + let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); @@ -1901,17 +2055,28 @@ impl Channel { Ok(()) } + /// Returns transaction if there is pending funding transaction that is yet to broadcast + pub fn unbroadcasted_funding(&self) -> Option { + if self.channel_state & (ChannelState::FundingCreated as u32) != 0 { + self.funding_transaction.clone() + } else { + None + } + } + /// Returns a HTLCStats about inbound pending htlcs - fn get_inbound_pending_htlc_stats(&self) -> HTLCStats { + fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { let mut stats = HTLCStats { pending_htlcs: self.pending_inbound_htlcs.len() as u32, pending_htlcs_value_msat: 0, on_counterparty_tx_dust_exposure_msat: 0, on_holder_tx_dust_exposure_msat: 0, + holding_cell_msat: 0, + on_holder_tx_holding_cell_htlcs_count: 0, }; - let counterparty_dust_limit_timeout_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; - let 
holder_dust_limit_success_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; + let counterparty_dust_limit_timeout_sat = (self.get_dust_buffer_feerate(outbound_feerate_update) as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis; + let holder_dust_limit_success_sat = (self.get_dust_buffer_feerate(outbound_feerate_update) as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis; for ref htlc in self.pending_inbound_htlcs.iter() { stats.pending_htlcs_value_msat += htlc.amount_msat; if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { @@ -1925,16 +2090,18 @@ impl Channel { } /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell. - fn get_outbound_pending_htlc_stats(&self) -> HTLCStats { + fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { let mut stats = HTLCStats { pending_htlcs: self.pending_outbound_htlcs.len() as u32, pending_htlcs_value_msat: 0, on_counterparty_tx_dust_exposure_msat: 0, on_holder_tx_dust_exposure_msat: 0, + holding_cell_msat: 0, + on_holder_tx_holding_cell_htlcs_count: 0, }; - let counterparty_dust_limit_success_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; - let holder_dust_limit_timeout_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; + let counterparty_dust_limit_success_sat = (self.get_dust_buffer_feerate(outbound_feerate_update) as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis; + let holder_dust_limit_timeout_sat = (self.get_dust_buffer_feerate(outbound_feerate_update) as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis; for ref htlc in self.pending_outbound_htlcs.iter() { stats.pending_htlcs_value_msat += htlc.amount_msat; if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { @@ -1949,11 +2116,14 @@ impl Channel { if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update { stats.pending_htlcs += 1; stats.pending_htlcs_value_msat += amount_msat; + stats.holding_cell_msat += amount_msat; if *amount_msat / 1000 < counterparty_dust_limit_success_sat { stats.on_counterparty_tx_dust_exposure_msat += amount_msat; } if *amount_msat / 1000 < holder_dust_limit_timeout_sat { stats.on_holder_tx_dust_exposure_msat += amount_msat; + } else { + stats.on_holder_tx_holding_cell_htlcs_count += 1; } } } @@ -1964,32 +2134,48 @@ impl Channel { /// Doesn't bother handling the /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC /// corner case properly. + /// The channel reserve is subtracted from each balance. + /// See also [`Channel::get_balance_msat`] pub fn get_inbound_outbound_available_balance_msat(&self) -> (u64, u64) { // Note that we have to handle overflow due to the above case. 
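// An illustrative sketch of how the dust-exposure totals in HTLCStats above are accumulated:
// an HTLC below its stage-two dust threshold contributes its full value to the exposure on
// that commitment transaction, which is later compared against
// get_max_dust_htlc_exposure_msat(). The 703 weight units is the BOLT 3 non-anchor
// HTLC-success weight; the function and parameter names are simplified stand-ins.

const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;

/// Sum the value of inbound HTLCs that would be dust on our own commitment transaction.
fn on_holder_tx_dust_exposure_msat(
	inbound_htlc_amounts_msat: &[u64], dust_buffer_feerate: u32, holder_dust_limit_satoshis: u64,
) -> u64 {
	// Inbound HTLCs are claimed via HTLC-success transactions on our commitment tx.
	let holder_dust_limit_success_sat =
		dust_buffer_feerate as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder_dust_limit_satoshis;
	inbound_htlc_amounts_msat.iter()
		.filter(|amt| **amt / 1000 < holder_dust_limit_success_sat)
		.sum()
}

fn main() {
	// Threshold at 253 sat/kw with a 354 sat dust limit: 177 + 354 = 531 sat.
	let exposure = on_holder_tx_dust_exposure_msat(&[400_000, 531_000, 1_000_000], 253, 354);
	assert_eq!(exposure, 400_000); // only the 400 sat HTLC is dust and counts as exposure
}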
( cmp::max(self.channel_value_satoshis as i64 * 1000 - self.value_to_self_msat as i64 - - self.get_inbound_pending_htlc_stats().pending_htlcs_value_msat as i64 - - Self::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) as i64 * 1000, + - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 + - self.holder_selected_channel_reserve_satoshis as i64 * 1000, 0) as u64, cmp::max(self.value_to_self_msat as i64 - - self.get_outbound_pending_htlc_stats().pending_htlcs_value_msat as i64 + - self.get_outbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000, 0) as u64 ) } + /// Get our total balance in msat. + /// This is the amount that would go to us if we close the channel, ignoring any on-chain fees. + /// See also [`Channel::get_inbound_outbound_available_balance_msat`] + pub fn get_balance_msat(&self) -> u64 { + self.value_to_self_msat + - self.get_outbound_pending_htlc_stats(None).pending_htlcs_value_msat + } + pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option) { - (Self::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis), - self.counterparty_selected_channel_reserve_satoshis) + (self.holder_selected_channel_reserve_satoshis, self.counterparty_selected_channel_reserve_satoshis) } - // Get the fee cost of a commitment tx with a given number of HTLC outputs. + // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs. // Note that num_htlcs should not include dust HTLCs. - fn commit_tx_fee_msat(&self, num_htlcs: usize) -> u64 { + fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { // Note that we need to divide before multiplying to round properly, // since the lowest denomination of bitcoin on-chain is the satoshi. - (COMMITMENT_TX_BASE_WEIGHT + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * self.feerate_per_kw as u64 / 1000 * 1000 + (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 + } + + // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs. + // Note that num_htlcs should not include dust HTLCs. + #[inline] + fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { + feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 } // Get the commitment tx fee for the local's (i.e. 
our) next commitment transaction based on the @@ -1999,8 +2185,8 @@ impl Channel { fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { assert!(self.is_outbound()); - let real_dust_limit_success_sat = (self.feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; - let real_dust_limit_timeout_sat = (self.feerate_per_kw as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; + let real_dust_limit_success_sat = (self.feerate_per_kw as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis; + let real_dust_limit_timeout_sat = (self.feerate_per_kw as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis; let mut addl_htlcs = 0; if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } @@ -2056,12 +2242,12 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = self.commit_tx_fee_msat(num_htlcs); + let res = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs, self.opt_anchors()); #[cfg(any(test, feature = "fuzztarget"))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = self.commit_tx_fee_msat(num_htlcs - 1); + fee = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); } let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len() + self.holding_cell_htlc_updates.len(); @@ -2090,8 +2276,8 @@ impl Channel { fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { assert!(!self.is_outbound()); - let real_dust_limit_success_sat = (self.feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; - let real_dust_limit_timeout_sat = (self.feerate_per_kw as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; + let real_dust_limit_success_sat = (self.feerate_per_kw as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis; + let real_dust_limit_timeout_sat = (self.feerate_per_kw as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis; let mut addl_htlcs = 0; if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } @@ -2134,12 +2320,12 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = self.commit_tx_fee_msat(num_htlcs); + let res = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs, self.opt_anchors()); #[cfg(any(test, feature = "fuzztarget"))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = self.commit_tx_fee_msat(num_htlcs - 1); + fee = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); } let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len(); let commitment_tx_info = CommitmentTxInfoCached { @@ -2185,14 +2371,13 @@ impl Channel { return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). 
Actual: ({})", self.holder_htlc_minimum_msat, msg.amount_msat))); } - let inbound_stats = self.get_inbound_pending_htlc_stats(); - let outbound_stats = self.get_outbound_pending_htlc_stats(); + let inbound_stats = self.get_inbound_pending_htlc_stats(None); + let outbound_stats = self.get_outbound_pending_htlc_stats(None); if inbound_stats.pending_htlcs + 1 > OUR_MAX_HTLCS as u32 { return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS))); } - let holder_max_htlc_value_in_flight_msat = Channel::::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis); - if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > holder_max_htlc_value_in_flight_msat { - return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", holder_max_htlc_value_in_flight_msat))); + if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.holder_max_htlc_value_in_flight_msat { + return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.holder_max_htlc_value_in_flight_msat))); } // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet // the reserve_satoshis we told them to always have as direct payment so that they lose @@ -2215,7 +2400,7 @@ impl Channel { } } - let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; + let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate(None) as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { @@ -2225,7 +2410,7 @@ impl Channel { } } - let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; + let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate(None) as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { @@ -2253,9 +2438,7 @@ impl Channel { return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned())); }; - let chan_reserve_msat = - Channel::::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) * 1000; - if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < chan_reserve_msat { + if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.holder_selected_channel_reserve_satoshis * 1000 { return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned())); } @@ -2270,7 +2453,7 @@ impl Channel { // sensitive to fee spikes. 
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); - if pending_remote_value_msat - msg.amount_msat - chan_reserve_msat < remote_fee_cost_incl_stuck_buffer_msat { + if pending_remote_value_msat - msg.amount_msat - self.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { // Note that if the pending_forward_status is not updated here, then it's because we're already failing // the HTLC, i.e. its status is already set to failing. log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id())); @@ -2389,36 +2572,32 @@ impl Channel { let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number).map_err(|e| (None, e))?; - let (num_htlcs, mut htlcs_cloned, commitment_tx, commitment_txid, feerate_per_kw) = { - let commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger); - let commitment_txid = { - let trusted_tx = commitment_tx.0.trust(); - let bitcoin_tx = trusted_tx.built_transaction(); - let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis); - - log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", - log_bytes!(msg.signature.serialize_compact()[..]), - log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), - log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); - if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) { - return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned()))); - } - bitcoin_tx.txid - }; - let htlcs_cloned: Vec<_> = commitment_tx.3.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect(); - (commitment_tx.2, htlcs_cloned, commitment_tx.0, commitment_txid, commitment_tx.1) + let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger); + let commitment_txid = { + let trusted_tx = commitment_stats.tx.trust(); + let bitcoin_tx = trusted_tx.built_transaction(); + let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis); + + log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", + log_bytes!(msg.signature.serialize_compact()[..]), + log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), + log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); + if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) { + return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned()))); + } + bitcoin_tx.txid }; + let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect(); // If our counterparty updated the channel fee in this commitment transaction, check that // they can actually afford the new fee now. 
let update_fee = if let Some((_, update_state)) = self.pending_update_fee { update_state == FeeUpdateState::RemoteAnnounced } else { false }; - if update_fee { debug_assert!(!self.is_outbound()); } - let total_fee = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000; if update_fee { - let counterparty_reserve_we_require = Channel::::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis); - if self.channel_value_satoshis - self.value_to_self_msat / 1000 < total_fee + counterparty_reserve_we_require { + debug_assert!(!self.is_outbound()); + let counterparty_reserve_we_require_msat = self.holder_selected_channel_reserve_satoshis * 1000; + if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat { return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()))); } } @@ -2434,26 +2613,27 @@ impl Channel { && info.next_holder_htlc_id == self.next_holder_htlc_id && info.next_counterparty_htlc_id == self.next_counterparty_htlc_id && info.feerate == self.feerate_per_kw { - assert_eq!(total_fee, info.fee / 1000); + assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000); } } } } - if msg.htlc_signatures.len() != num_htlcs { - return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), num_htlcs)))); + if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs { + return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)))); } // TODO: Sadly, we pass HTLCs twice to ChannelMonitor: once via the HolderCommitmentTransaction and once via the update let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len()); for (idx, (htlc, source)) in htlcs_cloned.drain(..).enumerate() { if let Some(_) = htlc.transaction_output_index { - let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, feerate_per_kw, - self.get_counterparty_selected_contest_delay().unwrap(), &htlc, + let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw, + self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.opt_anchors(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key); - let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys); - let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, SigHashType::All)[..]); + let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys); + let htlc_sighashtype = if self.opt_anchors() { SigHashType::SinglePlusAnyoneCanPay } else { SigHashType::All }; + let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype)[..]); log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id())); @@ -2467,7 +2647,7 @@ impl Channel { } let holder_commitment_tx = HolderCommitmentTransaction::new( - commitment_tx, + 
commitment_stats.tx, msg.signature, msg.htlc_signatures.clone(), &self.get_holder_pubkeys().funding_pubkey, @@ -2603,7 +2783,7 @@ impl Channel { // to rebalance channels. match &htlc_update { &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => { - match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone()) { + match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), logger) { Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()), Err(e) => { match e { @@ -2662,12 +2842,7 @@ impl Channel { return Ok((None, htlcs_to_fail)); } let update_fee = if let Some(feerate) = self.holding_cell_update_fee.take() { - assert!(self.is_outbound()); - self.pending_update_fee = Some((feerate, FeeUpdateState::Outbound)); - Some(msgs::UpdateFee { - channel_id: self.channel_id, - feerate_per_kw: feerate as u32, - }) + self.send_update_fee(feerate, logger) } else { None }; @@ -2700,7 +2875,7 @@ impl Channel { /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail, /// generating an appropriate error *after* the channel state has been updated based on the /// revoke_and_ack message. - pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Option, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>), ChannelError> + pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result where L::Target: Logger, { if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { @@ -2766,6 +2941,7 @@ impl Channel { log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id())); let mut to_forward_infos = Vec::new(); let mut revoked_htlcs = Vec::new(); + let mut finalized_claimed_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); let mut update_fail_malformed_htlcs = Vec::new(); let mut require_commitment = false; @@ -2792,6 +2968,7 @@ impl Channel { if let Some(reason) = fail_reason.clone() { // We really want take() here, but, again, non-mut ref :( revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason)); } else { + finalized_claimed_htlcs.push(htlc.source.clone()); // They fulfilled, so we sent them money value_to_self_msat_diff -= htlc.amount_msat as i64; } @@ -2888,8 +3065,14 @@ impl Channel { } self.monitor_pending_forwards.append(&mut to_forward_infos); self.monitor_pending_failures.append(&mut revoked_htlcs); + self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id())); - return Ok((None, Vec::new(), Vec::new(), monitor_update, Vec::new())) + return Ok(RAAUpdates { + commitment_update: None, finalized_claimed_htlcs: Vec::new(), + accepted_htlcs: Vec::new(), failed_htlcs: Vec::new(), + monitor_update, + holding_cell_failed_htlcs: Vec::new() + }); } match self.free_holding_cell_htlcs(logger)? 
{ @@ -2908,7 +3091,14 @@ impl Channel { self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail)) + Ok(RAAUpdates { + commitment_update: Some(commitment_update), + finalized_claimed_htlcs, + accepted_htlcs: to_forward_infos, + failed_htlcs: revoked_htlcs, + monitor_update, + holding_cell_failed_htlcs: htlcs_to_fail + }) }, (None, htlcs_to_fail) => { if require_commitment { @@ -2921,17 +3111,27 @@ impl Channel { log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.", log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len()); - Ok((Some(msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs, - update_fail_malformed_htlcs, - update_fee: None, - commitment_signed - }), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail)) + Ok(RAAUpdates { + commitment_update: Some(msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs, + update_fail_malformed_htlcs, + update_fee: None, + commitment_signed + }), + finalized_claimed_htlcs, + accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs, + monitor_update, holding_cell_failed_htlcs: htlcs_to_fail + }) } else { log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id())); - Ok((None, to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail)) + Ok(RAAUpdates { + commitment_update: None, + finalized_claimed_htlcs, + accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs, + monitor_update, holding_cell_failed_htlcs: htlcs_to_fail + }) } } } @@ -2939,8 +3139,10 @@ impl Channel { /// Adds a pending update to this channel. See the doc for send_htlc for /// further details on the optionness of the return value. + /// If our balance is too low to cover the cost of the next commitment transaction at the + /// new feerate, the update is cancelled. /// You MUST call send_commitment prior to any other calls on this Channel - fn send_update_fee(&mut self, feerate_per_kw: u32) -> Option { + fn send_update_fee(&mut self, feerate_per_kw: u32, logger: &L) -> Option where L::Target: Logger { if !self.is_outbound() { panic!("Cannot send fee from inbound channel"); } @@ -2951,6 +3153,31 @@ impl Channel { panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)"); } + // Before proposing a feerate update, check that we can actually afford the new fee. 
+ let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); + let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); + let keys = if let Ok(keys) = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number) { keys } else { return None; }; + let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger); + let buffer_fee_msat = Channel::::commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.opt_anchors()) * 1000; + let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; + if holder_balance_msat < buffer_fee_msat + self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { + //TODO: auto-close after a number of failures? + log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); + return None; + } + + // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`. + let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; + let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; + if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { + log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); + return None; + } + if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { + log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); + return None; + } + if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 { self.holding_cell_update_fee = Some(feerate_per_kw); return None; @@ -2966,7 +3193,7 @@ impl Channel { } pub fn send_update_fee_and_commit(&mut self, feerate_per_kw: u32, logger: &L) -> Result, ChannelError> where L::Target: Logger { - match self.send_update_fee(feerate_per_kw) { + match self.send_update_fee(feerate_per_kw, logger) { Some(update_fee) => { let (commitment_signed, monitor_update) = self.send_commitment_no_status_check(logger)?; Ok(Some((update_fee, commitment_signed, monitor_update))) @@ -3046,18 +3273,23 @@ impl Channel { /// which failed. The messages which were generated from that call which generated the /// monitor update failure must *not* have been sent to the remote end, and must instead /// have been dropped. They will be regenerated when monitor_updating_restored is called. 
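The `send_update_fee` guard added above refuses to propose a feerate the holder cannot afford: it projects the commitment fee at the candidate feerate, padded by `CONCURRENT_INBOUND_HTLC_FEE_BUFFER` extra HTLCs, and requires the holder balance to still cover that fee plus the counterparty-selected reserve. A minimal standalone sketch of that arithmetic, using BOLT 3's non-anchor weights as illustrative constants (the helper names here are not LDK APIs):

// BOLT 3 expected weights for a non-anchor commitment transaction (illustrative).
const COMMIT_TX_BASE_WEIGHT: u64 = 724;
const COMMIT_TX_WEIGHT_PER_HTLC: u64 = 172;

// Fee in satoshis for a commitment carrying `num_htlcs` non-dust HTLCs at `feerate_per_kw`
// (satoshis per 1000 weight units).
fn commit_tx_fee_sat(feerate_per_kw: u64, num_htlcs: u64) -> u64 {
    feerate_per_kw * (COMMIT_TX_BASE_WEIGHT + num_htlcs * COMMIT_TX_WEIGHT_PER_HTLC) / 1000
}

// Can we afford `new_feerate_per_kw` while keeping the reserve intact, assuming up to
// `htlc_buffer` additional HTLCs could appear before the fee update is committed?
fn can_afford_new_feerate(holder_balance_msat: u64, reserve_sat: u64,
                          new_feerate_per_kw: u64, nondust_htlcs: u64, htlc_buffer: u64) -> bool {
    let buffer_fee_msat = commit_tx_fee_sat(new_feerate_per_kw, nondust_htlcs + htlc_buffer) * 1000;
    holder_balance_msat >= buffer_fee_msat + reserve_sat * 1000
}

fn main() {
    // 900_000 sat to us, 10_000 sat reserve, bumping to 5_000 sat/kW with 2 HTLCs pending:
    // fee = 5_000 * (724 + 4 * 172) / 1000 = 7_060 sat, well within budget.
    assert!(can_afford_new_feerate(900_000_000, 10_000, 5_000, 2, 2));
}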
- pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) { + pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, + mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, + mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + mut pending_finalized_claimed_htlcs: Vec + ) { self.monitor_pending_revoke_and_ack |= resend_raa; self.monitor_pending_commitment_signed |= resend_commitment; self.monitor_pending_forwards.append(&mut pending_forwards); self.monitor_pending_failures.append(&mut pending_fails); + self.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs); self.channel_state |= ChannelState::MonitorUpdateFailed as u32; } /// Indicates that the latest ChannelMonitor update has been committed by the client /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. - pub fn monitor_updating_restored(&mut self, logger: &L) -> (Option, Option, RAACommitmentOrder, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Option, Option) where L::Target: Logger { + pub fn monitor_updating_restored(&mut self, logger: &L) -> MonitorRestoreUpdates where L::Target: Logger { assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32); self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32); @@ -3080,15 +3312,20 @@ impl Channel { }) } else { None }; - let mut forwards = Vec::new(); - mem::swap(&mut forwards, &mut self.monitor_pending_forwards); - let mut failures = Vec::new(); - mem::swap(&mut failures, &mut self.monitor_pending_failures); + let mut accepted_htlcs = Vec::new(); + mem::swap(&mut accepted_htlcs, &mut self.monitor_pending_forwards); + let mut failed_htlcs = Vec::new(); + mem::swap(&mut failed_htlcs, &mut self.monitor_pending_failures); + let mut finalized_claimed_htlcs = Vec::new(); + mem::swap(&mut finalized_claimed_htlcs, &mut self.monitor_pending_finalized_fulfills); if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 { self.monitor_pending_revoke_and_ack = false; self.monitor_pending_commitment_signed = false; - return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, funding_broadcastable, funding_locked); + return MonitorRestoreUpdates { + raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst, + accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked + }; } let raa = if self.monitor_pending_revoke_and_ack { @@ -3105,7 +3342,9 @@ impl Channel { log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); - (raa, commitment_update, order, forwards, failures, funding_broadcastable, funding_locked) + MonitorRestoreUpdates { + raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked + } } pub fn update_fee(&mut self, fee_estimator: &F, msg: &msgs::UpdateFee) -> Result<(), ChannelError> @@ -3118,7 +3357,7 @@ impl Channel { return Err(ChannelError::Close("Peer sent update_fee when we needed a 
channel_reestablish".to_owned())); } Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw)?; - let feerate_over_dust_buffer = msg.feerate_per_kw > self.get_dust_buffer_feerate(); + let feerate_over_dust_buffer = msg.feerate_per_kw > self.get_dust_buffer_feerate(None); self.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); self.update_time_counter += 1; @@ -3126,8 +3365,8 @@ impl Channel { // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we // won't be pushed over our dust exposure limit by the feerate increase. if feerate_over_dust_buffer { - let inbound_stats = self.get_inbound_pending_htlc_stats(); - let outbound_stats = self.get_outbound_pending_htlc_stats(); + let inbound_stats = self.get_inbound_pending_htlc_stats(None); + let outbound_stats = self.get_outbound_pending_htlc_stats(None); let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { @@ -3491,17 +3730,16 @@ impl Channel { } assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); - let shutdown_scriptpubkey = match ShutdownScript::try_from((msg.scriptpubkey.clone(), their_features)) { - Ok(script) => script.into_inner(), - Err(_) => return Err(ChannelError::Close(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex()))), - }; + if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) { + return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex()))); + } if self.counterparty_shutdown_scriptpubkey.is_some() { - if Some(&shutdown_scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() { - return Err(ChannelError::Close(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", shutdown_scriptpubkey.to_bytes().to_hex()))); + if Some(&msg.scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() { + return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex()))); } } else { - self.counterparty_shutdown_scriptpubkey = Some(shutdown_scriptpubkey); + self.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone()); } // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc @@ -3628,6 +3866,12 @@ impl Channel { }, }; + for outp in closing_tx.trust().built_transaction().output.iter() { + if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned())); + } + } + assert!(self.shutdown_scriptpubkey.is_some()); if let Some((last_fee, sig)) = self.last_sent_closing_fee { if last_fee == msg.fee_satoshis { @@ -3794,7 +4038,7 @@ impl Channel { // channel might have been used to route very small values (either by honest users or as DoS). 
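The `closing_signed` change above also rejects closing transactions that carry a tiny non-segwit output, since such an output would be non-standard and the transaction might never relay. A small sketch of that per-output scan, reusing the rust-bitcoin types this patch already relies on (the 546 sat threshold here is illustrative; the patch compares against `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS`):

use bitcoin::Transaction;

// Illustrative stand-in for MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS.
const DUST_LIMIT_SATOSHIS: u64 = 546;

// True if any output is both non-witness and small enough to be treated as dust by
// default relay policy, in which case the closing_signed should be rejected.
fn closing_tx_has_dust_output(tx: &Transaction) -> bool {
    tx.output.iter().any(|out| {
        !out.script_pubkey.is_witness_program() && out.value < DUST_LIMIT_SATOSHIS
    })
}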
self.channel_value_satoshis * 1000 * 9 / 10, - Channel::::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis) + self.holder_max_htlc_value_in_flight_msat ); } @@ -3823,7 +4067,7 @@ impl Channel { self.feerate_per_kw } - pub fn get_dust_buffer_feerate(&self) -> u32 { + pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option) -> u32 { // When calculating our exposure to dust HTLCs, we assume that the channel feerate // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%, // whichever is higher. This ensures that we aren't suddenly exposed to significantly @@ -3835,6 +4079,9 @@ impl Channel { if let Some((feerate, _)) = self.pending_update_fee { feerate_per_kw = cmp::max(feerate_per_kw, feerate); } + if let Some(feerate) = outbound_feerate_update { + feerate_per_kw = cmp::max(feerate_per_kw, feerate); + } cmp::max(2530, feerate_per_kw * 1250 / 1000) } @@ -3958,6 +4205,7 @@ impl Channel { } pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) { + self.update_time_counter += 1; self.channel_update_status = status; } @@ -4011,7 +4259,7 @@ impl Channel { /// In the first case, we store the confirmation height and calculating the short channel id. /// In the second, we simply return an Err indicating we need to be force-closed now. pub fn transactions_confirmed(&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, logger: &L) - -> Result, msgs::ErrorMessage> where L::Target: Logger { + -> Result, ClosureReason> where L::Target: Logger { let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS); for &(index_in_block, tx) in txdata.iter() { if let Some(funding_txo) = self.get_funding_txo() { @@ -4032,10 +4280,8 @@ impl Channel { panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!"); } self.update_time_counter += 1; - return Err(msgs::ErrorMessage { - channel_id: self.channel_id(), - data: "funding tx had wrong script/value or output index".to_owned() - }); + let err_reason = "funding tx had wrong script/value or output index"; + return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() }); } else { if self.is_outbound() { for input in tx.input.iter() { @@ -4066,10 +4312,7 @@ impl Channel { for inp in tx.input.iter() { if inp.previous_output == funding_txo.into_bitcoin_outpoint() { log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id())); - return Err(msgs::ErrorMessage { - channel_id: self.channel_id(), - data: "Commitment or closing transaction was confirmed on chain.".to_owned() - }); + return Err(ClosureReason::CommitmentTxConfirmed); } } } @@ -4089,9 +4332,12 @@ impl Channel { /// May return some HTLCs (and their payment_hash) which have timed out and should be failed /// back. pub fn best_block_updated(&mut self, height: u32, highest_header_time: u32, logger: &L) - -> Result<(Option, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> where L::Target: Logger { + -> Result<(Option, Vec<(HTLCSource, PaymentHash)>), ClosureReason> where L::Target: Logger { let mut timed_out_htlcs = Vec::new(); - let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER; + // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to + // forward an HTLC when our counterparty should almost certainly just fail it for expiring + // ~now. 
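For dust-exposure purposes the code above does not trust the current feerate alone: `get_dust_buffer_feerate` assumes the feerate can jump to at least 2530 sat/kW (roughly 10 sat/vB) or 125% of the current value, whichever is larger, and now also folds in any outbound feerate update being considered. A worked sketch of that calculation (names are illustrative, not LDK's internals):

use std::cmp;

fn dust_buffer_feerate(current: u32, pending_update: Option<u32>, outbound_update: Option<u32>) -> u32 {
    // Start from the highest feerate we might already be committed to...
    let mut feerate_per_kw = current;
    if let Some(feerate) = pending_update { feerate_per_kw = cmp::max(feerate_per_kw, feerate); }
    if let Some(feerate) = outbound_update { feerate_per_kw = cmp::max(feerate_per_kw, feerate); }
    // ...then take the larger of a 2530 sat/kW floor and a 25% bump.
    cmp::max(2530, feerate_per_kw * 1250 / 1000)
}

fn main() {
    assert_eq!(dust_buffer_feerate(253, None, None), 2530);         // floor dominates at low feerates
    assert_eq!(dust_buffer_feerate(10_000, None, None), 12_500);    // 25% bump dominates at high feerates
    assert_eq!(dust_buffer_feerate(253, None, Some(4_000)), 5_000); // a proposed update is considered too
}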
+ let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS; self.holding_cell_htlc_updates.retain(|htlc_update| { match htlc_update { &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => { @@ -4127,11 +4373,17 @@ impl Channel { // close the channel and hope we can get the latest state on chain (because presumably // the funding transaction is at least still in the mempool of most nodes). if funding_tx_confirmations < self.minimum_depth.unwrap() as i64 / 2 { - return Err(msgs::ErrorMessage { - channel_id: self.channel_id(), - data: format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", self.minimum_depth.unwrap(), funding_tx_confirmations), - }); + let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", + self.minimum_depth.unwrap(), funding_tx_confirmations); + return Err(ClosureReason::ProcessingError { err: err_reason }); } + } else if !self.is_outbound() && self.funding_tx_confirmed_in.is_none() && + height >= self.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { + log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.channel_id)); + // If funding_tx_confirmed_in is unset, the channel must not be active + assert!(non_shutdown_state <= ChannelState::ChannelFunded as u32); + assert_eq!(non_shutdown_state & ChannelState::OurFundingLocked as u32, 0); + return Err(ClosureReason::FundingTimedOut); } Ok((None, timed_out_htlcs)) @@ -4140,7 +4392,7 @@ impl Channel { /// Indicates the funding transaction is no longer confirmed in the main chain. This may /// force-close the channel, but may also indicate a harmless reorganization of a block or two /// before the channel has reached funding_locked and we can just wait for more blocks. - pub fn funding_transaction_unconfirmed(&mut self, logger: &L) -> Result<(), msgs::ErrorMessage> where L::Target: Logger { + pub fn funding_transaction_unconfirmed(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger { if self.funding_tx_confirmation_height != 0 { // We handle the funding disconnection by calling best_block_updated with a height one // below where our funding was connected, implying a reorg back to conf_height - 1. 
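Two timing rules appear in the hunk above: holding-cell HTLCs that would expire within the grace period are failed back instead of forwarded, and an inbound channel whose funding never confirms is closed once the deadline passes. A compact sketch of both predicates (the block counts are illustrative; the patch uses `LATENCY_GRACE_PERIOD_BLOCKS` and `FUNDING_CONF_DEADLINE_BLOCKS`):

// Illustrative values only.
const GRACE_PERIOD_BLOCKS: u32 = 6;
const FUNDING_DEADLINE_BLOCKS: u32 = 2016;

// An un-forwarded HTLC this close to expiry would almost certainly be failed by the next hop,
// so fail it back ourselves rather than forwarding it.
fn should_fail_unforwarded_htlc(cltv_expiry: u32, best_block_height: u32) -> bool {
    cltv_expiry <= best_block_height + GRACE_PERIOD_BLOCKS
}

// Inbound channels give up on a never-confirmed funding transaction once the deadline passes.
fn funding_timed_out(is_outbound: bool, funding_confirmed: bool,
                     channel_creation_height: u32, best_block_height: u32) -> bool {
    !is_outbound && !funding_confirmed
        && best_block_height >= channel_creation_height + FUNDING_DEADLINE_BLOCKS
}

fn main() {
    assert!(should_fail_unforwarded_htlc(100_006, 100_000));
    assert!(!funding_timed_out(false, true, 90_000, 100_000));
}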
@@ -4187,8 +4439,8 @@ impl Channel { funding_satoshis: self.channel_value_satoshis, push_msat: self.channel_value_satoshis * 1000 - self.value_to_self_msat, dust_limit_satoshis: self.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: Channel::::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis), - channel_reserve_satoshis: Channel::::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis), + max_htlc_value_in_flight_msat: self.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.holder_selected_channel_reserve_satoshis, htlc_minimum_msat: self.holder_htlc_minimum_msat, feerate_per_kw: self.feerate_per_kw as u32, to_self_delay: self.get_holder_selected_contest_delay(), @@ -4204,6 +4456,7 @@ impl Channel { Some(script) => script.clone().into_inner(), None => Builder::new().into_script(), }), + channel_type: Some(self.channel_type.clone()), } } @@ -4224,8 +4477,8 @@ impl Channel { msgs::AcceptChannel { temporary_channel_id: self.channel_id, dust_limit_satoshis: self.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: Channel::::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis), - channel_reserve_satoshis: Channel::::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis), + max_htlc_value_in_flight_msat: self.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.holder_selected_channel_reserve_satoshis, htlc_minimum_msat: self.holder_htlc_minimum_msat, minimum_depth: self.minimum_depth.unwrap(), to_self_delay: self.get_holder_selected_contest_delay(), @@ -4246,7 +4499,7 @@ impl Channel { /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) fn get_outbound_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { let counterparty_keys = self.build_remote_transaction_keys()?; - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).0; + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; Ok(self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, &self.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) } @@ -4465,7 +4718,7 @@ impl Channel { /// You MUST call send_commitment prior to calling any other methods on this Channel! /// /// If an Err is returned, it's a ChannelError::Ignore! 
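In `send_htlc` below, an HTLC counts toward dust exposure when its value cannot cover the fee of its second-stage transaction (at the buffered feerate) plus the relevant dust limit. A worked sketch of those thresholds, using BOLT 3's non-anchor HTLC transaction weights and a 546 sat dust limit purely as illustrative values:

// BOLT 3 expected weights for second-stage HTLC transactions without anchors (illustrative).
const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;
const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;

// Threshold under which an HTLC adds to dust exposure on the counterparty's commitment
// (claimed there via a success transaction, hence their dust limit applies).
fn exposure_dust_limit_success_sats(dust_buffer_feerate: u64, counterparty_dust_limit_sats: u64) -> u64 {
    dust_buffer_feerate * HTLC_SUCCESS_TX_WEIGHT / 1000 + counterparty_dust_limit_sats
}

// Threshold under which an HTLC adds to dust exposure on our own commitment
// (claimed there via a timeout transaction, hence our dust limit applies).
fn exposure_dust_limit_timeout_sats(dust_buffer_feerate: u64, holder_dust_limit_sats: u64) -> u64 {
    dust_buffer_feerate * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder_dust_limit_sats
}

fn main() {
    // At a buffered feerate of 2_530 sat/kW: 2_530 * 703 / 1000 + 546 = 2_324 sat, and
    // 2_530 * 663 / 1000 + 546 = 2_223 sat, respectively.
    assert_eq!(exposure_dust_limit_success_sats(2_530, 546), 2_324);
    assert_eq!(exposure_dust_limit_timeout_sats(2_530, 546), 2_223);
}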
- pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result, ChannelError> { + pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result, ChannelError> where L::Target: Logger { if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) { return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned())); } @@ -4492,8 +4745,8 @@ impl Channel { return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned())); } - let inbound_stats = self.get_inbound_pending_htlc_stats(); - let outbound_stats = self.get_outbound_pending_htlc_stats(); + let inbound_stats = self.get_inbound_pending_htlc_stats(None); + let outbound_stats = self.get_outbound_pending_htlc_stats(None); if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 { return Err(ChannelError::Ignore(format!("Cannot push more than their max accepted HTLCs ({})", self.counterparty_max_accepted_htlcs))); } @@ -4502,18 +4755,19 @@ impl Channel { return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.counterparty_max_htlc_value_in_flight_msat))); } + let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?; + let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger); if !self.is_outbound() { // Check that we won't violate the remote channel reserve by adding this HTLC. 
- let counterparty_balance_msat = self.channel_value_satoshis * 1000 - self.value_to_self_msat; - let holder_selected_chan_reserve_msat = Channel::::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) * 1000; let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered); let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_candidate, None); - if counterparty_balance_msat < holder_selected_chan_reserve_msat + counterparty_commit_tx_fee_msat { + let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000; + if commitment_stats.remote_balance_msat < counterparty_commit_tx_fee_msat + holder_selected_chan_reserve_msat { return Err(ChannelError::Ignore("Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_owned())); } } - let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; + let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate(None) as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis; if amount_msat / 1000 < exposure_dust_limit_success_sats { let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + amount_msat; if on_counterparty_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { @@ -4522,7 +4776,7 @@ impl Channel { } } - let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; + let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate(None) as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis; if amount_msat / 1000 < exposure_dust_limit_timeout_sats { let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + amount_msat; if on_holder_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { @@ -4531,9 +4785,9 @@ impl Channel { } } - let pending_value_to_self_msat = self.value_to_self_msat - outbound_stats.pending_htlcs_value_msat; - if pending_value_to_self_msat < amount_msat { - return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. Amount: {}, pending value to self {}", amount_msat, pending_value_to_self_msat))); + let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; + if holder_balance_msat < amount_msat { + return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. Amount: {}, pending value to self {}", amount_msat, holder_balance_msat))); } // `2 *` and extra HTLC are for the fee spike buffer. @@ -4541,14 +4795,14 @@ impl Channel { let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered); FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(())) } else { 0 }; - if pending_value_to_self_msat - amount_msat < commit_tx_fee_msat { - return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. 
local_commit_tx_fee {}", pending_value_to_self_msat, commit_tx_fee_msat))); + if holder_balance_msat - amount_msat < commit_tx_fee_msat { + return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. local_commit_tx_fee {}", holder_balance_msat, commit_tx_fee_msat))); } // Check self.counterparty_selected_channel_reserve_satoshis (the amount we must keep as // reserve for the remote to have something to claim if we misbehave) let chan_reserve_msat = self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000; - if pending_value_to_self_msat - amount_msat - commit_tx_fee_msat < chan_reserve_msat { + if holder_balance_msat - amount_msat - commit_tx_fee_msat < chan_reserve_msat { return Err(ChannelError::Ignore(format!("Cannot send value that would put our balance under counterparty-announced channel reserve value ({})", chan_reserve_msat))); } @@ -4682,9 +4936,8 @@ impl Channel { /// when we shouldn't change HTLC/channel state. fn send_commitment_no_state_update(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger { let counterparty_keys = self.build_remote_transaction_keys()?; - let counterparty_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); - let feerate_per_kw = counterparty_commitment_tx.1; - let counterparty_commitment_txid = counterparty_commitment_tx.0.trust().txid(); + let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); + let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); let (signature, htlc_signatures); #[cfg(any(test, feature = "fuzztarget"))] @@ -4698,7 +4951,7 @@ impl Channel { && info.next_holder_htlc_id == self.next_holder_htlc_id && info.next_counterparty_htlc_id == self.next_counterparty_htlc_id && info.feerate == self.feerate_per_kw { - let actual_fee = self.commit_tx_fee_msat(counterparty_commitment_tx.2); + let actual_fee = Self::commit_tx_fee_msat(self.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.opt_anchors()); assert_eq!(actual_fee, info.fee); } } @@ -4706,25 +4959,25 @@ impl Channel { } { - let mut htlcs = Vec::with_capacity(counterparty_commitment_tx.3.len()); - for &(ref htlc, _) in counterparty_commitment_tx.3.iter() { + let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len()); + for &(ref htlc, _) in commitment_stats.htlcs_included.iter() { htlcs.push(htlc); } - let res = self.holder_signer.sign_counterparty_commitment(&counterparty_commitment_tx.0, &self.secp_ctx) + let res = self.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, &self.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?; signature = res.0; htlc_signatures = res.1; log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", - encode::serialize_hex(&counterparty_commitment_tx.0.trust().built_transaction().transaction), + encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), &counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()), log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id())); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { log_trace!(logger, "Signed 
remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", - encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), - encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &counterparty_keys)), + encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.opt_anchors(), &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), + encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &counterparty_keys)), log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id())); } @@ -4734,7 +4987,7 @@ impl Channel { channel_id: self.channel_id, signature, htlc_signatures, - }, (counterparty_commitment_txid, counterparty_commitment_tx.3))) + }, (counterparty_commitment_txid, commitment_stats.htlcs_included))) } /// Adds a pending outbound HTLC to this channel, and creates a signed commitment transaction @@ -4742,7 +4995,7 @@ impl Channel { /// Shorthand for calling send_htlc() followed by send_commitment(), see docs on those for /// more info. pub fn send_htlc_and_commit(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result, ChannelError> where L::Target: Logger { - match self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet)? { + match self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, logger)? { Some(update_add_htlc) => { let (commitment_signed, monitor_update) = self.send_commitment_no_status_check(logger)?; Ok(Some((update_add_htlc, commitment_signed, monitor_update))) @@ -5147,6 +5400,22 @@ impl Writeable for Channel { htlc.write(writer)?; } + // If the channel type is something other than only-static-remote-key, then we need to have + // older clients fail to deserialize this channel at all. If the type is + // only-static-remote-key, we simply consider it "default" and don't write the channel type + // out at all. + let chan_type = if self.channel_type != ChannelTypeFeatures::only_static_remote_key() { + Some(&self.channel_type) } else { None }; + + // The same logic applies for `holder_selected_channel_reserve_satoshis` and + // `holder_max_htlc_value_in_flight_msat` values other than the defaults. + let serialized_holder_selected_reserve = + if self.holder_selected_channel_reserve_satoshis != Self::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) + { Some(self.holder_selected_channel_reserve_satoshis) } else { None }; + let serialized_holder_htlc_max_in_flight = + if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis) + { Some(self.holder_max_htlc_value_in_flight_msat) } else { None }; + write_tlv_fields!(writer, { (0, self.announcement_sigs, option), // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a @@ -5156,10 +5425,15 @@ impl Writeable for Channel { // and new versions map the default values to None and allow the TLV entries here to // override that. 
(1, self.minimum_depth, option), + (2, chan_type, option), (3, self.counterparty_selected_channel_reserve_satoshis, option), + (4, serialized_holder_selected_reserve, option), (5, self.config, required), + (6, serialized_holder_htlc_max_in_flight, option), (7, self.shutdown_scriptpubkey, option), (9, self.target_closing_feerate_sats_per_kw, option), + (11, self.monitor_pending_finalized_fulfills, vec_type), + (13, self.channel_creation_height, required), }); Ok(()) @@ -5167,9 +5441,10 @@ impl Writeable for Channel { } const MAX_ALLOC_SIZE: usize = 64*1024; -impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel +impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel where K::Target: KeysInterface { - fn read(reader: &mut R, keys_source: &'a K) -> Result { + fn read(reader: &mut R, args: (&'a K, u32)) -> Result { + let (keys_source, serialized_height) = args; let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); let user_id = Readable::read(reader)?; @@ -5393,15 +5668,39 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel let mut announcement_sigs = None; let mut target_closing_feerate_sats_per_kw = None; + let mut monitor_pending_finalized_fulfills = Some(Vec::new()); + let mut holder_selected_channel_reserve_satoshis = Some(Self::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis)); + let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis)); + // Prior to supporting channel type negotiation, all of our channels were static_remotekey + // only, so we default to that if none was written. + let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key()); + let mut channel_creation_height = Some(serialized_height); read_tlv_fields!(reader, { (0, announcement_sigs, option), (1, minimum_depth, option), + (2, channel_type, option), (3, counterparty_selected_channel_reserve_satoshis, option), + (4, holder_selected_channel_reserve_satoshis, option), (5, config, option), // Note that if none is provided we will *not* overwrite the existing one. + (6, holder_max_htlc_value_in_flight_msat, option), (7, shutdown_scriptpubkey, option), (9, target_closing_feerate_sats_per_kw, option), + (11, monitor_pending_finalized_fulfills, vec_type), + (13, channel_creation_height, option), }); + let chan_features = channel_type.as_ref().unwrap(); + if chan_features.supports_unknown_bits() || chan_features.requires_unknown_bits() { + // If the channel was written by a new version and negotiated with features we don't + // understand yet, refuse to read it. + return Err(DecodeError::UnknownRequiredFeature); + } + + if channel_parameters.opt_anchors.is_some() { + // Relax this check when ChannelTypeFeatures supports anchors. 
+ return Err(DecodeError::InvalidValue); + } + let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&keys_source.get_secure_random_bytes()); @@ -5435,6 +5734,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel monitor_pending_commitment_signed, monitor_pending_forwards, monitor_pending_failures, + monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), pending_update_fee, holding_cell_update_fee, @@ -5456,11 +5756,14 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel funding_tx_confirmed_in, funding_tx_confirmation_height, short_channel_id, + channel_creation_height: channel_creation_height.unwrap(), counterparty_dust_limit_satoshis, holder_dust_limit_satoshis, counterparty_max_htlc_value_in_flight_msat, + holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(), counterparty_selected_channel_reserve_satoshis, + holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(), counterparty_htlc_minimum_msat, holder_htlc_minimum_msat, counterparty_max_accepted_htlcs, @@ -5493,6 +5796,8 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel #[cfg(any(test, feature = "fuzztarget"))] historical_inbound_htlc_fulfills, + + channel_type: channel_type.unwrap(), }) } } @@ -5509,17 +5814,17 @@ mod tests { use bitcoin::hashes::hex::FromHex; use hex; use ln::{PaymentPreimage, PaymentHash}; - use ln::channelmanager::{HTLCSource, MppId}; + use ln::channelmanager::{HTLCSource, PaymentId}; use ln::channel::{Channel,InboundHTLCOutput,OutboundHTLCOutput,InboundHTLCState,OutboundHTLCState,HTLCOutputInCommitment,HTLCCandidate,HTLCInitiator,TxCreationKeys}; use ln::channel::MAX_FUNDING_SATOSHIS; use ln::features::InitFeatures; use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate}; use ln::script::ShutdownScript; use ln::chan_utils; - use ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT}; + use ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters, htlc_success_tx_weight, htlc_timeout_tx_weight}; use chain::BestBlock; use chain::chaininterface::{FeeEstimator,ConfirmationTarget}; - use chain::keysinterface::{InMemorySigner, KeysInterface, BaseSign}; + use chain::keysinterface::{InMemorySigner, KeyMaterial, KeysInterface, BaseSign}; use chain::transaction::OutPoint; use util::config::UserConfig; use util::enforcing_trait_impls::EnforcingSigner; @@ -5560,6 +5865,7 @@ mod tests { type Signer = InMemorySigner; fn get_node_secret(&self) -> SecretKey { panic!(); } + fn get_inbound_payment_key_material(&self) -> KeyMaterial { panic!(); } fn get_destination_script(&self) -> Script { let secp_ctx = Secp256k1::signing_only(); let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(); @@ -5602,7 +5908,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - match Channel::::new_outbound(&&fee_estimator, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config) { + match Channel::::new_outbound(&&fee_estimator, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0) { Err(APIError::IncompatibleShutdownScript { script }) => { 
assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner()); }, @@ -5624,7 +5930,7 @@ mod tests { let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = Channel::::new_outbound(&&fee_est, &&keys_provider, node_a_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap(); + let node_a_chan = Channel::::new_outbound(&&fee_est, &&keys_provider, node_a_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0).unwrap(); // Now change the fee so we can check that the fee in the open_channel message is the // same as the old fee. @@ -5642,6 +5948,7 @@ mod tests { let seed = [42; 32]; let network = Network::Testnet; let keys_provider = test_utils::TestKeysInterface::new(&seed, network); + let logger = test_utils::TestLogger::new(); // Go through the flow of opening a channel between two nodes, making sure // they have different dust limits. @@ -5649,13 +5956,13 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = Channel::::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap(); + let mut node_a_chan = Channel::::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let node_b_chan = Channel::::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config).unwrap(); + let node_b_chan = Channel::::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger).unwrap(); // Node B --> Node A: accept channel, explicitly setting B's dust limit. let mut accept_channel_msg = node_b_chan.get_accept_channel(); @@ -5683,7 +5990,9 @@ mod tests { path: Vec::new(), session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(), first_hop_htlc_msat: 548, - mpp_id: MppId([42; 32]), + payment_id: PaymentId([42; 32]), + payment_secret: None, + payee: None, } }); @@ -5691,13 +6000,13 @@ mod tests { // the dust limit check. let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None); - let local_commit_fee_0_htlcs = node_a_chan.commit_tx_fee_msat(0); + let local_commit_fee_0_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.feerate_per_kw, 0, node_a_chan.opt_anchors()); assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs); // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all // of the HTLCs are seen to be above the dust limit. 
node_a_chan.channel_transaction_parameters.is_outbound_from_holder = false; - let remote_commit_fee_3_htlcs = node_a_chan.commit_tx_fee_msat(3); + let remote_commit_fee_3_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.feerate_per_kw, 3, node_a_chan.opt_anchors()); let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs); @@ -5717,20 +6026,20 @@ mod tests { let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut chan = Channel::::new_outbound(&&fee_est, &&keys_provider, node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap(); + let mut chan = Channel::::new_outbound(&&fee_est, &&keys_provider, node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0).unwrap(); - let commitment_tx_fee_0_htlcs = chan.commit_tx_fee_msat(0); - let commitment_tx_fee_1_htlc = chan.commit_tx_fee_msat(1); + let commitment_tx_fee_0_htlcs = Channel::::commit_tx_fee_msat(chan.feerate_per_kw, 0, chan.opt_anchors()); + let commitment_tx_fee_1_htlc = Channel::::commit_tx_fee_msat(chan.feerate_per_kw, 1, chan.opt_anchors()); // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be // counted as dust when it shouldn't be. - let htlc_amt_above_timeout = ((253 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + chan.holder_dust_limit_satoshis + 1) * 1000; + let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.holder_dust_limit_satoshis + 1) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered); let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc); // If swapped: this HTLC would be counted as non-dust when it shouldn't be. - let dust_htlc_amt_below_success = ((253 * HTLC_SUCCESS_TX_WEIGHT / 1000) + chan.holder_dust_limit_satoshis - 1) * 1000; + let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.holder_dust_limit_satoshis - 1) * 1000; let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered); let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs); @@ -5738,13 +6047,13 @@ mod tests { chan.channel_transaction_parameters.is_outbound_from_holder = false; // If swapped: this HTLC would be counted as non-dust when it shouldn't be. - let dust_htlc_amt_above_timeout = ((253 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + chan.counterparty_dust_limit_satoshis + 1) * 1000; + let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.counterparty_dust_limit_satoshis + 1) * 1000; let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered); let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs); // If swapped: this HTLC would be counted as dust when it shouldn't be. 
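The boundary amounts in these tests sit one satoshi on either side of the dust cutoff, which differs depending on whether the broadcaster would claim the HTLC via a timeout or a success transaction. A worked example at the 253 sat/kW feerate the tests use, taking BOLT 3's non-anchor weights and a 546 sat dust limit as illustrative values:

const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;
const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;

fn main() {
    let feerate_per_kw: u64 = 253;
    let dust_limit_sats: u64 = 546;

    // HTLCs the broadcaster offered are claimed back via a timeout tx, so they are dust
    // below fee(timeout) + dust_limit: 253 * 663 / 1000 = 167 sat, and 546 + 167 + 1 = 714 sat
    // is just above the cutoff.
    let above_timeout_cutoff_sat = feerate_per_kw * HTLC_TIMEOUT_TX_WEIGHT / 1000 + dust_limit_sats + 1;
    assert_eq!(above_timeout_cutoff_sat, 714);

    // HTLCs the broadcaster received are claimed via a success tx, so they are dust below
    // fee(success) + dust_limit: 253 * 703 / 1000 = 177 sat, and 546 + 177 - 1 = 722 sat
    // is just below the cutoff.
    let below_success_cutoff_sat = feerate_per_kw * HTLC_SUCCESS_TX_WEIGHT / 1000 + dust_limit_sats - 1;
    assert_eq!(below_success_cutoff_sat, 722);
}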
- let htlc_amt_below_success = ((253 * HTLC_SUCCESS_TX_WEIGHT / 1000) + chan.counterparty_dust_limit_satoshis - 1) * 1000; + let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.counterparty_dust_limit_satoshis - 1) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered); let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc); @@ -5766,12 +6075,12 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = Channel::::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap(); + let mut node_a_chan = Channel::::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0).unwrap(); // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = Channel::::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config).unwrap(); + let mut node_b_chan = Channel::::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger).unwrap(); // Node B --> Node A: accept channel let accept_channel_msg = node_b_chan.get_accept_channel(); @@ -5828,7 +6137,7 @@ mod tests { // Create a channel. let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = Channel::::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config).unwrap(); + let mut node_a_chan = Channel::::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0).unwrap(); assert!(node_a_chan.counterparty_forwarding_info.is_none()); assert_eq!(node_a_chan.holder_htlc_minimum_msat, 1); // the default assert!(node_a_chan.counterparty_forwarding_info().is_none()); @@ -5866,7 +6175,7 @@ mod tests { #[test] fn outbound_commitment_test() { - // Test vectors from BOLT 3 Appendix C: + // Test vectors from BOLT 3 Appendices C and F (anchors): let feeest = TestFeeEstimator{fee_est: 15000}; let logger : Arc = Arc::new(test_utils::TestLogger::new()); let secp_ctx = Secp256k1::new(); @@ -5892,7 +6201,7 @@ mod tests { let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let mut config = UserConfig::default(); config.channel_options.announced_channel = false; - let mut chan = Channel::::new_outbound(&&feeest, &&keys_provider, counterparty_node_id, &InitFeatures::known(), 10_000_000, 100000, 42, &config).unwrap(); // Nothing uses their network key in this test + let mut chan = Channel::::new_outbound(&&feeest, &&keys_provider, counterparty_node_id, &InitFeatures::known(), 10_000_000, 100000, 42, &config, 0).unwrap(); // Nothing uses their network key in this test chan.holder_dust_limit_satoshis = 546; chan.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel @@ -5932,23 +6241,38 @@ mod tests { let keys = 
TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint).unwrap(); macro_rules! test_commitment { - ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, { + ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => { + chan.channel_transaction_parameters.opt_anchors = None; + test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, false, $($remain)*); + }; + } + + macro_rules! test_commitment_with_anchors { + ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => { + chan.channel_transaction_parameters.opt_anchors = Some(()); + test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, true, $($remain)*); + }; + } + + macro_rules! test_commitment_common { + ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, { $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), * } ) => { { let (commitment_tx, htlcs): (_, Vec) = { - let mut res = chan.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger); + let mut commitment_stats = chan.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger); - let htlcs = res.3.drain(..) + let htlcs = commitment_stats.htlcs_included.drain(..) .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None }) .collect(); - (res.0, htlcs) + (commitment_stats.tx, htlcs) }; let trusted_tx = commitment_tx.trust(); let unsigned_tx = trusted_tx.built_transaction(); let redeemscript = chan.get_funding_redeemscript(); let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap(); let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.channel_value_satoshis); - secp_ctx.verify(&sighash, &counterparty_signature, chan.counterparty_funding_pubkey()).unwrap(); + log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction))); + assert!(secp_ctx.verify(&sighash, &counterparty_signature, chan.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig"); let mut per_htlc: Vec<(HTLCOutputInCommitment, Option)> = Vec::new(); per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls @@ -5979,15 +6303,17 @@ mod tests { let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate()); $({ + log_trace!(logger, "verifying htlc {}", $htlc_idx); let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap(); let ref htlc = htlcs[$htlc_idx]; let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.feerate_per_kw, chan.get_counterparty_selected_contest_delay().unwrap(), - &htlc, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); - let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys); - let htlc_sighash = Message::from_slice(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, SigHashType::All)[..]).unwrap(); - secp_ctx.verify(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).unwrap(); + &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); + let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys); + let htlc_sighashtype = if $opt_anchors { 
SigHashType::SinglePlusAnyoneCanPay } else { SigHashType::All }; + let htlc_sighash = Message::from_slice(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype)[..]).unwrap(); + assert!(secp_ctx.verify(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig"); let mut preimage: Option = None; if !htlc.offered { @@ -6002,13 +6328,15 @@ mod tests { } let htlc_sig = htlc_sig_iter.next().unwrap(); - assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx), "output index"); + let num_anchors = if $opt_anchors { 2 } else { 0 }; + assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index"); let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap(); assert_eq!(signature, *(htlc_sig.1).1, "htlc sig"); let index = (htlc_sig.1).0; let channel_parameters = chan.channel_transaction_parameters.as_holder_broadcastable(); let trusted_tx = holder_commitment_tx.trust(); + log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage)))); assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx"); })* @@ -6023,6 +6351,11 @@ mod tests { "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142", "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); + // anchors: simple commitment tx with no HTLCs + test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3", + "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7", + 
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); + chan.pending_inbound_htlcs.push({ let mut out = InboundHTLCOutput{ htlc_id: 0, @@ -6149,6 +6482,40 @@ mod tests { "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" } } ); + // anchors: commitment tx with seven outputs untrimmed (maximum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 644; + + test_commitment_with_anchors!("3045022100e0106830467a558c07544a3de7715610c1147062e7d091deeebe8b5c661cda9402202ad049c1a6d04834317a78483f723c205c9f638d17222aafc620800cc1b6ae35", + "3045022100ef82a405364bfc4007e63a7cc82925a513d79065bdbc216d60b6a4223a323f8a02200716730b8561f3c6d362eaf47f202e99fb30d0557b61b92b5f9134f8e2de3681", + 
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80094a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994e80300000000000022002010f88bf09e56f14fb4543fd26e47b0db50ea5de9cf3fc46434792471082621aed0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a4f996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ef82a405364bfc4007e63a7cc82925a513d79065bdbc216d60b6a4223a323f8a02200716730b8561f3c6d362eaf47f202e99fb30d0557b61b92b5f9134f8e2de368101483045022100e0106830467a558c07544a3de7715610c1147062e7d091deeebe8b5c661cda9402202ad049c1a6d04834317a78483f723c205c9f638d17222aafc620800cc1b6ae3501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "304402205912d91c58016f593d9e46fefcdb6f4125055c41a17b03101eaaa034b9028ab60220520d4d239c85c66e4c75c5b413620b62736e227659d7821b308e2b8ced3e728e", + "30440220473166a5adcca68550bab80403f410a726b5bd855030527e3fefa8c1e4b4fd7b02203b1dc91d8d69039473036cb5c34398b99e8eb90ae500c22130a557b62294b188", + "02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a0200000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205912d91c58016f593d9e46fefcdb6f4125055c41a17b03101eaaa034b9028ab60220520d4d239c85c66e4c75c5b413620b62736e227659d7821b308e2b8ced3e728e834730440220473166a5adcca68550bab80403f410a726b5bd855030527e3fefa8c1e4b4fd7b02203b1dc91d8d69039473036cb5c34398b99e8eb90ae500c22130a557b62294b188012000000000000000000000000000000000000000000000000000000000000000008d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac6851b2756800000000" }, + + { 1, + "3045022100c6b4113678039ee1e43a6cba5e3224ed2355ffc05e365a393afe8843dc9a76860220566d01fd52d65a89ba8595023884f9e8f2e9a310a6b9b85281c0bce06863430c", + "3045022100d0d86307ea55d5daa80f453ad6d64b78fe8a6504aac25407c73e8502c0702c1602206a0809a02aa00c8dc4a53d976bb05d4605d8bb0b7b26b973a5c4e2734d8afbb4", + "02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a0300000000010000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c6b4113678039ee1e43a6cba5e3224ed2355ffc05e365a393afe8843dc9a76860220566d01fd52d65a89ba8595023884f9e8f2e9a310a6b9b85281c0bce06863430c83483045022100d0d86307ea55d5daa80f453ad6d64b78fe8a6504aac25407c73e8502c0702c1602206a0809a02aa00c8dc4a53d976bb05d4605d8bb0b7b26b973a5c4e2734d8afbb401008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" }, + + { 2, 
+ "304402203c3a699fb80a38112aafd73d6e3a9b7d40bc2c3ed8b7fbc182a20f43b215172202204e71821b984d1af52c4b8e2cd4c572578c12a965866130c2345f61e4c2d3fef4", + "304402205bcfa92f83c69289a412b0b6dd4f2a0fe0b0fc2d45bd74706e963257a09ea24902203783e47883e60b86240e877fcbf33d50b1742f65bc93b3162d1be26583b367ee", + "02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a040000000001000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402203c3a699fb80a38112aafd73d6e3a9b7d40bc2c3ed8b7fbc182a20f43b215172202204e71821b984d1af52c4b8e2cd4c572578c12a965866130c2345f61e4c2d3fef48347304402205bcfa92f83c69289a412b0b6dd4f2a0fe0b0fc2d45bd74706e963257a09ea24902203783e47883e60b86240e877fcbf33d50b1742f65bc93b3162d1be26583b367ee012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" }, + + { 3, + "304402200f089bcd20f25475216307d32aa5b6c857419624bfba1da07335f51f6ba4645b02206ce0f7153edfba23b0d4b2afc26bb3157d404368cb8ea0ca7cf78590dcdd28cf", + "3045022100e4516da08f72c7a4f7b2f37aa84a0feb54ae2cc5b73f0da378e81ae0ca8119bf02207751b2628d8e2f62b4b9abccda4866246c1bfcc82e3d416ad562fd212102c28f", + "02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a050000000001000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402200f089bcd20f25475216307d32aa5b6c857419624bfba1da07335f51f6ba4645b02206ce0f7153edfba23b0d4b2afc26bb3157d404368cb8ea0ca7cf78590dcdd28cf83483045022100e4516da08f72c7a4f7b2f37aa84a0feb54ae2cc5b73f0da378e81ae0ca8119bf02207751b2628d8e2f62b4b9abccda4866246c1bfcc82e3d416ad562fd212102c28f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" }, + + { 4, + "3045022100aa72cfaf0965020c73a12c77276c6411ca68c4de36ac1998adf86c917a899a43022060da0a159fecfe0bed37c3962d767f12f90e30fed8a8f34b1301775c21a2bd3a", + "304402203cd12065c2a42963c762e6b1a981e17695616ecb6f9fb33d8b0717cdd7ca0ee4022065500005c491c1dcf2fe9c4024f74b1c90785d572527055a491278f901143904", + "02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a06000000000100000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100aa72cfaf0965020c73a12c77276c6411ca68c4de36ac1998adf86c917a899a43022060da0a159fecfe0bed37c3962d767f12f90e30fed8a8f34b1301775c21a2bd3a8347304402203cd12065c2a42963c762e6b1a981e17695616ecb6f9fb33d8b0717cdd7ca0ee4022065500005c491c1dcf2fe9c4024f74b1c90785d572527055a491278f901143904012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" } + } ); + // commitment tx with six outputs untrimmed (minimum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 648; @@ -6178,6 +6545,35 @@ mod tests { 
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" } } ); + // anchors: commitment tx with six outputs untrimmed (minimum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 645; + + test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312", + "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "30440220446f9e5c375db6a61d6eeee8b59219a30a4a37372afc2670a1a2889c78e9b943022061895f6088fb48b490ab2140a4842c277b64bf25ff591625dd0356e0c96ab7a8", + "3045022100c1621ba26a99c263fd885feff5fda5ca2cc73df080b3a49ecf15164ee244d2a5022037f4cc7fd4441af39a83a0e44c3b1db7d64a4c8080e8697f9e952f85421a34d8", + 
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b28534856132000200000000010000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220446f9e5c375db6a61d6eeee8b59219a30a4a37372afc2670a1a2889c78e9b943022061895f6088fb48b490ab2140a4842c277b64bf25ff591625dd0356e0c96ab7a883483045022100c1621ba26a99c263fd885feff5fda5ca2cc73df080b3a49ecf15164ee244d2a5022037f4cc7fd4441af39a83a0e44c3b1db7d64a4c8080e8697f9e952f85421a34d801008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" }, + + { 1, + "3044022027a3ffcb8a007e3349d75382efbd4b3fb99fcbd479a18555e58697bd1278d5c402205c8303d46211c3ae8975fe84a0df08b4623119fecd03bc93b49d7f7a0c64c710", + "3045022100b697aca55c6fb15e5348bb7387b584815fd15e8dd306afe0c477cb550d0c2d40022050b0f7e370f7604d2fec781fefe86715dbe95dff4dab88d628f509d62f854de1", + "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b28534856132000300000000010000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022027a3ffcb8a007e3349d75382efbd4b3fb99fcbd479a18555e58697bd1278d5c402205c8303d46211c3ae8975fe84a0df08b4623119fecd03bc93b49d7f7a0c64c71083483045022100b697aca55c6fb15e5348bb7387b584815fd15e8dd306afe0c477cb550d0c2d40022050b0f7e370f7604d2fec781fefe86715dbe95dff4dab88d628f509d62f854de1012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" }, + + { 2, + "30440220013975ae356e6daf22a86a29f21c4f35aca82ed8f731a1103c60c74f5ed1c5aa02200350d4e5455cdbcacb7ccf174db5bed8286019e509a113f6b4c5e606ee12c9d7", + "3045022100e69a29f78779577830e73f327073c93168896f1b89432124b7846f5def9cd9cb02204433db3697e6ed7ac89574ca066a749640e0c9e114ac2e0ee4545741fcf7b7e9", + "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b2853485613200040000000001000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220013975ae356e6daf22a86a29f21c4f35aca82ed8f731a1103c60c74f5ed1c5aa02200350d4e5455cdbcacb7ccf174db5bed8286019e509a113f6b4c5e606ee12c9d783483045022100e69a29f78779577830e73f327073c93168896f1b89432124b7846f5def9cd9cb02204433db3697e6ed7ac89574ca066a749640e0c9e114ac2e0ee4545741fcf7b7e901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" }, + + { 3, + "304402205257017423644c7e831f30bc0c334eecfe66e9a6d2e92d157c5bece576b2be4f022047b21cf8e955e22b7471940563922d1a5852fb95459ca32905c7d46a19141664", + "304402204f5de65a624e3f757adffb678bd887eb4e656538c5ea7044922f6ee3eed8a06202206ff6f7bfe73b565343cae76131ac658f1a9c60d3ca2343358cda60b9e35f94c8", + 
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205257017423644c7e831f30bc0c334eecfe66e9a6d2e92d157c5bece576b2be4f022047b21cf8e955e22b7471940563922d1a5852fb95459ca32905c7d46a191416648347304402204f5de65a624e3f757adffb678bd887eb4e656538c5ea7044922f6ee3eed8a06202206ff6f7bfe73b565343cae76131ac658f1a9c60d3ca2343358cda60b9e35f94c8012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" } + } ); + // commitment tx with six outputs untrimmed (maximum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 2069; @@ -6207,6 +6603,35 @@ mod tests { "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" } } ); + // anchors: commitment tx with six outputs untrimmed (maximum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 2060; + + test_commitment_with_anchors!("304402206208aeb34e404bd052ce3f298dfa832891c9d42caec99fe2a0d2832e9690b94302201b034bfcc6fa9faec667a9b7cbfe0b8d85e954aa239b66277887b5088aff08c3", + "304402201ce37a44b95213358c20f44404d6db7a6083bea6f58de6c46547ae41a47c9f8202206db1d45be41373e92f90d346381febbea8c78671b28c153e30ad1db3441a9497", + 
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ab88f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201ce37a44b95213358c20f44404d6db7a6083bea6f58de6c46547ae41a47c9f8202206db1d45be41373e92f90d346381febbea8c78671b28c153e30ad1db3441a94970147304402206208aeb34e404bd052ce3f298dfa832891c9d42caec99fe2a0d2832e9690b94302201b034bfcc6fa9faec667a9b7cbfe0b8d85e954aa239b66277887b5088aff08c301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "30440220011f999016570bbab9f3125377d0f35096b4dbe155f97c20f71829ead2817d1602201f23f7e17f6928734601c5d8613431eed5c90aa41c3106e8c1cb02ce32aacb5d", + "3044022017da96dfb0eb4061fa0162dc6fa6b2e07ecc5040ab5e6cb07be59838460b3e58022079371ffc95002cc1dc2891ec38198c9c25aca8164304fe114f1b55e2ffd1ddd5", + "02000000000101e7f364cf3a554b670767e723ef14b2af7a3eac70bd79dbde9256f384369c062d0200000000010000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220011f999016570bbab9f3125377d0f35096b4dbe155f97c20f71829ead2817d1602201f23f7e17f6928734601c5d8613431eed5c90aa41c3106e8c1cb02ce32aacb5d83473044022017da96dfb0eb4061fa0162dc6fa6b2e07ecc5040ab5e6cb07be59838460b3e58022079371ffc95002cc1dc2891ec38198c9c25aca8164304fe114f1b55e2ffd1ddd501008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" }, + + { 1, + "304402202d2d9681409b0a0987bd4a268ffeb112df85c4c988ac2a3a2475cb00a61912c302206aa4f4d1388b7d3282bc847871af3cca30766cc8f1064e3a41ec7e82221e10f7", + "304402206426d67911aa6ff9b1cb147b093f3f65a37831a86d7c741d999afc0666e1773d022000bb71821650c70ea58d9bcdd03af736c41a5a8159d436c3ee0408a07394dcce", + "02000000000101e7f364cf3a554b670767e723ef14b2af7a3eac70bd79dbde9256f384369c062d0300000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202d2d9681409b0a0987bd4a268ffeb112df85c4c988ac2a3a2475cb00a61912c302206aa4f4d1388b7d3282bc847871af3cca30766cc8f1064e3a41ec7e82221e10f78347304402206426d67911aa6ff9b1cb147b093f3f65a37831a86d7c741d999afc0666e1773d022000bb71821650c70ea58d9bcdd03af736c41a5a8159d436c3ee0408a07394dcce012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" }, + + { 2, + 
"3045022100f51cdaa525b7d4304548c642bb7945215eb5ae7d32874517cde67ca23ab0a12202206286d59e4b19926c6ac844be6f3ab8149a1ddb9c70f5026b7e83e40a6c08e6e1", + "304502210091b16b1ac63b867e7a5ca0344f7b2aa1cdd49d4b72eac86a31e7ec6f069e20640220402bfb571ba3a9c49e3b0061c89303453803d0241059d899222aaac4799b5076", + "02000000000101e7f364cf3a554b670767e723ef14b2af7a3eac70bd79dbde9256f384369c062d040000000001000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f51cdaa525b7d4304548c642bb7945215eb5ae7d32874517cde67ca23ab0a12202206286d59e4b19926c6ac844be6f3ab8149a1ddb9c70f5026b7e83e40a6c08e6e18348304502210091b16b1ac63b867e7a5ca0344f7b2aa1cdd49d4b72eac86a31e7ec6f069e20640220402bfb571ba3a9c49e3b0061c89303453803d0241059d899222aaac4799b507601008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" }, + + { 3, + "304402202f058d99cb5a54f90773d43ba4e7a0089efd9f8269ef2da1b85d48a3e230555402205acc4bd6561830867d45cd7b84bba9fa35ad2b345016471c1737142bc99782c4", + "304402202913f9cacea54efd2316cffa91219def9e0e111977216c1e76e9da80befab14f022000a9a69e8f37ebe4a39107ab50fab0dde537334588f8f412bbaca57b179b87a6", + "02000000000101e7f364cf3a554b670767e723ef14b2af7a3eac70bd79dbde9256f384369c062d05000000000100000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202f058d99cb5a54f90773d43ba4e7a0089efd9f8269ef2da1b85d48a3e230555402205acc4bd6561830867d45cd7b84bba9fa35ad2b345016471c1737142bc99782c48347304402202913f9cacea54efd2316cffa91219def9e0e111977216c1e76e9da80befab14f022000a9a69e8f37ebe4a39107ab50fab0dde537334588f8f412bbaca57b179b87a6012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" } + } ); + // commitment tx with five outputs untrimmed (minimum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 2070; @@ -6231,6 +6656,30 @@ mod tests { "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" } } ); + // anchors: commitment tx with five outputs untrimmed (minimum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 2061; + + 
test_commitment_with_anchors!("3045022100a2faf2ad7e323b2a82e07dc40b6847207ca6ad7b089f2c21dea9a4d37e52d59d02204c9480ce0358eb51d92a4342355a97e272e3cc45f86c612a76a3fe32fc3c4cb4", + "304402204ab07c659412dd2cd6043b1ad811ab215e901b6b5653e08cb3d2fe63d3e3dc57022031c7b3d130f9380ef09581f4f5a15cb6f359a2e0a597146b96c3533a26d6f4cd", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837eab80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a18916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402204ab07c659412dd2cd6043b1ad811ab215e901b6b5653e08cb3d2fe63d3e3dc57022031c7b3d130f9380ef09581f4f5a15cb6f359a2e0a597146b96c3533a26d6f4cd01483045022100a2faf2ad7e323b2a82e07dc40b6847207ca6ad7b089f2c21dea9a4d37e52d59d02204c9480ce0358eb51d92a4342355a97e272e3cc45f86c612a76a3fe32fc3c4cb401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "3045022100e10744f572a2cd1d787c969e894b792afaed21217ee0480df0112d2fa3ef96ea02202af4f66eb6beebc36d8e98719ed6b4be1b181659fcb561fc491d8cfebff3aa85", + "3045022100c3dc3ea50a0ca20e350f97b50c52c5514717cfa36cb9600918caac5cb556842b022049af018d676dde0c8e28ecf325f3ff5c1594261c4f7511d501f9d62d0594d2a2", + "02000000000101cf32732fe2d1387ed4e2335f69ddd3c0f337dabc03269e742531f89d35e161d10200000000010000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e10744f572a2cd1d787c969e894b792afaed21217ee0480df0112d2fa3ef96ea02202af4f66eb6beebc36d8e98719ed6b4be1b181659fcb561fc491d8cfebff3aa8583483045022100c3dc3ea50a0ca20e350f97b50c52c5514717cfa36cb9600918caac5cb556842b022049af018d676dde0c8e28ecf325f3ff5c1594261c4f7511d501f9d62d0594d2a201008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" }, + + { 1, + "3045022100e1f51fb72fec604b029b348a3bb6363454e1869f5b1e24fd736f860c8039f8070220030a2c90186437d8c9b47d4897798c024521b1274991c4cdc125970b346094b1", + "3045022100ec7ade6037e531629f24390ca9713782a04d648065d17fbe6b015981cdb296c202202d61049a6ecba2fb5314f3edcda2361cad187a89bea6e5d15185354d80c0c085", + 
"02000000000101cf32732fe2d1387ed4e2335f69ddd3c0f337dabc03269e742531f89d35e161d1030000000001000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e1f51fb72fec604b029b348a3bb6363454e1869f5b1e24fd736f860c8039f8070220030a2c90186437d8c9b47d4897798c024521b1274991c4cdc125970b346094b183483045022100ec7ade6037e531629f24390ca9713782a04d648065d17fbe6b015981cdb296c202202d61049a6ecba2fb5314f3edcda2361cad187a89bea6e5d15185354d80c0c08501008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" }, + + { 2, + "304402203479f81a1d83c516957679dc98bf91d35deada967739a8e3869e3e8db08246130220053c8e154b97e3019048dcec3d51bfaf396f36861fbda6d33f0e2a57155c8b9f", + "3045022100a558eb5caa04e35a4417c1f0123ac12eec5f6badee28f5764dc6b69486e594f802201589b12784e242f205832d2d032149bd4e79433ec304c05394241fc7dcba5a71", + "02000000000101cf32732fe2d1387ed4e2335f69ddd3c0f337dabc03269e742531f89d35e161d104000000000100000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402203479f81a1d83c516957679dc98bf91d35deada967739a8e3869e3e8db08246130220053c8e154b97e3019048dcec3d51bfaf396f36861fbda6d33f0e2a57155c8b9f83483045022100a558eb5caa04e35a4417c1f0123ac12eec5f6badee28f5764dc6b69486e594f802201589b12784e242f205832d2d032149bd4e79433ec304c05394241fc7dcba5a71012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" } + } ); + // commitment tx with five outputs untrimmed (maximum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 2194; @@ -6255,6 +6704,30 @@ mod tests { "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" } } ); + // anchors: commitment tx with five outputs untrimmed (maximum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 2184; + + test_commitment_with_anchors!("3044022013d326f80ff7607cf366c823fcbbcb7a2b10322484825f151e6c4c756af24b8f02201ba05b9d8beb7cea2947f9f4d9e03f90435e93db2dd48b32eb9ca3f3dd042c79", + "30440220555c05261f72c5b4702d5c83a608630822b473048724b08640d6e75e345094250220448950b74a96a56963928ba5db8b457661a742c855e69d239b3b6ab73de307a3", + 
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837eab80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a4f906a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220555c05261f72c5b4702d5c83a608630822b473048724b08640d6e75e345094250220448950b74a96a56963928ba5db8b457661a742c855e69d239b3b6ab73de307a301473044022013d326f80ff7607cf366c823fcbbcb7a2b10322484825f151e6c4c756af24b8f02201ba05b9d8beb7cea2947f9f4d9e03f90435e93db2dd48b32eb9ca3f3dd042c7901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "304402202e03ba1390998b3487e9a7fefcb66814c09abea0ef1bcc915dbaefbcf310569a02206bd10493a105ac69048e9bcedcb8e3301ef81b55018d911a4afd297297f98d30", + "304402200c3952ca04be0c60dcc0b7873a0829f560607524943554ae4a27d8d967166199022021a68657b88e22f9bf9ac6065be412685aff643d17049f04f2e99e86197dabb1", + "020000000001015b03043e20eb467029305a22af4c3b915e793743f192c5d225cf1d3c6e8c03010200000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202e03ba1390998b3487e9a7fefcb66814c09abea0ef1bcc915dbaefbcf310569a02206bd10493a105ac69048e9bcedcb8e3301ef81b55018d911a4afd297297f98d308347304402200c3952ca04be0c60dcc0b7873a0829f560607524943554ae4a27d8d967166199022021a68657b88e22f9bf9ac6065be412685aff643d17049f04f2e99e86197dabb101008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" }, + + { 1, + "304402201f8a6adda2403bc400c919ea69d72d315337291e00d02cde085ea32953dbc50002202d65230da98df7af8ebefd2b60b457d0945232988ee2d7460a94a77d414a9acc", + "3045022100ea69c9273b8914ac62b5b7082d6ac1da2b7b065ebf2ef3cd6403f5305ce3f26802203d98736ea97638895a898dfcc5ee0d0c55eb496b3964df0bb25d223688ea8b87", + "020000000001015b03043e20eb467029305a22af4c3b915e793743f192c5d225cf1d3c6e8c0301030000000001000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402201f8a6adda2403bc400c919ea69d72d315337291e00d02cde085ea32953dbc50002202d65230da98df7af8ebefd2b60b457d0945232988ee2d7460a94a77d414a9acc83483045022100ea69c9273b8914ac62b5b7082d6ac1da2b7b065ebf2ef3cd6403f5305ce3f26802203d98736ea97638895a898dfcc5ee0d0c55eb496b3964df0bb25d223688ea8b8701008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" }, + + { 2, + "3045022100ea6e4c9b8f56dd9cf5799492a201cdd65b8bc9bc089c3cff34107896ae313f90022034760f7760975cc68e8917a7f62894e25583da7be11af557c4fc402661d0cbf8", + 
"30440220717012f2f7ef6cac590aaf66c2109132c93ffba245959ac62d82e394ba80191302203f00fd9cb37c92c6b0ad4b33e62c3e55b04e5c2cfa0adcca5a9bc49774eeca8a", + "020000000001015b03043e20eb467029305a22af4c3b915e793743f192c5d225cf1d3c6e8c0301040000000001000000019b090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ea6e4c9b8f56dd9cf5799492a201cdd65b8bc9bc089c3cff34107896ae313f90022034760f7760975cc68e8917a7f62894e25583da7be11af557c4fc402661d0cbf8834730440220717012f2f7ef6cac590aaf66c2109132c93ffba245959ac62d82e394ba80191302203f00fd9cb37c92c6b0ad4b33e62c3e55b04e5c2cfa0adcca5a9bc49774eeca8a012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" } + } ); + // commitment tx with four outputs untrimmed (minimum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 2195; @@ -6274,6 +6747,25 @@ mod tests { "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" } } ); + // anchors: commitment tx with four outputs untrimmed (minimum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 2185; + + test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0", + "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5", + 
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "304502210094480e38afb41d10fae299224872f19c53abe23c7033a1c0642c48713e7863a10220726dd9456407682667dc4bd9c66975acb3744961770b5002f7eb9c0df9ef2f3e", + "304402203148dac61513dc0361738cba30cb341a1e580f8acd5ab0149bf65bd670688cf002207e5d9a0fcbbea2c263bc714fa9e9c44d7f582ea447f366119fc614a23de32f1f", + "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc0200000000010000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050048304502210094480e38afb41d10fae299224872f19c53abe23c7033a1c0642c48713e7863a10220726dd9456407682667dc4bd9c66975acb3744961770b5002f7eb9c0df9ef2f3e8347304402203148dac61513dc0361738cba30cb341a1e580f8acd5ab0149bf65bd670688cf002207e5d9a0fcbbea2c263bc714fa9e9c44d7f582ea447f366119fc614a23de32f1f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" }, + + { 1, + "304402200dbde868dbc20c6a2433fe8979ba5e3f966b1c2d1aeb615f1c42e9c938b3495402202eec5f663c8b601c2061c1453d35de22597c137d1907a2feaf714d551035cb6e", + "3045022100b896bded41d7feac7af25c19e35c53037c53b50e73cfd01eb4ba139c7fdf231602203a3be049d3d89396c4dc766d82ce31e237da8bc3a93e2c7d35992d1932d9cfeb", + "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc030000000001000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402200dbde868dbc20c6a2433fe8979ba5e3f966b1c2d1aeb615f1c42e9c938b3495402202eec5f663c8b601c2061c1453d35de22597c137d1907a2feaf714d551035cb6e83483045022100b896bded41d7feac7af25c19e35c53037c53b50e73cfd01eb4ba139c7fdf231602203a3be049d3d89396c4dc766d82ce31e237da8bc3a93e2c7d35992d1932d9cfeb012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" } + } ); + // commitment tx with four outputs untrimmed (maximum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 3702; @@ -6293,6 +6785,25 @@ mod tests { 
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" } } ); + // anchors: commitment tx with four outputs untrimmed (maximum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 3686; + + test_commitment_with_anchors!("30440220784485cf7a0ad7979daf2c858ffdaf5298d0020cea7aea466843e7948223bd9902206031b81d25e02a178c64e62f843577fdcdfc7a1decbbfb54cd895de692df85ca", + "3045022100c268496aad5c3f97f25cf41c1ba5483a12982de29b222051b6de3daa2229413b02207f3c82d77a2c14f0096ed9bb4c34649483bb20fa71f819f71af44de6593e8bb2", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a29896a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c268496aad5c3f97f25cf41c1ba5483a12982de29b222051b6de3daa2229413b02207f3c82d77a2c14f0096ed9bb4c34649483bb20fa71f819f71af44de6593e8bb2014730440220784485cf7a0ad7979daf2c858ffdaf5298d0020cea7aea466843e7948223bd9902206031b81d25e02a178c64e62f843577fdcdfc7a1decbbfb54cd895de692df85ca01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "304402202cfe6618926ca9f1574f8c4659b425e9790b4677ba2248d77901290806130ffe02204ab37bb0287abcdb8b750b018d41a09effe37cb65ff801fa70d3f1a416599841", + "3044022030b318139715e3b34f19be852cc01c1c0e1599e8b926a73df2bfb70dd186ddee022062a2b7398aed9f563b4014da04a1a99debd0ff663ceece68a547df5982dc2d72", + "020000000001012c32e55722e4b96324d8e5b398d583a20780b25202816adc32dc3157dee731c90200000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202cfe6618926ca9f1574f8c4659b425e9790b4677ba2248d77901290806130ffe02204ab37bb0287abcdb8b750b018d41a09effe37cb65ff801fa70d3f1a41659984183473044022030b318139715e3b34f19be852cc01c1c0e1599e8b926a73df2bfb70dd186ddee022062a2b7398aed9f563b4014da04a1a99debd0ff663ceece68a547df5982dc2d7201008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" }, + + { 1, + 
"30440220687af8544d335376620a6f4b5412bfd0da48de047c1785674f26e669d4a3ff82022058591c1e3a6c50017427d38a8f756eb685bdab88ec73838eed3530048861f9d5", + "30440220109f1a62b5a13d28d5b7634dd7693b1d5994eb404c4bb4a9a80aa540d3984d170220307251107ff8499a23e99abce7dda4f1c707c98abddb9405a83de0081cde8ace", + "020000000001012c32e55722e4b96324d8e5b398d583a20780b25202816adc32dc3157dee731c90300000000010000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220687af8544d335376620a6f4b5412bfd0da48de047c1785674f26e669d4a3ff82022058591c1e3a6c50017427d38a8f756eb685bdab88ec73838eed3530048861f9d5834730440220109f1a62b5a13d28d5b7634dd7693b1d5994eb404c4bb4a9a80aa540d3984d170220307251107ff8499a23e99abce7dda4f1c707c98abddb9405a83de0081cde8ace012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" } + } ); + // commitment tx with three outputs untrimmed (minimum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 3703; @@ -6307,6 +6818,20 @@ mod tests { "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" } } ); + // anchors: commitment tx with three outputs untrimmed (minimum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 3687; + + test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377", + "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d", + 
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "3045022100b287bb8e079a62dcb3aaa8b6c67c0f434a87ebf64ab0bcfb2fc14b55576b859f02206d37c2eb5fd04cfc9eb0534c76a28a98da251b84a931377cce307af39dfaed74", + "3045022100a497c64faea286ec4221f48628086dc6403fd7b60a23c4176e8ebbca15ae70dc0220754e20e968e96cf6421fd2a672c8c26d3bc6e19218cfc8fc2aa51fce026c14b1", + "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a600200000000010000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b287bb8e079a62dcb3aaa8b6c67c0f434a87ebf64ab0bcfb2fc14b55576b859f02206d37c2eb5fd04cfc9eb0534c76a28a98da251b84a931377cce307af39dfaed7483483045022100a497c64faea286ec4221f48628086dc6403fd7b60a23c4176e8ebbca15ae70dc0220754e20e968e96cf6421fd2a672c8c26d3bc6e19218cfc8fc2aa51fce026c14b1012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" } + } ); + // commitment tx with three outputs untrimmed (maximum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 4914; @@ -6321,6 +6846,20 @@ mod tests { "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" } } ); + // anchors: commitment tx with three outputs untrimmed (maximum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 4893; + + test_commitment_with_anchors!("3045022100a8771147109e4d3f44a5976c3c3de98732bbb77308d21444dbe0d76faf06480e02200b4e916e850c3d1f918de87bbbbb07843ffea1d4658dfe060b6f9ccd96d34be8", + 
"30440220086288faceab47461eb2d808e9e9b0cb3ffc24a03c2f18db7198247d38f10e58022031d1c2782a58c8c6ce187d0019eb47a83babdf3040e2caff299ab48f7e12b1fa", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a87856a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220086288faceab47461eb2d808e9e9b0cb3ffc24a03c2f18db7198247d38f10e58022031d1c2782a58c8c6ce187d0019eb47a83babdf3040e2caff299ab48f7e12b1fa01483045022100a8771147109e4d3f44a5976c3c3de98732bbb77308d21444dbe0d76faf06480e02200b4e916e850c3d1f918de87bbbbb07843ffea1d4658dfe060b6f9ccd96d34be801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "30450221008db80f8531104820b3e894492b4463f074f965b542e1b5c153ddfb108a5ea642022030b203d857a2b3581c2087a7bf17c95d04fadc1c6cdae88c620477f2dccb1ee4", + "3045022100e5fbae857c47dbfc050a05924bd449fc9804798bd6442002c578437dc34450810220296589bc387645512345299e307116aaac4ce9fc752abcd1936b802d03526312", + "02000000000101d515a15e9175fd315bb8d4e768f28684801a9e5a9acdfeba34f7b3b3b3a9ba1d0200000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008db80f8531104820b3e894492b4463f074f965b542e1b5c153ddfb108a5ea642022030b203d857a2b3581c2087a7bf17c95d04fadc1c6cdae88c620477f2dccb1ee483483045022100e5fbae857c47dbfc050a05924bd449fc9804798bd6442002c578437dc34450810220296589bc387645512345299e307116aaac4ce9fc752abcd1936b802d03526312012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" } + } ); + // commitment tx with two outputs untrimmed (minimum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 4915; @@ -6329,6 +6868,14 @@ mod tests { "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5", "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); + // anchors: commitment tx with two outputs untrimmed (minimum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 4894; + + 
test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95", + "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); + // commitment tx with two outputs untrimmed (maximum feerate) chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 9651180; @@ -6345,6 +6892,14 @@ mod tests { "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379", "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); + // anchors: commitment tx with one output untrimmed (minimum feerate) + chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.feerate_per_kw = 6216010; + + test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf", + "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); + // commitment tx with fee greater than funder amount chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 chan.feerate_per_kw = 9651936; @@ 
-6411,6 +6966,24 @@ mod tests { "30440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e511", "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3402000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f014730440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e51101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" } } ); + + test_commitment_with_anchors!("3045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a68948", + "3045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e45", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aae9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e4501483045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a6894801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + + { 0, + "304402202060a5acb12105e92f27d7b86e6caf1e003d9d82068338e5a8a9a0d14cba11260220030ca4dba8fad24a2e395906220c991eccd5369bc4b0f216d217b5f86d1fc61d", + "3044022044f5425fe630fa614f349f55642e4a0b76e2583054b21543821660d9e8f3735702207f70424835b541874ca8bf0443cca4028afa2f6c03a17b0688df85d5c44eeefc", + 
"02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe29020000000001000000011e070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202060a5acb12105e92f27d7b86e6caf1e003d9d82068338e5a8a9a0d14cba11260220030ca4dba8fad24a2e395906220c991eccd5369bc4b0f216d217b5f86d1fc61d83473044022044f5425fe630fa614f349f55642e4a0b76e2583054b21543821660d9e8f3735702207f70424835b541874ca8bf0443cca4028afa2f6c03a17b0688df85d5c44eeefc012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" }, + { 1, + "304402206fde7eb6d7a47fdc63705d3db2169054e229f10342dea66f150b163381f48a0802201be28509c2de9be4b7ab72c569c6fd51c0ce0904fea459142f31d442cd043eb8", + "3045022100ad0236a78dbd029d3a8f583f7f82ee62892273d45303d00ef5a03fecf8903a36022004b2db33f8ff2f4a08ca6127c9cbfd9144c691a2feb9287e36ae6bc7c83c5a5f", + "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe2903000000000100000001e0120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fde7eb6d7a47fdc63705d3db2169054e229f10342dea66f150b163381f48a0802201be28509c2de9be4b7ab72c569c6fd51c0ce0904fea459142f31d442cd043eb883483045022100ad0236a78dbd029d3a8f583f7f82ee62892273d45303d00ef5a03fecf8903a36022004b2db33f8ff2f4a08ca6127c9cbfd9144c691a2feb9287e36ae6bc7c83c5a5f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" }, + { 2, + "304402205eebc78d8ae6a36c27ef80172359eb757fb18e99fa75b28c37ffe3444b967bc7022060a01c33398d4d8244c42c762fb699e9f61c1f034ff976df2c94350c5a6032a7", + "3045022100ad3fd523594e1b876316401774a30ee6c48bb7fa0efd768bf9a2d022201311ff02207bed627ed8e01041137f03dbaf03c836970be27a4d50f69d90cf1282ff2815e3", + "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe2904000000000100000001e0120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205eebc78d8ae6a36c27ef80172359eb757fb18e99fa75b28c37ffe3444b967bc7022060a01c33398d4d8244c42c762fb699e9f61c1f034ff976df2c94350c5a6032a783483045022100ad3fd523594e1b876316401774a30ee6c48bb7fa0efd768bf9a2d022201311ff02207bed627ed8e01041137f03dbaf03c836970be27a4d50f69d90cf1282ff2815e301008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" } + } ); } #[test] diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index c06fe0075ec..542b9a7169b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -36,27 +36,27 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1; use chain; -use chain::{Confirm, Watch, BestBlock}; +use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock}; use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; -use chain::channelmonitor::{ChannelMonitor, 
ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; +use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; use chain::transaction::{OutPoint, TransactionData}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use ln::{PaymentHash, PaymentPreimage, PaymentSecret}; use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch}; use ln::features::{InitFeatures, NodeFeatures}; -use routing::router::{Route, RouteHop}; +use routing::router::{Payee, Route, RouteHop, RoutePath, RouteParameters}; use ln::msgs; use ln::msgs::NetAddress; use ln::onion_utils; -use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField}; +use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField}; use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner}; use util::config::UserConfig; -use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; +use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use util::{byte_utils, events}; use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer}; use util::chacha20::{ChaCha20, ChaChaReader}; -use util::logger::{Logger, Level}; +use util::logger::{Level, Logger}; use util::errors::APIError; use io; @@ -67,10 +67,299 @@ use io::{Cursor, Read}; use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard}; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; -#[cfg(any(test, feature = "allow_wallclock_use"))] -use std::time::Instant; use core::ops::Deref; +#[cfg(any(test, feature = "std"))] +use std::time::Instant; + +mod inbound_payment { + use alloc::string::ToString; + use bitcoin::hashes::{Hash, HashEngine}; + use bitcoin::hashes::cmp::fixed_time_eq; + use bitcoin::hashes::hmac::{Hmac, HmacEngine}; + use bitcoin::hashes::sha256::Hash as Sha256; + use chain::keysinterface::{KeyMaterial, KeysInterface, Sign}; + use ln::{PaymentHash, PaymentPreimage, PaymentSecret}; + use ln::channelmanager::APIError; + use ln::msgs; + use ln::msgs::MAX_VALUE_MSAT; + use util::chacha20::ChaCha20; + use util::logger::Logger; + + use core::convert::TryInto; + use core::ops::Deref; + + const IV_LEN: usize = 16; + const METADATA_LEN: usize = 16; + const METADATA_KEY_LEN: usize = 32; + const AMT_MSAT_LEN: usize = 8; + // Used to shift the payment type bits to take up the top 3 bits of the metadata bytes, or to + // retrieve said payment type bits. + const METHOD_TYPE_OFFSET: usize = 5; + + /// A set of keys that were HKDF-expanded from an initial call to + /// [`KeysInterface::get_inbound_payment_key_material`]. + /// + /// [`KeysInterface::get_inbound_payment_key_material`]: crate::chain::keysinterface::KeysInterface::get_inbound_payment_key_material + pub(super) struct ExpandedKey { + /// The key used to encrypt the bytes containing the payment metadata (i.e. the amount and + /// expiry, included for payment verification on decryption). 
+ metadata_key: [u8; 32], + /// The key used to authenticate an LDK-provided payment hash and metadata as previously + /// registered with LDK. + ldk_pmt_hash_key: [u8; 32], + /// The key used to authenticate a user-provided payment hash and metadata as previously + /// registered with LDK. + user_pmt_hash_key: [u8; 32], + } + + impl ExpandedKey { + pub(super) fn new(key_material: &KeyMaterial) -> ExpandedKey { + hkdf_extract_expand(b"LDK Inbound Payment Key Expansion", &key_material) + } + } + + enum Method { + LdkPaymentHash = 0, + UserPaymentHash = 1, + } + + impl Method { + fn from_bits(bits: u8) -> Result { + match bits { + bits if bits == Method::LdkPaymentHash as u8 => Ok(Method::LdkPaymentHash), + bits if bits == Method::UserPaymentHash as u8 => Ok(Method::UserPaymentHash), + unknown => Err(unknown), + } + } + } + + pub(super) fn create(keys: &ExpandedKey, min_value_msat: Option, invoice_expiry_delta_secs: u32, keys_manager: &K, highest_seen_timestamp: u64) -> Result<(PaymentHash, PaymentSecret), ()> + where K::Target: KeysInterface + { + let metadata_bytes = construct_metadata_bytes(min_value_msat, Method::LdkPaymentHash, invoice_expiry_delta_secs, highest_seen_timestamp)?; + + let mut iv_bytes = [0 as u8; IV_LEN]; + let rand_bytes = keys_manager.get_secure_random_bytes(); + iv_bytes.copy_from_slice(&rand_bytes[..IV_LEN]); + + let mut hmac = HmacEngine::::new(&keys.ldk_pmt_hash_key); + hmac.input(&iv_bytes); + hmac.input(&metadata_bytes); + let payment_preimage_bytes = Hmac::from_engine(hmac).into_inner(); + + let ldk_pmt_hash = PaymentHash(Sha256::hash(&payment_preimage_bytes).into_inner()); + let payment_secret = construct_payment_secret(&iv_bytes, &metadata_bytes, &keys.metadata_key); + Ok((ldk_pmt_hash, payment_secret)) + } + + pub(super) fn create_from_hash(keys: &ExpandedKey, min_value_msat: Option, payment_hash: PaymentHash, invoice_expiry_delta_secs: u32, highest_seen_timestamp: u64) -> Result { + let metadata_bytes = construct_metadata_bytes(min_value_msat, Method::UserPaymentHash, invoice_expiry_delta_secs, highest_seen_timestamp)?; + + let mut hmac = HmacEngine::::new(&keys.user_pmt_hash_key); + hmac.input(&metadata_bytes); + hmac.input(&payment_hash.0); + let hmac_bytes = Hmac::from_engine(hmac).into_inner(); + + let mut iv_bytes = [0 as u8; IV_LEN]; + iv_bytes.copy_from_slice(&hmac_bytes[..IV_LEN]); + + Ok(construct_payment_secret(&iv_bytes, &metadata_bytes, &keys.metadata_key)) + } + + fn construct_metadata_bytes(min_value_msat: Option, payment_type: Method, invoice_expiry_delta_secs: u32, highest_seen_timestamp: u64) -> Result<[u8; METADATA_LEN], ()> { + if min_value_msat.is_some() && min_value_msat.unwrap() > MAX_VALUE_MSAT { + return Err(()); + } + + let mut min_amt_msat_bytes: [u8; AMT_MSAT_LEN] = match min_value_msat { + Some(amt) => amt.to_be_bytes(), + None => [0; AMT_MSAT_LEN], + }; + min_amt_msat_bytes[0] |= (payment_type as u8) << METHOD_TYPE_OFFSET; + + // We assume that highest_seen_timestamp is pretty close to the current time - it's updated when + // we receive a new block with the maximum time we've seen in a header. It should never be more + // than two hours in the future. Thus, we add two hours here as a buffer to ensure we + // absolutely never fail a payment too early. + // Note that we assume that received blocks have reasonably up-to-date timestamps. 
+ let expiry_bytes = (highest_seen_timestamp + invoice_expiry_delta_secs as u64 + 7200).to_be_bytes(); + + let mut metadata_bytes: [u8; METADATA_LEN] = [0; METADATA_LEN]; + metadata_bytes[..AMT_MSAT_LEN].copy_from_slice(&min_amt_msat_bytes); + metadata_bytes[AMT_MSAT_LEN..].copy_from_slice(&expiry_bytes); + + Ok(metadata_bytes) + } + + fn construct_payment_secret(iv_bytes: &[u8; IV_LEN], metadata_bytes: &[u8; METADATA_LEN], metadata_key: &[u8; METADATA_KEY_LEN]) -> PaymentSecret { + let mut payment_secret_bytes: [u8; 32] = [0; 32]; + let (iv_slice, encrypted_metadata_slice) = payment_secret_bytes.split_at_mut(IV_LEN); + iv_slice.copy_from_slice(iv_bytes); + + let chacha_block = ChaCha20::get_single_block(metadata_key, iv_bytes); + for i in 0..METADATA_LEN { + encrypted_metadata_slice[i] = chacha_block[i] ^ metadata_bytes[i]; + } + PaymentSecret(payment_secret_bytes) + } + + /// Check that an inbound payment's `payment_data` field is sane. + /// + /// LDK does not store any data for pending inbound payments. Instead, we construct our payment + /// secret (and, if supplied by LDK, our payment preimage) to include encrypted metadata about the + /// payment. + /// + /// The metadata is constructed as: + /// payment method (3 bits) || payment amount (8 bytes - 3 bits) || expiry (8 bytes) + /// and encrypted using a key derived from [`KeysInterface::get_inbound_payment_key_material`]. + /// + /// Then on payment receipt, we verify in this method that the payment preimage and payment secret + /// match what was constructed. + /// + /// [`create_inbound_payment`] and [`create_inbound_payment_for_hash`] are called by the user to + /// construct the payment secret and/or payment hash that this method is verifying. If the former + /// method is called, then the payment method bits mentioned above are represented internally as + /// [`Method::LdkPaymentHash`]. If the latter, [`Method::UserPaymentHash`]. + /// + /// For the former method, the payment preimage is constructed as an HMAC of payment metadata and + /// random bytes. Because the payment secret is also encoded with these random bytes and metadata + /// (with the metadata encrypted with a block cipher), we're able to authenticate the preimage on + /// payment receipt. + /// + /// For the latter, the payment secret instead contains an HMAC of the user-provided payment hash + /// and payment metadata (encrypted with a block cipher), allowing us to authenticate the payment + /// hash and metadata on payment receipt. + /// + /// See [`ExpandedKey`] docs for more info on the individual keys used. + /// + /// [`KeysInterface::get_inbound_payment_key_material`]: crate::chain::keysinterface::KeysInterface::get_inbound_payment_key_material + /// [`create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment + /// [`create_inbound_payment_for_hash`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash + pub(super) fn verify(payment_hash: PaymentHash, payment_data: msgs::FinalOnionHopData, highest_seen_timestamp: u64, keys: &ExpandedKey, logger: &L) -> Result, ()> + where L::Target: Logger + { + let (iv_bytes, metadata_bytes) = decrypt_metadata(payment_data.payment_secret, keys); + + let payment_type_res = Method::from_bits((metadata_bytes[0] & 0b1110_0000) >> METHOD_TYPE_OFFSET); + let mut amt_msat_bytes = [0; AMT_MSAT_LEN]; + amt_msat_bytes.copy_from_slice(&metadata_bytes[..AMT_MSAT_LEN]); + // Zero out the bits reserved to indicate the payment type. 
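// Illustrative sketch, not from this patch: how construct_metadata_bytes above shares the
// 8-byte amount field with the 3-bit payment-type tag. Any amount no larger than
// MAX_VALUE_MSAT fits in 61 bits, so the top three bits of its big-endian encoding are
// always zero and can carry the Method bits, to be masked back out on decode.
// METHOD_TYPE_OFFSET mirrors the constant defined above; the MAX_VALUE_MSAT literal
// (21 million BTC in millisatoshis) and the helper names are assumptions of this sketch.
const METHOD_TYPE_OFFSET: usize = 5;
const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000;

fn pack_amt_and_method(min_amt_msat: u64, method_bits: u8) -> [u8; 8] {
	assert!(min_amt_msat <= MAX_VALUE_MSAT); // guarantees the top three bits are free
	let mut bytes = min_amt_msat.to_be_bytes();
	bytes[0] |= method_bits << METHOD_TYPE_OFFSET;
	bytes
}

fn unpack_amt_and_method(bytes: [u8; 8]) -> (u64, u8) {
	let method_bits = (bytes[0] & 0b1110_0000) >> METHOD_TYPE_OFFSET;
	let mut amt = bytes;
	amt[0] &= 0b0001_1111;
	(u64::from_be_bytes(amt), method_bits)
}

#[test]
fn amt_and_method_round_trip() {
	let (amt, method) = unpack_amt_and_method(pack_amt_and_method(250_000, 1));
	assert_eq!((amt, method), (250_000, 1));
}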
+ amt_msat_bytes[0] &= 0b00011111; + let min_amt_msat: u64 = u64::from_be_bytes(amt_msat_bytes.into()); + let expiry = u64::from_be_bytes(metadata_bytes[AMT_MSAT_LEN..].try_into().unwrap()); + + // Make sure to check to check the HMAC before doing the other checks below, to mitigate timing + // attacks. + let mut payment_preimage = None; + match payment_type_res { + Ok(Method::UserPaymentHash) => { + let mut hmac = HmacEngine::::new(&keys.user_pmt_hash_key); + hmac.input(&metadata_bytes[..]); + hmac.input(&payment_hash.0); + if !fixed_time_eq(&iv_bytes, &Hmac::from_engine(hmac).into_inner().split_at_mut(IV_LEN).0) { + log_trace!(logger, "Failing HTLC with user-generated payment_hash {}: unexpected payment_secret", log_bytes!(payment_hash.0)); + return Err(()) + } + }, + Ok(Method::LdkPaymentHash) => { + match derive_ldk_payment_preimage(payment_hash, &iv_bytes, &metadata_bytes, keys) { + Ok(preimage) => payment_preimage = Some(preimage), + Err(bad_preimage_bytes) => { + log_trace!(logger, "Failing HTLC with payment_hash {} due to mismatching preimage {}", log_bytes!(payment_hash.0), log_bytes!(bad_preimage_bytes)); + return Err(()) + } + } + }, + Err(unknown_bits) => { + log_trace!(logger, "Failing HTLC with payment hash {} due to unknown payment type {}", log_bytes!(payment_hash.0), unknown_bits); + return Err(()); + } + } + + if payment_data.total_msat < min_amt_msat { + log_trace!(logger, "Failing HTLC with payment_hash {} due to total_msat {} being less than the minimum amount of {} msat", log_bytes!(payment_hash.0), payment_data.total_msat, min_amt_msat); + return Err(()) + } + + if expiry < highest_seen_timestamp { + log_trace!(logger, "Failing HTLC with payment_hash {}: expired payment", log_bytes!(payment_hash.0)); + return Err(()) + } + + Ok(payment_preimage) + } + + pub(super) fn get_payment_preimage(payment_hash: PaymentHash, payment_secret: PaymentSecret, keys: &ExpandedKey) -> Result { + let (iv_bytes, metadata_bytes) = decrypt_metadata(payment_secret, keys); + + match Method::from_bits((metadata_bytes[0] & 0b1110_0000) >> METHOD_TYPE_OFFSET) { + Ok(Method::LdkPaymentHash) => { + derive_ldk_payment_preimage(payment_hash, &iv_bytes, &metadata_bytes, keys) + .map_err(|bad_preimage_bytes| APIError::APIMisuseError { + err: format!("Payment hash {} did not match decoded preimage {}", log_bytes!(payment_hash.0), log_bytes!(bad_preimage_bytes)) + }) + }, + Ok(Method::UserPaymentHash) => Err(APIError::APIMisuseError { + err: "Expected payment type to be LdkPaymentHash, instead got UserPaymentHash".to_string() + }), + Err(other) => Err(APIError::APIMisuseError { err: format!("Unknown payment type: {}", other) }), + } + } + + fn decrypt_metadata(payment_secret: PaymentSecret, keys: &ExpandedKey) -> ([u8; IV_LEN], [u8; METADATA_LEN]) { + let mut iv_bytes = [0; IV_LEN]; + let (iv_slice, encrypted_metadata_bytes) = payment_secret.0.split_at(IV_LEN); + iv_bytes.copy_from_slice(iv_slice); + + let chacha_block = ChaCha20::get_single_block(&keys.metadata_key, &iv_bytes); + let mut metadata_bytes: [u8; METADATA_LEN] = [0; METADATA_LEN]; + for i in 0..METADATA_LEN { + metadata_bytes[i] = chacha_block[i] ^ encrypted_metadata_bytes[i]; + } + + (iv_bytes, metadata_bytes) + } + + // Errors if the payment preimage doesn't match `payment_hash`. Returns the bad preimage bytes in + // this case. 
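// Illustrative sketch, not from this patch: construct_payment_secret and decrypt_metadata
// above are the same operation because XORing with a stream-cipher keystream is its own
// inverse. The fixed keystream below stands in for the single ChaCha20 block keyed by
// metadata_key and the IV; the byte values and helper name are made up for this sketch.
fn xor_with_keystream(buf: &mut [u8; 16], keystream: &[u8; 16]) {
	for (b, k) in buf.iter_mut().zip(keystream.iter()) {
		*b ^= k;
	}
}

#[test]
fn stream_xor_round_trip() {
	let keystream = [0x42u8; 16]; // placeholder for the ChaCha20 block
	let plaintext = *b"metadata_example"; // 16 bytes, like METADATA_LEN
	let mut buf = plaintext;
	xor_with_keystream(&mut buf, &keystream); // encrypt into the payment secret
	xor_with_keystream(&mut buf, &keystream); // decrypt on payment receipt
	assert_eq!(buf, plaintext);
}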
+ fn derive_ldk_payment_preimage(payment_hash: PaymentHash, iv_bytes: &[u8; IV_LEN], metadata_bytes: &[u8; METADATA_LEN], keys: &ExpandedKey) -> Result { + let mut hmac = HmacEngine::::new(&keys.ldk_pmt_hash_key); + hmac.input(iv_bytes); + hmac.input(metadata_bytes); + let decoded_payment_preimage = Hmac::from_engine(hmac).into_inner(); + if !fixed_time_eq(&payment_hash.0, &Sha256::hash(&decoded_payment_preimage).into_inner()) { + return Err(decoded_payment_preimage); + } + return Ok(PaymentPreimage(decoded_payment_preimage)) + } + + fn hkdf_extract_expand(salt: &[u8], ikm: &KeyMaterial) -> ExpandedKey { + let mut hmac = HmacEngine::::new(salt); + hmac.input(&ikm.0); + let prk = Hmac::from_engine(hmac).into_inner(); + let mut hmac = HmacEngine::::new(&prk[..]); + hmac.input(&[1; 1]); + let metadata_key = Hmac::from_engine(hmac).into_inner(); + + let mut hmac = HmacEngine::::new(&prk[..]); + hmac.input(&metadata_key); + hmac.input(&[2; 1]); + let ldk_pmt_hash_key = Hmac::from_engine(hmac).into_inner(); + + let mut hmac = HmacEngine::::new(&prk[..]); + hmac.input(&ldk_pmt_hash_key); + hmac.input(&[3; 1]); + let user_pmt_hash_key = Hmac::from_engine(hmac).into_inner(); + + ExpandedKey { + metadata_key, + ldk_pmt_hash_key, + user_pmt_hash_key, + } + } +} + // We hold various information about HTLC relay in the HTLC objects in Channel itself: // // Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should @@ -145,7 +434,7 @@ pub(super) enum HTLCForwardInfo { } /// Tracks the inbound corresponding to an outbound HTLC -#[derive(Clone, PartialEq)] +#[derive(Clone, Hash, PartialEq, Eq)] pub(crate) struct HTLCPreviousHopData { short_channel_id: u64, htlc_id: u64, @@ -172,24 +461,26 @@ struct ClaimableHTLC { onion_payload: OnionPayload, } -/// A payment identifier used to correlate an MPP payment's per-path HTLC sources internally. +/// A payment identifier used to uniquely identify a payment to LDK. 
+/// (C-not exported) as we just use [u8; 32] directly #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] -pub(crate) struct MppId(pub [u8; 32]); +pub struct PaymentId(pub [u8; 32]); -impl Writeable for MppId { +impl Writeable for PaymentId { fn write(&self, w: &mut W) -> Result<(), io::Error> { self.0.write(w) } } -impl Readable for MppId { +impl Readable for PaymentId { fn read(r: &mut R) -> Result { let buf: [u8; 32] = Readable::read(r)?; - Ok(MppId(buf)) + Ok(PaymentId(buf)) } } /// Tracks the inbound corresponding to an outbound HTLC -#[derive(Clone, PartialEq)] +#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash +#[derive(Clone, PartialEq, Eq)] pub(crate) enum HTLCSource { PreviousHopData(HTLCPreviousHopData), OutboundRoute { @@ -198,9 +489,31 @@ pub(crate) enum HTLCSource { /// Technically we can recalculate this from the route, but we cache it here to avoid /// doing a double-pass on route when we get a failure back first_hop_htlc_msat: u64, - mpp_id: MppId, + payment_id: PaymentId, + payment_secret: Option, + payee: Option, }, } +#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash +impl core::hash::Hash for HTLCSource { + fn hash(&self, hasher: &mut H) { + match self { + HTLCSource::PreviousHopData(prev_hop_data) => { + 0u8.hash(hasher); + prev_hop_data.hash(hasher); + }, + HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payee } => { + 1u8.hash(hasher); + path.hash(hasher); + session_priv[..].hash(hasher); + payment_id.hash(hasher); + payment_secret.hash(hasher); + first_hop_htlc_msat.hash(hasher); + payee.hash(hasher); + }, + } + } +} #[cfg(test)] impl HTLCSource { pub fn dummy() -> Self { @@ -208,7 +521,9 @@ impl HTLCSource { path: Vec::new(), session_priv: SecretKey::from_slice(&[1; 32]).unwrap(), first_hop_htlc_msat: 0, - mpp_id: MppId([2; 32]), + payment_id: PaymentId([2; 32]), + payment_secret: None, + payee: None, } } } @@ -242,6 +557,7 @@ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource struct MsgHandleErrInternal { err: msgs::LightningError, + chan_id: Option<([u8; 32], u64)>, // If Some a channel of ours has been closed shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { @@ -257,6 +573,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: None, shutdown_finish: None, } } @@ -267,15 +584,16 @@ impl MsgHandleErrInternal { err, action: msgs::ErrorAction::IgnoreError, }, + chan_id: None, shutdown_finish: None, } } #[inline] fn from_no_close(err: msgs::LightningError) -> Self { - Self { err, shutdown_finish: None } + Self { err, chan_id: None, shutdown_finish: None } } #[inline] - fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option) -> Self { + fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u64, shutdown_res: ShutdownResult, channel_update: Option) -> Self { Self { err: LightningError { err: err.clone(), @@ -286,6 +604,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: Some((channel_id, user_channel_id)), shutdown_finish: Some((shutdown_res, channel_update)), } } @@ -294,8 +613,14 @@ impl MsgHandleErrInternal { Self { err: match err { ChannelError::Warn(msg) => LightningError { - err: msg, - action: msgs::ErrorAction::IgnoreError, + err: msg.clone(), + action: msgs::ErrorAction::SendWarningMessage { + msg: msgs::WarningMessage { + channel_id, + data: msg + }, + 
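// Illustrative sketch, not from this patch: the hand-written Hash impl for HTLCSource above
// exists because SecretKey has no Hash impl, and it feeds a one-byte discriminant into the
// hasher before each variant's fields so the two variants can never hash identically just
// because their fields line up. A minimal standalone version of the same pattern, with
// types invented for the sketch:
use core::hash::{Hash, Hasher};

enum SourceLite {
	PreviousHop { short_channel_id: u64, htlc_id: u64 },
	OutboundRoute { first_hop_htlc_msat: u64 },
}

impl Hash for SourceLite {
	fn hash<H: Hasher>(&self, hasher: &mut H) {
		match self {
			SourceLite::PreviousHop { short_channel_id, htlc_id } => {
				0u8.hash(hasher); // variant discriminant
				short_channel_id.hash(hasher);
				htlc_id.hash(hasher);
			},
			SourceLite::OutboundRoute { first_hop_htlc_msat } => {
				1u8.hash(hasher);
				first_hop_htlc_msat.hash(hasher);
			},
		}
	}
}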
log_level: Level::Warn, + }, }, ChannelError::Ignore(msg) => LightningError { err: msg, @@ -320,6 +645,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: None, shutdown_finish: None, } } @@ -382,6 +708,9 @@ struct PeerState { /// /// For users who don't want to bother doing their own payment preimage storage, we also store that /// here. +/// +/// Note that this struct will be removed entirely soon, in favor of storing no inbound payment data +/// and instead encoding it in the payment secret. struct PendingInboundPayment { /// The payment secret that the sender must use for us to accept this payment payment_secret: PaymentSecret, @@ -395,6 +724,165 @@ struct PendingInboundPayment { min_value_msat: Option, } +/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102 +/// and later, also stores information for retrying the payment. +pub(crate) enum PendingOutboundPayment { + Legacy { + session_privs: HashSet<[u8; 32]>, + }, + Retryable { + session_privs: HashSet<[u8; 32]>, + payment_hash: PaymentHash, + payment_secret: Option, + pending_amt_msat: u64, + /// Used to track the fee paid. Only present if the payment was serialized on 0.0.103+. + pending_fee_msat: Option, + /// The total payment amount across all paths, used to verify that a retry is not overpaying. + total_msat: u64, + /// Our best known block height at the time this payment was initiated. + starting_block_height: u32, + }, + /// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have + /// been resolved. This ensures we don't look up pending payments in ChannelMonitors on restart + /// and add a pending payment that was already fulfilled. + Fulfilled { + session_privs: HashSet<[u8; 32]>, + payment_hash: Option, + }, + /// When a payer gives up trying to retry a payment, they inform us, letting us generate a + /// `PaymentFailed` event when all HTLCs have irrevocably failed. This avoids a number of race + /// conditions in MPP-aware payment retriers (1), where the possibility of multiple + /// `PaymentPathFailed` events with `all_paths_failed` can be pending at once, confusing a + /// downstream event handler as to when a payment has actually failed. + /// + /// (1) https://github.com/lightningdevkit/rust-lightning/issues/1164 + Abandoned { + session_privs: HashSet<[u8; 32]>, + payment_hash: PaymentHash, + }, +} + +impl PendingOutboundPayment { + fn is_retryable(&self) -> bool { + match self { + PendingOutboundPayment::Retryable { .. } => true, + _ => false, + } + } + fn is_fulfilled(&self) -> bool { + match self { + PendingOutboundPayment::Fulfilled { .. } => true, + _ => false, + } + } + fn abandoned(&self) -> bool { + match self { + PendingOutboundPayment::Abandoned { .. } => true, + _ => false, + } + } + fn get_pending_fee_msat(&self) -> Option { + match self { + PendingOutboundPayment::Retryable { pending_fee_msat, .. } => pending_fee_msat.clone(), + _ => None, + } + } + + fn payment_hash(&self) -> Option { + match self { + PendingOutboundPayment::Legacy { .. } => None, + PendingOutboundPayment::Retryable { payment_hash, .. } => Some(*payment_hash), + PendingOutboundPayment::Fulfilled { payment_hash, .. } => *payment_hash, + PendingOutboundPayment::Abandoned { payment_hash, .. 
} => Some(*payment_hash), + } + } + + fn mark_fulfilled(&mut self) { + let mut session_privs = HashSet::new(); + core::mem::swap(&mut session_privs, match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } | + PendingOutboundPayment::Fulfilled { session_privs, .. } | + PendingOutboundPayment::Abandoned { session_privs, .. } + => session_privs, + }); + let payment_hash = self.payment_hash(); + *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash }; + } + + fn mark_abandoned(&mut self) -> Result<(), ()> { + let mut session_privs = HashSet::new(); + let our_payment_hash; + core::mem::swap(&mut session_privs, match self { + PendingOutboundPayment::Legacy { .. } | + PendingOutboundPayment::Fulfilled { .. } => + return Err(()), + PendingOutboundPayment::Retryable { session_privs, payment_hash, .. } | + PendingOutboundPayment::Abandoned { session_privs, payment_hash, .. } => { + our_payment_hash = *payment_hash; + session_privs + }, + }); + *self = PendingOutboundPayment::Abandoned { session_privs, payment_hash: our_payment_hash }; + Ok(()) + } + + /// panics if path is None and !self.is_fulfilled + fn remove(&mut self, session_priv: &[u8; 32], path: Option<&Vec>) -> bool { + let remove_res = match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } | + PendingOutboundPayment::Fulfilled { session_privs, .. } | + PendingOutboundPayment::Abandoned { session_privs, .. } => { + session_privs.remove(session_priv) + } + }; + if remove_res { + if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self { + let path = path.expect("Fulfilling a payment should always come with a path"); + let path_last_hop = path.last().expect("Outbound payments must have had a valid path"); + *pending_amt_msat -= path_last_hop.fee_msat; + if let Some(fee_msat) = pending_fee_msat.as_mut() { + *fee_msat -= path.get_path_fees(); + } + } + } + remove_res + } + + fn insert(&mut self, session_priv: [u8; 32], path: &Vec) -> bool { + let insert_res = match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + session_privs.insert(session_priv) + } + PendingOutboundPayment::Fulfilled { .. } => false, + PendingOutboundPayment::Abandoned { .. } => false, + }; + if insert_res { + if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self { + let path_last_hop = path.last().expect("Outbound payments must have had a valid path"); + *pending_amt_msat += path_last_hop.fee_msat; + if let Some(fee_msat) = pending_fee_msat.as_mut() { + *fee_msat += path.get_path_fees(); + } + } + } + insert_res + } + + fn remaining_parts(&self) -> usize { + match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } | + PendingOutboundPayment::Fulfilled { session_privs, .. } | + PendingOutboundPayment::Abandoned { session_privs, .. } => { + session_privs.len() + } + } + } +} + /// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g. /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static /// lifetimes). 
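// Illustrative sketch, not from this patch: the transition rules encoded by mark_fulfilled
// and mark_abandoned above, reduced to a bare state enum. The real PendingOutboundPayment
// also carries session_privs, amounts and payment hashes; the names here are invented.
#[derive(Debug, PartialEq)]
enum OutboundStateLite { Legacy, Retryable, Fulfilled, Abandoned }

impl OutboundStateLite {
	// Any state may be marked fulfilled; the record is then kept only until the remaining
	// HTLCs resolve, so duplicate claim events after a restart are ignored.
	fn mark_fulfilled(&mut self) { *self = OutboundStateLite::Fulfilled; }

	// Only payments that can still be retried (or were already abandoned) may be abandoned;
	// Legacy and Fulfilled payments refuse the transition, as in mark_abandoned above.
	fn mark_abandoned(&mut self) -> Result<(), ()> {
		match self {
			OutboundStateLite::Retryable | OutboundStateLite::Abandoned => {
				*self = OutboundStateLite::Abandoned;
				Ok(())
			},
			OutboundStateLite::Legacy | OutboundStateLite::Fulfilled => Err(()),
		}
	}
}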
Other times you can afford a reference, which is more efficient, in which case @@ -481,24 +969,25 @@ pub struct ChannelManager>, - /// The session_priv bytes of outbound payments which are pending resolution. + /// The session_priv bytes and retry metadata of outbound payments which are pending resolution. /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors /// (if the channel has been force-closed), however we track them here to prevent duplicative - /// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative + /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative /// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice. /// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s) /// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents /// after reloading from disk while replaying blocks against ChannelMonitors. /// - /// Each payment has each of its MPP part's session_priv bytes in the HashSet of the map (even - /// payments over a single path). + /// See `PendingOutboundPayment` documentation for more info. /// /// Locked *after* channel_state. - pending_outbound_payments: Mutex>>, + pending_outbound_payments: Mutex>, our_network_key: SecretKey, our_network_pubkey: PublicKey, + inbound_payment_key: inbound_payment::ExpandedKey, + /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this /// value increases strictly since we don't assume access to a time source. last_node_announcement_serial: AtomicUsize, @@ -646,6 +1135,10 @@ const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRA #[allow(dead_code)] const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER; +/// The number of blocks before we consider an outbound payment for expiry if it doesn't have any +/// pending HTLCs in flight. +pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3; + /// Information needed for constructing an invoice route hint for this channel. #[derive(Clone, Debug, PartialEq)] pub struct CounterpartyForwardingInfo { @@ -713,19 +1206,32 @@ pub struct ChannelDetails { /// /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat pub unspendable_punishment_reserve: Option, - /// The user_id passed in to create_channel, or 0 if the channel was inbound. - pub user_id: u64, + /// The `user_channel_id` passed in to create_channel, or 0 if the channel was inbound. + pub user_channel_id: u64, + /// Our total balance. This is the amount we would get if we close the channel. + /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this + /// amount is not likely to be recoverable on close. + /// + /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose + /// balance is not available for inclusion in new outbound HTLCs). This further does not include + /// any pending outgoing HTLCs which are awaiting some other resolution to be sent. + /// This does not consider any on-chain fees. + /// + /// See also [`ChannelDetails::outbound_capacity_msat`] + pub balance_msat: u64, /// The available outbound capacity for sending HTLCs to the remote peer. 
This does not include - /// any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not + /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not /// available for inclusion in new outbound HTLCs). This further does not include any pending /// outgoing HTLCs which are awaiting some other resolution to be sent. /// + /// See also [`ChannelDetails::balance_msat`] + /// /// This value is not exact. Due to various in-flight changes, feerate changes, and our /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we /// should be able to spend nearly this amount. pub outbound_capacity_msat: u64, /// The available inbound capacity for the remote peer to send HTLCs to us. This does not - /// include any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not + /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not /// available for inclusion in new inbound HTLCs). /// Note that there are some corner cases not fully handled here, so the actual available /// inbound capacity may be slightly higher than this. @@ -806,19 +1312,29 @@ pub enum PaymentSendFailure { /// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the /// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel /// with the latest update_id. - PartialFailure(Vec>), + PartialFailure { + /// The errors themselves, in the same order as the route hops. + results: Vec>, + /// If some paths failed without irrevocably committing to the new HTLC(s), this will + /// contain a [`RouteParameters`] object which can be used to calculate a new route that + /// will pay all remaining unpaid balance. + failed_paths_retry: Option, + /// The payment id for the payment, which is now at least partially pending. + payment_id: PaymentId, + }, } macro_rules! handle_error { ($self: ident, $internal: expr, $counterparty_node_id: expr) => { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish }) => { + Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => { #[cfg(debug_assertions)] { // In testing, ensure there are no deadlocks where the lock is already held upon // entering the macro. assert!($self.channel_state.try_lock().is_ok()); + assert!($self.pending_events.try_lock().is_ok()); } let mut msg_events = Vec::with_capacity(2); @@ -830,6 +1346,12 @@ macro_rules! handle_error { msg: update }); } + if let Some((channel_id, user_channel_id)) = chan_id { + $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { + channel_id, user_channel_id, + reason: ClosureReason::ProcessingError { err: err.err.clone() } + }); + } } log_error!($self.logger, "{}", err.err); @@ -857,9 +1379,7 @@ macro_rules! convert_chan_err { ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => { match $err { ChannelError::Warn(msg) => { - //TODO: Once warning messages are merged, we should send a `warning` message to our - //peer here. - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone())) }, ChannelError::Ignore(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) @@ -870,7 +1390,8 @@ macro_rules! 
convert_chan_err { $short_to_id.remove(&short_id); } let shutdown_res = $channel.force_shutdown(true); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), + shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, ChannelError::CloseDelayBroadcast(msg) => { log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg); @@ -878,7 +1399,8 @@ macro_rules! convert_chan_err { $short_to_id.remove(&short_id); } let shutdown_res = $channel.force_shutdown(false); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), + shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) } } } @@ -930,7 +1452,7 @@ macro_rules! handle_monitor_err { ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new()) }; - ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => { + ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { match $err { ChannelMonitorUpdateErr::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..])); @@ -946,12 +1468,12 @@ macro_rules! handle_monitor_err { // splitting hairs we'd prefer to claim payments that were to us, but we haven't // given up the preimage yet, so might as well just wait until the payment is // retried, avoiding the on-chain fees. - let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, + let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(), $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() )); (res, true) }, ChannelMonitorUpdateErr::TemporaryFailure => { - log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails", + log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations", log_bytes!($chan_id[..]), if $resend_commitment && $resend_raa { match $action_type { @@ -962,25 +1484,29 @@ macro_rules! 
handle_monitor_err { else if $resend_raa { "RAA" } else { "nothing" }, (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(), - (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len()); + (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(), + (&$failed_finalized_fulfills as &Vec).len()); if !$resend_commitment { debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa); } if !$resend_raa { debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment); } - $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails); + $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills); (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false) }, } }; - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { { - let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key()); + ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { { + let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); if drop { $entry.remove_entry(); } res } }; + ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { + handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new()) + } } macro_rules! return_monitor_err { @@ -1153,7 +1679,8 @@ impl ChannelMana pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self { let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes()); - + let inbound_pmt_key_material = keys_manager.get_inbound_payment_key_material(); + let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); ChannelManager { default_configuration: config.clone(), genesis_hash: genesis_block(params.network).header.block_hash(), @@ -1177,6 +1704,8 @@ impl ChannelMana our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), secp_ctx, + inbound_payment_key: expanded_inbound_key, + last_node_announcement_serial: AtomicUsize::new(0), highest_seen_timestamp: AtomicUsize::new(0), @@ -1200,22 +1729,31 @@ impl ChannelMana /// Creates a new outbound channel to the given remote node and with the given value. /// - /// user_id will be provided back as user_channel_id in FundingGenerationReady events to allow - /// tracking of which events correspond with which create_channel call. Note that the - /// user_channel_id defaults to 0 for inbound channels, so you may wish to avoid using 0 for - /// user_id here. user_id has no meaning inside of LDK, it is simply copied to events and - /// otherwise ignored. 
+ /// `user_channel_id` will be provided back as in + /// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events + /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to 0 + /// for inbound channels, so you may wish to avoid using 0 for `user_channel_id` here. + /// `user_channel_id` has no meaning inside of LDK, it is simply copied to events and otherwise + /// ignored. /// - /// If successful, will generate a SendOpenChannel message event, so you should probably poll - /// PeerManager::process_events afterwards. - /// - /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is - /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000. + /// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is + /// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`. /// /// Note that we do not check if you are currently connected to the given peer. If no /// connection is available, the outbound `open_channel` message may fail to send, resulting in - /// the channel eventually being silently forgotten. - pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option) -> Result<(), APIError> { + /// the channel eventually being silently forgotten (dropped on reload). + /// + /// Returns the new Channel's temporary `channel_id`. This ID will appear as + /// [`Event::FundingGenerationReady::temporary_channel_id`] and in + /// [`ChannelDetails::channel_id`] until after + /// [`ChannelManager::funding_transaction_generated`] is called, swapping the Channel's ID for + /// one derived from the funding transaction's TXID. If the counterparty rejects the channel + /// immediately, this temporary ID will appear in [`Event::ChannelClosed::channel_id`]. + /// + /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id + /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id + /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id + pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u64, override_config: Option) -> Result<[u8; 32], APIError> { if channel_value_satoshis < 1000 { return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } @@ -1227,7 +1765,8 @@ impl ChannelMana let peer_state = peer_state.lock().unwrap(); let their_features = &peer_state.latest_features; let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)? + Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, + channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height())? }, None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }), } @@ -1238,8 +1777,9 @@ impl ChannelMana // We want to make sure the lock is actually acquired by PersistenceNotifierGuard. 
debug_assert!(&self.total_consistency_lock.try_write().is_err()); + let temporary_channel_id = channel.channel_id(); let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.entry(channel.channel_id()) { + match channel_state.by_id.entry(temporary_channel_id) { hash_map::Entry::Occupied(_) => { if cfg!(feature = "fuzztarget") { return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() }); @@ -1253,7 +1793,7 @@ impl ChannelMana node_id: their_network_key, msg: res, }); - Ok(()) + Ok(temporary_channel_id) } fn list_channels_with_filter)) -> bool>(&self, f: Fn) -> Vec { @@ -1263,6 +1803,7 @@ impl ChannelMana res.reserve(channel_state.by_id.len()); for (channel_id, channel) in channel_state.by_id.iter().filter(f) { let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat(); + let balance_msat = channel.get_balance_msat(); let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = channel.get_holder_counterparty_selected_channel_reserve_satoshis(); res.push(ChannelDetails { @@ -1277,9 +1818,10 @@ impl ChannelMana short_channel_id: channel.get_short_channel_id(), channel_value_satoshis: channel.get_value_satoshis(), unspendable_punishment_reserve: to_self_reserve_satoshis, + balance_msat, inbound_capacity_msat, outbound_capacity_msat, - user_id: channel.get_user_id(), + user_channel_id: channel.get_user_id(), confirmations_required: channel.minimum_depth(), force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), is_outbound: channel.is_outbound(), @@ -1317,6 +1859,22 @@ impl ChannelMana self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } + /// Helper function that issues the channel close events + fn issue_channel_close_events(&self, channel: &Channel, closure_reason: ClosureReason) { + let mut pending_events_lock = self.pending_events.lock().unwrap(); + match channel.unbroadcasted_funding() { + Some(transaction) => { + pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction }) + }, + None => {}, + } + pending_events_lock.push(events::Event::ChannelClosed { + channel_id: channel.channel_id(), + user_channel_id: channel.get_user_id(), + reason: closure_reason + }); + } + fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); @@ -1343,7 +1901,7 @@ impl ChannelMana if let Some(monitor_update) = monitor_update { if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key()); + handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key()); if is_permanent { remove_channel!(channel_state, chan_entry); break result; @@ -1363,6 +1921,7 @@ impl ChannelMana msg: channel_update }); } + self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); } break Ok(()); }, @@ -1438,7 +1997,9 @@ impl ChannelMana } } - fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result { + /// `peer_node_id` should be set when 
we receive a message from a peer, but not set when the + /// user closes, which will be re-exposed as the `ChannelClosed` reason. + fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result { let mut chan = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -1451,6 +2012,13 @@ impl ChannelMana if let Some(short_id) = chan.get().get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } + if peer_node_id.is_some() { + if let Some(peer_msg) = peer_msg { + self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); + } + } else { + self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); + } chan.remove_entry().1 } else { return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); @@ -1472,7 +2040,7 @@ impl ChannelMana /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager. pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - match self.force_close_channel_with_peer(channel_id, None) { + match self.force_close_channel_with_peer(channel_id, None, None) { Ok(counterparty_node_id) => { self.channel_state.lock().unwrap().pending_msg_events.push( events::MessageSendEvent::HandleError { @@ -1761,17 +2329,24 @@ impl ChannelMana break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } let cur_height = self.best_block.read().unwrap().height() + 1; - // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty - // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational) + // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, + // but we want to be robust wrt to counterparty packet sanitization (see + // HTLC_FAIL_BACK_BUFFER rationale). if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far break Some(("CLTV expiry is too far in the future", 21, None)); } - // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS. - // But, to be safe against policy reception, we use a longer delay. - if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 { + // If the HTLC expires ~now, don't bother trying to forward it to our + // counterparty. They should fail it anyway, but we don't want to bother with + // the round-trips or risk them deciding they definitely want the HTLC and + // force-closing to ensure they get it if we're offline. + // We previously had a much more aggressive check here which tried to ensure + // our counterparty receives an HTLC which has *our* risk threshold met on it, + // but there is no need to do that, and since we're a bit conservative with our + // risk threshold it just results in failing to forward payments. 
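// Illustrative sketch, not from this patch: the CLTV checks applied when deciding whether
// to forward an HTLC, pulled out as standalone predicates. The constants are parameters
// here rather than LDK's actual values; the relaxed outgoing check is the one introduced
// immediately below, and the error strings/codes mirror the hunk above.
fn incoming_cltv_ok(cltv_expiry: u32, cur_height: u32,
	htlc_fail_back_buffer: u32, cltv_far_far_away: u32) -> Result<(), &'static str> {
	if cltv_expiry <= cur_height + htlc_fail_back_buffer {
		return Err("CLTV expiry is too close"); // 0x1000 | 14
	}
	if cltv_expiry > cur_height + cltv_far_far_away {
		return Err("CLTV expiry is too far in the future"); // 21
	}
	Ok(())
}

fn outgoing_cltv_ok(outgoing_cltv_value: u32, cur_height: u32,
	latency_grace_period_blocks: u32) -> Result<(), &'static str> {
	// Only refuse to forward when the HTLC expires so soon that the next hop could not
	// plausibly claim it; a stricter margin just loses otherwise-fine forwards.
	if outgoing_cltv_value as u64 <= (cur_height + latency_grace_period_blocks) as u64 {
		return Err("Outgoing CLTV value is too soon"); // 0x1000 | 14
	}
	Ok(())
}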
+ if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 { break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } @@ -1853,7 +2428,7 @@ impl ChannelMana } // Only public for testing, this should otherwise never be called direcly - pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, mpp_id: MppId, keysend_preimage: &Option) -> Result<(), APIError> { + pub(crate) fn send_payment_along_path(&self, path: &Vec, payee: &Option, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); let prng_seed = self.keys_manager.get_secure_random_bytes(); let session_priv_bytes = self.keys_manager.get_secure_random_bytes(); @@ -1868,17 +2443,40 @@ impl ChannelMana let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); - let sessions = pending_outbounds.entry(mpp_id).or_insert(HashSet::new()); - assert!(sessions.insert(session_priv_bytes)); let err: Result<(), _> = loop { let mut channel_lock = self.channel_state.lock().unwrap(); + + let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); + let payment_entry = pending_outbounds.entry(payment_id); + if let hash_map::Entry::Occupied(payment) = &payment_entry { + if !payment.get().is_retryable() { + return Err(APIError::RouteError { + err: "Payment already completed" + }); + } + } + let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}), Some(id) => id.clone(), }; + macro_rules! 
insert_outbound_payment { + () => { + let payment = payment_entry.or_insert_with(|| PendingOutboundPayment::Retryable { + session_privs: HashSet::new(), + pending_amt_msat: 0, + pending_fee_msat: Some(0), + payment_hash: *payment_hash, + payment_secret: *payment_secret, + starting_block_height: self.best_block.read().unwrap().height(), + total_msat: total_value, + }); + assert!(payment.insert(session_priv_bytes, path)); + } + } + let channel_state = &mut *channel_lock; if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) { match { @@ -1888,12 +2486,16 @@ impl ChannelMana if !chan.get().is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()}); } - break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { - path: path.clone(), - session_priv: session_priv.clone(), - first_hop_htlc_msat: htlc_msat, - mpp_id, - }, onion_packet, &self.logger), channel_state, chan) + break_chan_entry!(self, chan.get_mut().send_htlc_and_commit( + htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + path: path.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + payment_id, + payment_secret: payment_secret.clone(), + payee: payee.clone(), + }, onion_packet, &self.logger), + channel_state, chan) } { Some((update_add, commitment_signed, monitor_update)) => { if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { @@ -1903,8 +2505,10 @@ impl ChannelMana // is restored. Therefore, we must return an error indicating that // it is unsafe to retry the payment wholesale, which we do in the // send_payment check for MonitorUpdateFailed, below. + insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above. return Err(APIError::MonitorUpdateFailed); } + insert_outbound_payment!(); log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { @@ -1919,7 +2523,7 @@ impl ChannelMana }, }); }, - None => {}, + None => { insert_outbound_payment!(); }, } } else { unreachable!(); } return Ok(()); @@ -1972,11 +2576,11 @@ impl ChannelMana /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature /// bit set (either as required or as available). If multiple paths are present in the Route, /// we assume the invoice had the basic_mpp feature set. 
- pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option) -> Result<(), PaymentSendFailure> { - self.send_payment_internal(route, payment_hash, payment_secret, None) + pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option) -> Result { + self.send_payment_internal(route, payment_hash, payment_secret, None, None, None) } - fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option) -> Result<(), PaymentSendFailure> { + fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option, payment_id: Option, recv_value_msat: Option) -> Result { if route.paths.len() < 1 { return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); } @@ -1992,7 +2596,7 @@ impl ChannelMana let mut total_value = 0; let our_node_id = self.get_our_node_id(); let mut path_errs = Vec::with_capacity(route.paths.len()); - let mpp_id = MppId(self.keys_manager.get_secure_random_bytes()); + let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) }; 'path_check: for path in route.paths.iter() { if path.len() < 1 || path.len() > 20 { path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"})); @@ -2010,15 +2614,21 @@ impl ChannelMana if path_errs.iter().any(|e| e.is_err()) { return Err(PaymentSendFailure::PathParameterError(path_errs)); } + if let Some(amt_msat) = recv_value_msat { + debug_assert!(amt_msat >= total_value); + total_value = amt_msat; + } let cur_height = self.best_block.read().unwrap().height() + 1; let mut results = Vec::new(); for path in route.paths.iter() { - results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, mpp_id, &keysend_preimage)); + results.push(self.send_payment_along_path(&path, &route.payee, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage)); } let mut has_ok = false; let mut has_err = false; - for res in results.iter() { + let mut pending_amt_unsent = 0; + let mut max_unsent_cltv_delta = 0; + for (res, path) in results.iter().zip(route.paths.iter()) { if res.is_ok() { has_ok = true; } if res.is_err() { has_err = true; } if let &Err(APIError::MonitorUpdateFailed) = res { @@ -2026,15 +2636,123 @@ impl ChannelMana // PartialFailure. has_err = true; has_ok = true; - break; + } else if res.is_err() { + pending_amt_unsent += path.last().unwrap().fee_msat; + max_unsent_cltv_delta = cmp::max(max_unsent_cltv_delta, path.last().unwrap().cltv_expiry_delta); } } if has_err && has_ok { - Err(PaymentSendFailure::PartialFailure(results)) + Err(PaymentSendFailure::PartialFailure { + results, + payment_id, + failed_paths_retry: if pending_amt_unsent != 0 { + if let Some(payee) = &route.payee { + Some(RouteParameters { + payee: payee.clone(), + final_value_msat: pending_amt_unsent, + final_cltv_expiry_delta: max_unsent_cltv_delta, + }) + } else { None } + } else { None }, + }) } else if has_err { + // If we failed to send any paths, we shouldn't have inserted the new PaymentId into + // our `pending_outbound_payments` map at all. 
+ debug_assert!(self.pending_outbound_payments.lock().unwrap().get(&payment_id).is_none()); Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect())) } else { - Ok(()) + Ok(payment_id) + } + } + + /// Retries a payment along the given [`Route`]. + /// + /// Errors returned are a superset of those returned from [`send_payment`], so see + /// [`send_payment`] documentation for more details on errors. This method will also error if the + /// retry amount puts the payment more than 10% over the payment's total amount, if the payment + /// for the given `payment_id` cannot be found (likely due to timeout or success), or if + /// further retries have been disabled with [`abandon_payment`]. + /// + /// [`send_payment`]: [`ChannelManager::send_payment`] + /// [`abandon_payment`]: [`ChannelManager::abandon_payment`] + pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { + const RETRY_OVERFLOW_PERCENTAGE: u64 = 10; + for path in route.paths.iter() { + if path.len() == 0 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "length-0 path in route".to_string() + })) + } + } + + let (total_msat, payment_hash, payment_secret) = { + let outbounds = self.pending_outbound_payments.lock().unwrap(); + if let Some(payment) = outbounds.get(&payment_id) { + match payment { + PendingOutboundPayment::Retryable { + total_msat, payment_hash, payment_secret, pending_amt_msat, .. + } => { + let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum(); + if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string() + })) + } + (*total_msat, *payment_hash, *payment_secret) + }, + PendingOutboundPayment::Legacy { .. } => { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string() + })) + }, + PendingOutboundPayment::Fulfilled { .. } => { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "Payment already completed".to_owned() + })); + }, + PendingOutboundPayment::Abandoned { .. } => { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "Payment already abandoned (with some HTLCs still pending)".to_owned() + })); + }, + } + } else { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)), + })) + } + }; + return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ()) + } + + /// Signals that no further retries for the given payment will occur. + /// + /// After this method returns, any future calls to [`retry_payment`] for the given `payment_id` + /// will fail with [`PaymentSendFailure::ParameterError`]. If no such event has been generated, + /// an [`Event::PaymentFailed`] event will be generated as soon as there are no remaining + /// pending HTLCs for this payment. + /// + /// Note that calling this method does *not* prevent a payment from succeeding. 
You must still + /// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to + /// determine the ultimate status of a payment. + /// + /// [`retry_payment`]: Self::retry_payment + /// [`Event::PaymentFailed`]: events::Event::PaymentFailed + /// [`Event::PaymentSent`]: events::Event::PaymentSent + pub fn abandon_payment(&self, payment_id: PaymentId) { + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { + if let Ok(()) = payment.get_mut().mark_abandoned() { + if payment.get().remaining_parts() == 0 { + self.pending_events.lock().unwrap().push(events::Event::PaymentFailed { + payment_id, + payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"), + }); + payment.remove(); + } + } } } @@ -2052,14 +2770,14 @@ impl ChannelMana /// Note that `route` must have exactly one path. /// /// [`send_payment`]: Self::send_payment - pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option) -> Result { + pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let preimage = match payment_preimage { Some(p) => p, None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()), }; let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner()); - match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) { - Ok(()) => Ok(payment_hash), + match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) { + Ok(payment_id) => Ok((payment_hash, payment_id)), Err(e) => Err(e) } } @@ -2075,7 +2793,7 @@ impl ChannelMana (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None) + MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None) } else { unreachable!(); }) , chan) }, @@ -2119,7 +2837,8 @@ impl ChannelMana /// Returns an [`APIError::APIMisuseError`] if the funding_transaction spent non-SegWit outputs /// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`]. /// - /// Panics if a funding transaction has already been provided for this channel. + /// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided + /// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`]. /// /// May panic if the output found in the funding transaction is duplicative with some other /// channel (note that this should be trivially prevented by using unique funding transaction @@ -2134,6 +2853,7 @@ impl ChannelMana /// create a new channel with a conflicting funding transaction. 
/// /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady + /// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); @@ -2337,7 +3057,7 @@ impl ChannelMana htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, }); - match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet) { + match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) { Err(e) => { if let ChannelError::Ignore(msg) = e { log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); @@ -2416,7 +3136,8 @@ impl ChannelMana if let Some(short_id) = channel.get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } - Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) + // ChannelClosed event is generated by handle_error for us. + Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) }, ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } }; @@ -2489,6 +3210,59 @@ impl ChannelMana } } + macro_rules! check_total_value { + ($payment_data_total_msat: expr, $payment_secret: expr, $payment_preimage: expr) => {{ + let mut total_value = 0; + let mut payment_received_generated = false; + let htlcs = channel_state.claimable_htlcs.entry(payment_hash) + .or_insert(Vec::new()); + if htlcs.len() == 1 { + if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + continue + } + } + htlcs.push(claimable_htlc); + for htlc in htlcs.iter() { + total_value += htlc.value; + match &htlc.onion_payload { + OnionPayload::Invoice(htlc_payment_data) => { + if htlc_payment_data.total_msat != $payment_data_total_msat { + log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", + log_bytes!(payment_hash.0), $payment_data_total_msat, htlc_payment_data.total_msat); + total_value = msgs::MAX_VALUE_MSAT; + } + if total_value >= msgs::MAX_VALUE_MSAT { break; } + }, + _ => unreachable!(), + } + } + if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data_total_msat { + log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)", + log_bytes!(payment_hash.0), total_value, $payment_data_total_msat); + for htlc in htlcs.iter() { + fail_htlc!(htlc); + } + } else if total_value == $payment_data_total_msat { + new_events.push(events::Event::PaymentReceived { + payment_hash, + purpose: events::PaymentPurpose::InvoicePayment { + payment_preimage: $payment_preimage, + payment_secret: $payment_secret, + }, + amt: total_value, + }); + payment_received_generated = 
true; + } else { + // Nothing to do - we haven't reached the total + // payment value yet, wait until we receive more + // MPP parts. + } + payment_received_generated + }} + } + // Check that the payment hash and secret are known. Note that we // MUST take care to handle the "unknown payment hash" and // "incorrect payment secret" cases here identically or we'd expose @@ -2499,9 +3273,17 @@ impl ChannelMana match payment_secrets.entry(payment_hash) { hash_map::Entry::Vacant(_) => { match claimable_htlc.onion_payload { - OnionPayload::Invoice(_) => { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + OnionPayload::Invoice(ref payment_data) => { + let payment_preimage = match inbound_payment::verify(payment_hash, payment_data.clone(), self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { + Ok(payment_preimage) => payment_preimage, + Err(()) => { + fail_htlc!(claimable_htlc); + continue + } + }; + let payment_data_total_msat = payment_data.total_msat; + let payment_secret = payment_data.payment_secret.clone(); + check_total_value!(payment_data_total_msat, payment_secret, payment_preimage); }, OnionPayload::Spontaneous(preimage) => { match channel_state.claimable_htlcs.entry(payment_hash) { @@ -2538,55 +3320,9 @@ impl ChannelMana log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap()); fail_htlc!(claimable_htlc); } else { - let mut total_value = 0; - let htlcs = channel_state.claimable_htlcs.entry(payment_hash) - .or_insert(Vec::new()); - if htlcs.len() == 1 { - if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); - continue - } - } - htlcs.push(claimable_htlc); - for htlc in htlcs.iter() { - total_value += htlc.value; - match &htlc.onion_payload { - OnionPayload::Invoice(htlc_payment_data) => { - if htlc_payment_data.total_msat != payment_data.total_msat { - log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", - log_bytes!(payment_hash.0), payment_data.total_msat, htlc_payment_data.total_msat); - total_value = msgs::MAX_VALUE_MSAT; - } - if total_value >= msgs::MAX_VALUE_MSAT { break; } - }, - _ => unreachable!(), - } - } - if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat { - log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)", - log_bytes!(payment_hash.0), total_value, payment_data.total_msat); - for htlc in htlcs.iter() { - fail_htlc!(htlc); - } - } else if total_value == payment_data.total_msat { - new_events.push(events::Event::PaymentReceived { - payment_hash, - purpose: events::PaymentPurpose::InvoicePayment { - payment_preimage: inbound_payment.get().payment_preimage, - payment_secret: payment_data.payment_secret, - user_payment_id: inbound_payment.get().user_payment_id, - }, - amt: total_value, - }); - // Only ever generate at most one PaymentReceived - // per registered payment_hash, even if it isn't - // claimed. 
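As a rough illustration of the MPP amount check consolidated into the `check_total_value!` macro added above, the standalone sketch below mirrors the accept/fail/wait decision; the HTLC type and the constant are simplified placeholders:

```rust
// Placeholder for 21 million BTC expressed in millisatoshis.
const MAX_VALUE_MSAT: u64 = 21_000_000 * 100_000_000 * 1_000;

struct ClaimableHtlc { value: u64, sender_claimed_total_msat: u64 }

enum MppCheck { FailAll, PaymentReceived { amt: u64 }, KeepWaiting }

fn check_total_value(htlcs: &[ClaimableHtlc], expected_total_msat: u64) -> MppCheck {
    let mut total_value = 0;
    for htlc in htlcs {
        total_value += htlc.value;
        // Every part of an MPP payment must agree on the sender's total amount.
        if htlc.sender_claimed_total_msat != expected_total_msat {
            total_value = MAX_VALUE_MSAT;
        }
        if total_value >= MAX_VALUE_MSAT { break; }
    }
    if total_value >= MAX_VALUE_MSAT || total_value > expected_total_msat {
        MppCheck::FailAll
    } else if total_value == expected_total_msat {
        // Only once the full amount has arrived is a single PaymentReceived generated.
        MppCheck::PaymentReceived { amt: total_value }
    } else {
        MppCheck::KeepWaiting
    }
}

fn main() {
    let parts = [
        ClaimableHtlc { value: 60_000, sender_claimed_total_msat: 100_000 },
        ClaimableHtlc { value: 40_000, sender_claimed_total_msat: 100_000 },
    ];
    assert!(matches!(check_total_value(&parts, 100_000),
                     MppCheck::PaymentReceived { amt: 100_000 }));
}
```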
+ let payment_received_generated = check_total_value!(payment_data.total_msat, payment_data.payment_secret, inbound_payment.get().payment_preimage); + if payment_received_generated { inbound_payment.remove_entry(); - } else { - // Nothing to do - we haven't reached the total - // payment value yet, wait until we receive more - // MPP parts. } } }, @@ -2673,7 +3409,7 @@ impl ChannelMana let ret_err = match res { Ok(Some((update_fee, commitment_signed, monitor_update))) => { if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id); + let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), chan_id); if drop { retain_channel = false; } res } else { @@ -2849,26 +3585,41 @@ impl ChannelMana self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); }, - HTLCSource::OutboundRoute { session_priv, mpp_id, .. } => { + HTLCSource::OutboundRoute { session_priv, payment_id, path, payee, .. } => { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); - if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) { - if sessions.get_mut().remove(&session_priv_bytes) { - self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { - payment_hash, - rejected_by_dest: false, - network_update: None, - all_paths_failed: sessions.get().len() == 0, - #[cfg(test)] - error_code: None, - #[cfg(test)] - error_data: None, - } - ); - if sessions.get().len() == 0 { - sessions.remove(); + if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { + if payment.get_mut().remove(&session_priv_bytes, Some(&path)) && !payment.get().is_fulfilled() { + let retry = if let Some(payee_data) = payee { + let path_last_hop = path.last().expect("Outbound payments must have had a valid path"); + Some(RouteParameters { + payee: payee_data, + final_value_msat: path_last_hop.fee_msat, + final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta, + }) + } else { None }; + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::PaymentPathFailed { + payment_id: Some(payment_id), + payment_hash, + rejected_by_dest: false, + network_update: None, + all_paths_failed: payment.get().remaining_parts() == 0, + path: path.clone(), + short_channel_id: None, + retry, + #[cfg(test)] + error_code: None, + #[cfg(test)] + error_data: None, + }); + if payment.get().abandoned() && payment.get().remaining_parts() == 0 { + pending_events.push(events::Event::PaymentFailed { + payment_id, + payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"), + }); + payment.remove(); } } } else { @@ -2895,47 +3646,69 @@ impl ChannelMana // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { - HTLCSource::OutboundRoute { ref path, session_priv, mpp_id, .. } => { + HTLCSource::OutboundRoute { ref path, session_priv, payment_id, ref payee, .. 
} => { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); let mut all_paths_failed = false; - if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) { - if !sessions.get_mut().remove(&session_priv_bytes) { + let mut full_failure_ev = None; + if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { + if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); return; } - if sessions.get().len() == 0 { + if payment.get().is_fulfilled() { + log_trace!(self.logger, "Received failure of HTLC with payment_hash {} after payment completion", log_bytes!(payment_hash.0)); + return; + } + if payment.get().remaining_parts() == 0 { all_paths_failed = true; - sessions.remove(); + if payment.get().abandoned() { + full_failure_ev = Some(events::Event::PaymentFailed { + payment_id, + payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"), + }); + payment.remove(); + } } } else { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); return; } - log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0)); mem::drop(channel_state_lock); - match &onion_error { + let retry = if let Some(payee_data) = payee { + let path_last_hop = path.last().expect("Outbound payments must have had a valid path"); + Some(RouteParameters { + payee: payee_data.clone(), + final_value_msat: path_last_hop.fee_msat, + final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta, + }) + } else { None }; + log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0)); + + let path_failure = match &onion_error { &HTLCFailReason::LightningError { ref err } => { #[cfg(test)] - let (network_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); #[cfg(not(test))] - let (network_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); // TODO: If we decided to blame ourselves (or one of our channels) in // process_onion_failure we should close that channel as it implies our // next-hop is needlessly blaming us! 
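The retry information attached to `PaymentPathFailed` above is derived from the failed path itself. A small sketch of that derivation, using simplified stand-in types rather than the LDK route structs:

```rust
// Simplified stand-ins for the LDK route types.
struct RouteHop { fee_msat: u64, cltv_expiry_delta: u32 }
struct Payee;
struct RouteParameters { payee: Payee, final_value_msat: u64, final_cltv_expiry_delta: u32 }

// The last hop's fee_msat carries the amount delivered to the destination, so it
// becomes the amount to retry; retries are only offered when payee data was recorded.
fn retry_for_failed_path(payee: Option<Payee>, path: &[RouteHop]) -> Option<RouteParameters> {
    let payee = payee?;
    let last_hop = path.last().expect("Outbound payments must have had a valid path");
    Some(RouteParameters {
        payee,
        final_value_msat: last_hop.fee_msat,
        final_cltv_expiry_delta: last_hop.cltv_expiry_delta,
    })
}

fn main() {
    let path = vec![
        RouteHop { fee_msat: 1_000, cltv_expiry_delta: 40 },
        RouteHop { fee_msat: 50_000, cltv_expiry_delta: 144 },
    ];
    let retry = retry_for_failed_path(Some(Payee), &path).unwrap();
    assert_eq!(retry.final_value_msat, 50_000);
    assert_eq!(retry.final_cltv_expiry_delta, 144);
}
```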
- self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { - payment_hash: payment_hash.clone(), - rejected_by_dest: !payment_retryable, - network_update, - all_paths_failed, + events::Event::PaymentPathFailed { + payment_id: Some(payment_id), + payment_hash: payment_hash.clone(), + rejected_by_dest: !payment_retryable, + network_update, + all_paths_failed, + path: path.clone(), + short_channel_id, + retry, #[cfg(test)] - error_code: onion_error_code, + error_code: onion_error_code, #[cfg(test)] - error_data: onion_error_data - } - ); + error_data: onion_error_data + } }, &HTLCFailReason::Reason { #[cfg(test)] @@ -2950,20 +3723,25 @@ impl ChannelMana // ChannelDetails. // TODO: For non-temporary failures, we really should be closing the // channel here as we apparently can't relay through them anyway. - self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { - payment_hash: payment_hash.clone(), - rejected_by_dest: path.len() == 1, - network_update: None, - all_paths_failed, + events::Event::PaymentPathFailed { + payment_id: Some(payment_id), + payment_hash: payment_hash.clone(), + rejected_by_dest: path.len() == 1, + network_update: None, + all_paths_failed, + path: path.clone(), + short_channel_id: Some(path.first().unwrap().short_channel_id), + retry, #[cfg(test)] - error_code: Some(*failure_code), + error_code: Some(*failure_code), #[cfg(test)] - error_data: Some(data.clone()), - } - ); + error_data: Some(data.clone()), + } } - } + }; + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(path_failure); + if let Some(ev) = full_failure_ev { pending_events.push(ev); } }, HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, .. }) => { let err_packet = match onion_error { @@ -3001,19 +3779,21 @@ impl ChannelMana } } - /// Provides a payment preimage in response to a PaymentReceived event, returning true and - /// generating message events for the net layer to claim the payment, if possible. Thus, you - /// should probably kick the net layer to go send messages if this returns true! + /// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any + /// [`MessageSendEvent`]s needed to claim the payment. /// /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived` /// event matches your expectation. If you fail to do so and call this method, you may provide /// the sender "proof-of-payment" when they did not fulfill the full expected payment. /// - /// May panic if called except in response to a PaymentReceived event. + /// Returns whether any HTLCs were claimed, and thus if any new [`MessageSendEvent`]s are now + /// pending for processing via [`get_and_clear_pending_msg_events`]. 
/// + /// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived /// [`create_inbound_payment`]: Self::create_inbound_payment /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash + /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); @@ -3150,20 +3930,77 @@ impl ChannelMana } else { unreachable!(); } } + fn finalize_claims(&self, mut sources: Vec) { + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + let mut pending_events = self.pending_events.lock().unwrap(); + for source in sources.drain(..) { + if let HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } = source { + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { + assert!(payment.get().is_fulfilled()); + if payment.get_mut().remove(&session_priv_bytes, None) { + pending_events.push( + events::Event::PaymentPathSuccessful { + payment_id, + payment_hash: payment.get().payment_hash(), + path, + } + ); + } + if payment.get().remaining_parts() == 0 { + payment.remove(); + } + } + } + } + } + fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool) { match source { - HTLCSource::OutboundRoute { session_priv, mpp_id, .. } => { + HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { mem::drop(channel_state_lock); let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); - let found_payment = if let Some(mut sessions) = outbounds.remove(&mpp_id) { - sessions.remove(&session_priv_bytes) - } else { false }; - if found_payment { - self.pending_events.lock().unwrap().push( - events::Event::PaymentSent { payment_preimage } - ); + if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { + let mut pending_events = self.pending_events.lock().unwrap(); + if !payment.get().is_fulfilled() { + let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + let fee_paid_msat = payment.get().get_pending_fee_msat(); + pending_events.push( + events::Event::PaymentSent { + payment_id: Some(payment_id), + payment_preimage, + payment_hash, + fee_paid_msat, + } + ); + payment.get_mut().mark_fulfilled(); + } + + if from_onchain { + // We currently immediately remove HTLCs which were fulfilled on-chain. + // This could potentially lead to removing a pending payment too early, + // with a reorg of one block causing us to re-add the fulfilled payment on + // restart. + // TODO: We should have a second monitor event that informs us of payments + // irrevocably fulfilled. 
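The new `fee_paid_msat` reported in the `PaymentSent` event above comes from fee totals tracked per payment as paths are added and removed. Assuming each hop's `fee_msat` except the final hop's counts as routing fee (the final hop carries the delivered amount), a simplified sketch of the per-path fee calculation:

```rust
// Simplified stand-in for a hop in a payment path.
struct RouteHop { fee_msat: u64 }

// Everything except the final hop's fee_msat is a routing fee; the final hop's
// value is the amount delivered to the recipient.
fn path_fee_msat(path: &[RouteHop]) -> u64 {
    match path.split_last() {
        Some((_last, hops)) => hops.iter().map(|hop| hop.fee_msat).sum(),
        None => 0,
    }
}

fn main() {
    // Two-hop path: 1_000 msat fee at the intermediate hop, 50_000 msat delivered.
    let path = [RouteHop { fee_msat: 1_000 }, RouteHop { fee_msat: 50_000 }];
    assert_eq!(path_fee_msat(&path), 1_000);
}
```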
+ if payment.get_mut().remove(&session_priv_bytes, Some(&path)) { + let payment_hash = Some(PaymentHash(Sha256::hash(&payment_preimage.0).into_inner())); + pending_events.push( + events::Event::PaymentPathSuccessful { + payment_id, + payment_hash, + path, + } + ); + } + + if payment.get().remaining_parts() == 0 { + payment.remove(); + } + } } else { log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0)); } @@ -3225,31 +4062,11 @@ impl ChannelMana self.our_network_pubkey.clone() } - /// Restores a single, given channel to normal operation after a - /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update - /// operation. - /// - /// All ChannelMonitor updates up to and including highest_applied_update_id must have been - /// fully committed in every copy of the given channels' ChannelMonitors. - /// - /// Note that there is no effect to calling with a highest_applied_update_id other than the - /// current latest ChannelMonitorUpdate and one call to this function after multiple - /// ChannelMonitorUpdateErr::TemporaryFailures is fine. The highest_applied_update_id field - /// exists largely only to prevent races between this and concurrent update_monitor calls. - /// - /// Thus, the anticipated use is, at a high level: - /// 1) You register a chain::Watch with this ChannelManager, - /// 2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of - /// said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailures - /// any time it cannot do so instantly, - /// 3) update(s) are applied to each remote copy of a ChannelMonitor, - /// 4) once all remote copies are updated, you call this function with the update_id that - /// completed, and once it is the latest the Channel will be re-enabled. - pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) { + fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let chan_restoration_res; - let mut pending_failures = { + let (mut pending_failures, finalized_claims) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) { @@ -3260,8 +4077,8 @@ impl ChannelMana return; } - let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger); - let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() { + let updates = channel.get_mut().monitor_updating_restored(&self.logger); + let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() { // We only send a channel_update in the case where we are just now sending a // funding_locked and the channel is in a usable state. 
Further, we rely on the // normal announcement_signatures process to send a channel_update for public @@ -3271,13 +4088,14 @@ impl ChannelMana msg: self.get_channel_update_for_unicast(channel.get()).unwrap(), }) } else { None }; - chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked); + chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked); if let Some(upd) = channel_update { channel_state.pending_msg_events.push(upd); } - pending_failures + (updates.failed_htlcs, updates.finalized_claimed_htlcs) }; post_handle_chan_restoration!(self, chan_restoration_res); + self.finalize_claims(finalized_claims); for failure in pending_failures.drain(..) { self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); } @@ -3288,7 +4106,12 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone())); } - let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration) + if !self.default_configuration.accept_inbound_channels { + return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone())); + } + + let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), + &their_features, msg, 0, &self.default_configuration, self.best_block.read().unwrap().height(), &self.logger) .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?; let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -3366,7 +4189,7 @@ impl ChannelMana // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't // accepted payment from yet. We do, however, need to wait to send our funding_locked // until we have persisted our monitor. - chan.monitor_update_failed(false, false, Vec::new(), Vec::new()); + chan.monitor_update_failed(false, false, Vec::new(), Vec::new(), Vec::new()); }, } } @@ -3402,7 +4225,16 @@ impl ChannelMana Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), }; if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { - return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false); + let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false); + if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { + // We weren't able to watch the channel to begin with, so no updates should be made on + // it. Previously, full_stack_target found an (unreachable) panic when the + // monitor update contained within `shutdown_finish` was applied. 
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish { + shutdown_finish.0.take(); + } + } + return res } funding_tx }, @@ -3475,7 +4307,7 @@ impl ChannelMana if let Some(monitor_update) = monitor_update { if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key()); + handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key()); if is_permanent { remove_channel!(channel_state, chan_entry); break result; @@ -3545,6 +4377,7 @@ impl ChannelMana msg: update }); } + self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure); } Ok(()) } @@ -3761,37 +4594,51 @@ impl ChannelMana break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) = - break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); - htlcs_to_fail = htlcs_to_fail_in; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + let raa_updates = break_chan_entry!(self, + chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); + htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) { if was_frozen_for_monitor { - assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); + assert!(raa_updates.commitment_update.is_none()); + assert!(raa_updates.accepted_htlcs.is_empty()); + assert!(raa_updates.failed_htlcs.is_empty()); + assert!(raa_updates.finalized_claimed_htlcs.is_empty()); break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); } else { - if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) { + if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, + RAACommitmentOrder::CommitmentFirst, false, + raa_updates.commitment_update.is_some(), + raa_updates.accepted_htlcs, raa_updates.failed_htlcs, + raa_updates.finalized_claimed_htlcs) { break Err(e); } else { unreachable!(); } } } - if let Some(updates) = commitment_update { + if let Some(updates) = raa_updates.commitment_update { channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id.clone(), updates, }); } - break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap())) + break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs, + raa_updates.finalized_claimed_htlcs, + chan.get().get_short_channel_id() + .expect("RAA should only work on a short-id-available channel"), + chan.get().get_funding_txo().unwrap())) }, hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find 
corresponding channel".to_owned(), msg.channel_id)) } }; self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id); match res { - Ok((pending_forwards, mut pending_failures, short_channel_id, channel_outpoint)) => { + Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs, + short_channel_id, channel_outpoint)) => + { for failure in pending_failures.drain(..) { self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); } self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]); + self.finalize_claims(finalized_claim_htlcs); Ok(()) }, Err(e) => Err(e) @@ -3940,7 +4787,8 @@ impl ChannelMana self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } }, - MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => { + MonitorEvent::CommitmentTxConfirmed(funding_outpoint) | + MonitorEvent::UpdateFailed(funding_outpoint) => { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; let by_id = &mut channel_state.by_id; @@ -3956,6 +4804,12 @@ impl ChannelMana msg: update }); } + let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event { + ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() } + } else { + ClosureReason::CommitmentTxConfirmed + }; + self.issue_channel_close_events(&chan, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { @@ -3964,6 +4818,9 @@ impl ChannelMana }); } }, + MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => { + self.channel_monitor_updated(&funding_txo, monitor_update_id); + }, } } @@ -3974,6 +4831,14 @@ impl ChannelMana has_pending_monitor_events } + /// In chanmon_consistency_target, we'd like to be able to restore monitor updating without + /// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor + /// update events as a separate process method here. + #[cfg(feature = "fuzztarget")] + pub fn process_monitor_events(&self) { + self.process_pending_monitor_events(); + } + /// Check the holding cell in each channel and free any pending HTLCs in them if possible. /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor /// update was applied. 
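The monitor-event handling above maps each event kind to an action: commitment confirmation and persistence failure both force-close the channel (with different `ClosureReason`s), while `UpdateCompleted` resumes a channel that was paused on a temporary monitor failure. A rough model with simplified stand-in types:

```rust
// Simplified stand-ins for the monitor events and outcomes handled above.
enum MonitorEvent {
    CommitmentTxConfirmed,
    UpdateFailed,
    UpdateCompleted { monitor_update_id: u64 },
}

#[derive(Debug, PartialEq)]
enum Outcome {
    ForceClose { reason: String },
    ResumeChannel { highest_applied_update_id: u64 },
}

fn handle_monitor_event(event: MonitorEvent) -> Outcome {
    match event {
        MonitorEvent::CommitmentTxConfirmed => Outcome::ForceClose {
            reason: "CommitmentTxConfirmed".to_string(),
        },
        MonitorEvent::UpdateFailed => Outcome::ForceClose {
            reason: "Failed to persist ChannelMonitor update during chain sync".to_string(),
        },
        // A completed persistence round unblocks the channel that was frozen on a
        // temporary failure, via channel_monitor_updated in the diff above.
        MonitorEvent::UpdateCompleted { monitor_update_id } => Outcome::ResumeChannel {
            highest_applied_update_id: monitor_update_id,
        },
    }
}

fn main() {
    let out = handle_monitor_event(MonitorEvent::UpdateCompleted { monitor_update_id: 7 });
    assert_eq!(out, Outcome::ResumeChannel { highest_applied_update_id: 7 });
}
```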
@@ -4002,7 +4867,7 @@ impl ChannelMana if let Some((commitment_update, monitor_update)) = commitment_opt { if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { has_monitor_update = true; - let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id); + let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), channel_id); handle_errors.push((chan.get_counterparty_node_id(), res)); if close_channel { return false; } } else { @@ -4017,6 +4882,7 @@ impl ChannelMana Err(e) => { let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + // ChannelClosed event is generated by handle_error for us !close_channel } } @@ -4070,6 +4936,8 @@ impl ChannelMana }); } + self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); + log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transaction(&tx); false @@ -4115,9 +4983,13 @@ impl ChannelMana } } - fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option, min_value_msat: Option, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result { + fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result { assert!(invoice_expiry_delta_secs <= 60*60*24*365); // Sadly bitcoin timestamps are u32s, so panic before 2106 + if min_value_msat.is_some() && min_value_msat.unwrap() > MAX_VALUE_MSAT { + return Err(APIError::APIMisuseError { err: format!("min_value_msat of {} greater than total 21 million bitcoin supply", min_value_msat.unwrap()) }); + } + let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes()); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); @@ -4125,9 +4997,10 @@ impl ChannelMana match payment_secrets.entry(payment_hash) { hash_map::Entry::Vacant(e) => { e.insert(PendingInboundPayment { - payment_secret, min_value_msat, user_payment_id, payment_preimage, + payment_secret, min_value_msat, payment_preimage, + user_payment_id: 0, // For compatibility with version 0.0.103 and earlier // We assume that highest_seen_timestamp is pretty close to the current time - - // its updated when we receive a new block with the maximum time we've seen in + // it's updated when we receive a new block with the maximum time we've seen in // a header. It should never be more than two hours in the future. // Thus, we add two hours here as a buffer to ensure we absolutely // never fail a payment too early. @@ -4145,7 +5018,7 @@ impl ChannelMana /// to pay us. /// /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the - /// [`PaymentHash`] and [`PaymentPreimage`] for you, returning the first and storing the second. + /// [`PaymentHash`] and [`PaymentPreimage`] for you. /// /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentReceived`], which /// will have the [`PaymentReceived::payment_preimage`] field filled in. That should then be @@ -4153,17 +5026,37 @@ impl ChannelMana /// /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements. 
/// + /// Note that a malicious eavesdropper can intuit whether an inbound payment was created by + /// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime. + /// + /// # Note + /// + /// If you register an inbound payment with this method, then serialize the `ChannelManager`, then + /// deserialize it with a node running 0.0.103 and earlier, the payment will fail to be received. + /// + /// Errors if `min_value_msat` is greater than total bitcoin supply. + /// /// [`claim_funds`]: Self::claim_funds /// [`PaymentReceived`]: events::Event::PaymentReceived /// [`PaymentReceived::payment_preimage`]: events::Event::PaymentReceived::payment_preimage /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash - pub fn create_inbound_payment(&self, min_value_msat: Option, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> (PaymentHash, PaymentSecret) { + pub fn create_inbound_payment(&self, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> { + inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64) + } + + /// Legacy version of [`create_inbound_payment`]. Use this method if you wish to share + /// serialized state with LDK node(s) running 0.0.103 and earlier. + /// + /// # Note + /// This method is deprecated and will be removed soon. + /// + /// [`create_inbound_payment`]: Self::create_inbound_payment + #[deprecated] + pub fn create_inbound_payment_legacy(&self, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), APIError> { let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes()); let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); - - (payment_hash, - self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs, user_payment_id) - .expect("RNG Generated Duplicate PaymentHash")) + let payment_secret = self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)?; + Ok((payment_hash, payment_secret)) } /// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is @@ -4173,14 +5066,9 @@ impl ChannelMana /// payment secret fetched via this method or [`create_inbound_payment`], and which is at least /// the `min_value_msat` provided here, if one is provided. /// - /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This - /// method may return an Err if another payment with the same payment_hash is still pending. - /// - /// `user_payment_id` will be provided back in [`PaymentPurpose::InvoicePayment::user_payment_id`] events to - /// allow tracking of which events correspond with which calls to this and - /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply - /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events - /// with invoice metadata stored elsewhere. + /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) should be globally unique, though + /// note that LDK will not stop you from registering duplicate payment hashes for inbound + /// payments. /// /// `min_value_msat` should be set if the invoice being generated contains a value. 
Any payment /// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat` @@ -4199,20 +5087,45 @@ impl ChannelMana /// If you need exact expiry semantics, you should enforce them upon receipt of /// [`PaymentReceived`]. /// - /// Pending inbound payments are stored in memory and in serialized versions of this - /// [`ChannelManager`]. If potentially unbounded numbers of inbound payments may exist and - /// space is limited, you may wish to rate-limit inbound payment creation. - /// /// May panic if `invoice_expiry_delta_secs` is greater than one year. /// /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry` /// set to at least [`MIN_FINAL_CLTV_EXPIRY`]. /// + /// Note that a malicious eavesdropper can intuit whether an inbound payment was created by + /// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime. + /// + /// # Note + /// + /// If you register an inbound payment with this method, then serialize the `ChannelManager`, then + /// deserialize it with a node running 0.0.103 and earlier, the payment will fail to be received. + /// + /// Errors if `min_value_msat` is greater than total bitcoin supply. + /// /// [`create_inbound_payment`]: Self::create_inbound_payment /// [`PaymentReceived`]: events::Event::PaymentReceived - /// [`PaymentPurpose::InvoicePayment::user_payment_id`]: events::PaymentPurpose::InvoicePayment::user_payment_id - pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result { - self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id) + pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result { + inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64) + } + + /// Legacy version of [`create_inbound_payment_for_hash`]. Use this method if you wish to share + /// serialized state with LDK node(s) running 0.0.103 and earlier. + /// + /// # Note + /// This method is deprecated and will be removed soon. + /// + /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash + #[deprecated] + pub fn create_inbound_payment_for_hash_legacy(&self, payment_hash: PaymentHash, min_value_msat: Option, invoice_expiry_delta_secs: u32) -> Result { + self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs) + } + + /// Gets an LDK-generated payment preimage from a payment hash and payment secret that were + /// previously returned from [`create_inbound_payment`]. 
+ /// + /// [`create_inbound_payment`]: Self::create_inbound_payment + pub fn get_payment_preimage(&self, payment_hash: PaymentHash, payment_secret: PaymentSecret) -> Result { + inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key) } #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))] @@ -4222,6 +5135,16 @@ impl ChannelMana self.process_pending_events(&event_handler); events.into_inner() } + + #[cfg(test)] + pub fn has_pending_payments(&self) -> bool { + !self.pending_outbound_payments.lock().unwrap().is_empty() + } + + #[cfg(test)] + pub fn clear_pending_payments(&self) { + self.pending_outbound_payments.lock().unwrap().clear() + } } impl MessageSendEventsProvider for ChannelManager @@ -4397,6 +5320,21 @@ where payment_secrets.retain(|_, inbound_payment| { inbound_payment.expiry_time > header.time as u64 }); + + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + let mut pending_events = self.pending_events.lock().unwrap(); + outbounds.retain(|payment_id, payment| { + if payment.remaining_parts() != 0 { return true } + if let PendingOutboundPayment::Retryable { starting_block_height, payment_hash, .. } = payment { + if *starting_block_height + PAYMENT_EXPIRY_BLOCKS <= height { + log_info!(self.logger, "Timing out payment with id {} and hash {}", log_bytes!(payment_id.0), log_bytes!(payment_hash.0)); + pending_events.push(events::Event::PaymentFailed { + payment_id: *payment_id, payment_hash: *payment_hash, + }); + false + } else { true } + } else { true } + }); } fn get_relevant_txids(&self) -> Vec { @@ -4433,7 +5371,7 @@ where /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by /// the function. - fn do_chain_event) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>> + fn do_chain_event) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>), ClosureReason>> (&self, height_opt: Option, f: FN) { // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called // during initialization prior to the chain_monitor being fully configured in some cases. @@ -4478,7 +5416,7 @@ where } short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id()); } - } else if let Err(e) = res { + } else if let Err(reason) = res { if let Some(short_id) = channel.get_short_channel_id() { short_to_id.remove(&short_id); } @@ -4490,9 +5428,14 @@ where msg: update }); } + let reason_message = format!("{}", reason); + self.issue_channel_close_events(channel, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage { msg: e }, + action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { + channel_id: channel.channel_id(), + data: reason_message, + } }, }); return false; } @@ -4532,8 +5475,9 @@ where /// indicating whether persistence is necessary. Only one listener on /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken /// up. - /// Note that the feature `allow_wallclock_use` must be enabled to use this function. - #[cfg(any(test, feature = "allow_wallclock_use"))] + /// + /// Note that this method is not available with the `no-std` feature. 
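The `best_block_updated` hunk above also times out outbound payments by block height once no HTLCs remain in flight. A standalone sketch of that check (the constant here is a placeholder, not necessarily LDK's actual value):

```rust
// Placeholder value; the real constant lives in channelmanager.rs.
const PAYMENT_EXPIRY_BLOCKS: u32 = 3;

// A Retryable payment with nothing left in flight is failed once enough blocks have
// passed since it was first sent, generating a PaymentFailed event.
fn payment_timed_out(starting_block_height: u32, remaining_parts: usize, height: u32) -> bool {
    remaining_parts == 0 && starting_block_height + PAYMENT_EXPIRY_BLOCKS <= height
}

fn main() {
    assert!(!payment_timed_out(100, 1, 200)); // an HTLC is still pending, keep waiting
    assert!(!payment_timed_out(100, 0, 102)); // not enough blocks have passed yet
    assert!(payment_timed_out(100, 0, 103));
}
```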
+ #[cfg(any(test, feature = "std"))] pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool { self.persistence_notifier.wait_timeout(max_wait) } @@ -4680,6 +5624,7 @@ impl msg: update }); } + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); false } else { true @@ -4694,6 +5639,7 @@ impl if let Some(short_id) = chan.get_short_channel_id() { short_to_id.remove(&short_id); } + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); return false; } else { no_channels_remain = false; @@ -4784,12 +5730,12 @@ impl for chan in self.list_channels() { if chan.counterparty.node_id == *counterparty_node_id { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id)); + let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data)); } } } else { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id)); + let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data)); } } } @@ -4826,7 +5772,7 @@ impl PersistenceNotifier { } } - #[cfg(any(test, feature = "allow_wallclock_use"))] + #[cfg(any(test, feature = "std"))] fn wait_timeout(&self, max_wait: Duration) -> bool { let current_time = Instant::now(); loop { @@ -5037,23 +5983,29 @@ impl Readable for HTLCSource { let mut session_priv: ::util::ser::OptionDeserWrapper = ::util::ser::OptionDeserWrapper(None); let mut first_hop_htlc_msat: u64 = 0; let mut path = Some(Vec::new()); - let mut mpp_id = None; + let mut payment_id = None; + let mut payment_secret = None; + let mut payee = None; read_tlv_fields!(reader, { (0, session_priv, required), - (1, mpp_id, option), + (1, payment_id, option), (2, first_hop_htlc_msat, required), + (3, payment_secret, option), (4, path, vec_type), + (5, payee, option), }); - if mpp_id.is_none() { - // For backwards compat, if there was no mpp_id written, use the session_priv bytes + if payment_id.is_none() { + // For backwards compat, if there was no payment_id written, use the session_priv bytes // instead. 
- mpp_id = Some(MppId(*session_priv.0.unwrap().as_ref())); + payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref())); } Ok(HTLCSource::OutboundRoute { session_priv: session_priv.0.unwrap(), first_hop_htlc_msat: first_hop_htlc_msat, path: path.unwrap(), - mpp_id: mpp_id.unwrap(), + payment_id: payment_id.unwrap(), + payment_secret, + payee, }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), @@ -5065,14 +6017,16 @@ impl Readable for HTLCSource { impl Writeable for HTLCSource { fn write(&self, writer: &mut W) -> Result<(), ::io::Error> { match self { - HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, mpp_id } => { + HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payee } => { 0u8.write(writer)?; - let mpp_id_opt = Some(mpp_id); + let payment_id_opt = Some(payment_id); write_tlv_fields!(writer, { (0, session_priv, required), - (1, mpp_id_opt, option), + (1, payment_id_opt, option), (2, first_hop_htlc_msat, required), + (3, payment_secret, option), (4, path, vec_type), + (5, payee, option), }); } HTLCSource::PreviousHopData(ref field) => { @@ -5115,6 +6069,29 @@ impl_writeable_tlv_based!(PendingInboundPayment, { (8, min_value_msat, required), }); +impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, + (0, Legacy) => { + (0, session_privs, required), + }, + (1, Fulfilled) => { + (0, session_privs, required), + (1, payment_hash, option), + }, + (2, Retryable) => { + (0, session_privs, required), + (1, pending_fee_msat, option), + (2, payment_hash, required), + (4, payment_secret, option), + (6, total_msat, required), + (8, pending_amt_msat, required), + (10, starting_block_height, required), + }, + (3, Abandoned) => { + (0, session_privs, required), + (2, payment_hash, required), + }, +); + impl Writeable for ChannelManager where M::Target: chain::Watch, T::Target: BroadcasterInterface, @@ -5174,6 +6151,8 @@ impl Writeable f peer_state.latest_features.write(writer)?; } + let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); + let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); let events = self.pending_events.lock().unwrap(); (events.len() as u64).write(writer)?; for event in events.iter() { @@ -5195,28 +6174,48 @@ impl Writeable f (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?; (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?; - let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); (pending_inbound_payments.len() as u64).write(writer)?; for (hash, pending_payment) in pending_inbound_payments.iter() { hash.write(writer)?; pending_payment.write(writer)?; } - let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); // For backwards compat, write the session privs and their total length. 
let mut num_pending_outbounds_compat: u64 = 0; - for (_, outbounds) in pending_outbound_payments.iter() { - num_pending_outbounds_compat += outbounds.len() as u64; + for (_, outbound) in pending_outbound_payments.iter() { + if !outbound.is_fulfilled() && !outbound.abandoned() { + num_pending_outbounds_compat += outbound.remaining_parts() as u64; + } } num_pending_outbounds_compat.write(writer)?; - for (_, outbounds) in pending_outbound_payments.iter() { - for outbound in outbounds.iter() { - outbound.write(writer)?; + for (_, outbound) in pending_outbound_payments.iter() { + match outbound { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + for session_priv in session_privs.iter() { + session_priv.write(writer)?; + } + } + PendingOutboundPayment::Fulfilled { .. } => {}, + PendingOutboundPayment::Abandoned { .. } => {}, } } + // Encode without retry info for 0.0.101 compatibility. + let mut pending_outbound_payments_no_retry: HashMap> = HashMap::new(); + for (id, outbound) in pending_outbound_payments.iter() { + match outbound { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + pending_outbound_payments_no_retry.insert(*id, session_privs.clone()); + }, + _ => {}, + } + } write_tlv_fields!(writer, { - (1, pending_outbound_payments, required), + (1, pending_outbound_payments_no_retry, required), + (3, pending_outbound_payments, required), + (5, self.our_network_pubkey, required) }); Ok(()) @@ -5227,20 +6226,25 @@ impl Writeable f /// /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation /// is: -/// 1) Deserialize all stored ChannelMonitors. -/// 2) Deserialize the ChannelManager by filling in this struct and calling: -/// <(BlockHash, ChannelManager)>::read(reader, args) -/// This may result in closing some Channels if the ChannelMonitor is newer than the stored -/// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted. -/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same -/// way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and -/// ChannelMonitor::get_funding_txo(). -/// 4) Reconnect blocks on your ChannelMonitors. -/// 5) Disconnect/connect blocks on the ChannelManager. -/// 6) Move the ChannelMonitors into your local chain::Watch. +/// 1) Deserialize all stored [`ChannelMonitor`]s. +/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling: +/// `<(BlockHash, ChannelManager)>::read(reader, args)` +/// This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored +/// [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted. +/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the +/// same way you would handle a [`chain::Filter`] call using +/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`]. +/// 4) Reconnect blocks on your [`ChannelMonitor`]s. +/// 5) Disconnect/connect blocks on the [`ChannelManager`]. +/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk. +/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you +/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in +/// the next step. 
+/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a +/// [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`]. /// -/// Note that the ordering of #4-6 is not of importance, however all three must occur before you -/// call any other methods on the newly-deserialized ChannelManager. +/// Note that the ordering of #4-7 is not of importance, however all four must occur before you +/// call any other methods on the newly-deserialized [`ChannelManager`]. /// /// Note that because some channels may be closed during deserialization, it is critical that you /// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to @@ -5248,6 +6252,8 @@ impl Writeable f /// broadcast), and then later deserialize a newer version of the same ChannelManager (which will /// not force-close the same channels but consider them live), you may end up revoking a state for /// which you've already broadcasted the transaction. +/// +/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> where M::Target: chain::Watch, T::Target: BroadcasterInterface, @@ -5354,8 +6360,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut channel_closures = Vec::new(); for _ in 0..channel_count { - let mut channel: Channel = Channel::read(reader, &args.keys_manager)?; + let mut channel: Channel = Channel::read(reader, (&args.keys_manager, best_block_height))?; let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?; funding_txo_set.insert(funding_txo.clone()); if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { @@ -5370,7 +6377,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); - log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning"); + log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); return Err(DecodeError::InvalidValue); } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || @@ -5384,7 +6391,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let (_, mut new_failed_htlcs) = channel.force_shutdown(true); failed_htlcs.append(&mut new_failed_htlcs); monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger); + channel_closures.push(events::Event::ChannelClosed { + channel_id: channel.channel_id(), + user_channel_id: channel.get_user_id(), + reason: ClosureReason::OutdatedChannelManager + }); } else 
{ + log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id())); if let Some(short_channel_id) = channel.get_short_channel_id() { short_to_id.insert(short_channel_id, channel.channel_id()); } @@ -5395,13 +6408,14 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds."); - log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning"); + log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); return Err(DecodeError::InvalidValue); } } for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() { if !funding_txo_set.contains(funding_txo) { + log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id())); monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger); } } @@ -5449,6 +6463,16 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> None => continue, } } + if forward_htlcs_count > 0 { + // If we have pending HTLCs to forward, assume we either dropped a + // `PendingHTLCsForwardable` or the user received it but never processed it as they + // shut down before the timer hit. Either way, set the time_forwardable to a small + // constant as enough time has likely passed that we should simply handle the forwards + // now, or at least after the user gets a chance to reconnect to our peers. + pending_events_read.push(events::Event::PendingHTLCsForwardable { + time_forwardable: Duration::from_secs(2), + }); + } let background_event_count: u64 = Readable::read(reader)?; let mut pending_background_events_read: Vec = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::())); @@ -5471,26 +6495,100 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; - let mut pending_outbound_payments_compat: HashMap> = + let mut pending_outbound_payments_compat: HashMap = HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); for _ in 0..pending_outbound_payments_count_compat { let session_priv = Readable::read(reader)?; - if pending_outbound_payments_compat.insert(MppId(session_priv), [session_priv].iter().cloned().collect()).is_some() { + let payment = PendingOutboundPayment::Legacy { + session_privs: [session_priv].iter().cloned().collect() + }; + if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { return Err(DecodeError::InvalidValue) }; } + // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. 
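// To summarize the fallback logic below: there are now up to three sources of outbound
// payment state on read — the flat legacy list parsed above, the 0.0.101 `no_retry` TLV,
// and the full retryable TLV. The full TLV wins when present; otherwise the `no_retry`
// map is upgraded into `PendingOutboundPayment::Legacy` entries; only if neither TLV was
// written is the flat legacy list used.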
+ let mut pending_outbound_payments_no_retry: Option>> = None; let mut pending_outbound_payments = None; + let mut received_network_pubkey: Option = None; read_tlv_fields!(reader, { - (1, pending_outbound_payments, option), + (1, pending_outbound_payments_no_retry, option), + (3, pending_outbound_payments, option), + (5, received_network_pubkey, option) }); - if pending_outbound_payments.is_none() { + + if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { pending_outbound_payments = Some(pending_outbound_payments_compat); + } else if pending_outbound_payments.is_none() { + let mut outbounds = HashMap::new(); + for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() { + outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); + } + pending_outbound_payments = Some(outbounds); + } else { + // If we're tracking pending payments, ensure we haven't lost any by looking at the + // ChannelMonitor data for any channels for which we do not have authorative state + // (i.e. those for which we just force-closed above or we otherwise don't have a + // corresponding `Channel` at all). + // This avoids several edge-cases where we would otherwise "forget" about pending + // payments which are still in-flight via their on-chain state. + // We only rebuild the pending payments map if we were most recently serialized by + // 0.0.102+ + for (_, monitor) in args.channel_monitors { + if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() { + for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() { + if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source { + if path.is_empty() { + log_error!(args.logger, "Got an empty path for a pending payment"); + return Err(DecodeError::InvalidValue); + } + let path_amt = path.last().unwrap().fee_msat; + let mut session_priv_bytes = [0; 32]; + session_priv_bytes[..].copy_from_slice(&session_priv[..]); + match pending_outbound_payments.as_mut().unwrap().entry(payment_id) { + hash_map::Entry::Occupied(mut entry) => { + let newly_added = entry.get_mut().insert(session_priv_bytes, &path); + log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}", + if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0)); + }, + hash_map::Entry::Vacant(entry) => { + let path_fee = path.get_path_fees(); + entry.insert(PendingOutboundPayment::Retryable { + session_privs: [session_priv_bytes].iter().map(|a| *a).collect(), + payment_hash: htlc.payment_hash, + payment_secret, + pending_amt_msat: path_amt, + pending_fee_msat: Some(path_fee), + total_msat: path_amt, + starting_block_height: best_block_height, + }); + log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}", + path_amt, log_bytes!(htlc.payment_hash.0), log_bytes!(session_priv_bytes)); + } + } + } + } + } + } } let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes()); + if !channel_closures.is_empty() { + pending_events_read.append(&mut channel_closures); + } + + let our_network_pubkey = PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()); + if let Some(network_pubkey) = received_network_pubkey { + if network_pubkey != our_network_pubkey { + log_error!(args.logger, "Key that was generated does not match the existing key."); + 
return Err(DecodeError::InvalidValue); + } + } + + let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material(); + let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); let channel_manager = ChannelManager { genesis_hash, fee_estimator: args.fee_estimator, @@ -5506,11 +6604,12 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> claimable_htlcs, pending_msg_events: Vec::new(), }), + inbound_payment_key: expanded_inbound_key, pending_inbound_payments: Mutex::new(pending_inbound_payments), pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), our_network_key: args.keys_manager.get_node_secret(), - our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()), + our_network_pubkey, secp_ctx, last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize), @@ -5544,13 +6643,15 @@ mod tests { use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; use core::time::Duration; + use core::sync::atomic::Ordering; use ln::{PaymentPreimage, PaymentHash, PaymentSecret}; - use ln::channelmanager::{MppId, PaymentSendFailure}; - use ln::features::{InitFeatures, InvoiceFeatures}; + use ln::channelmanager::{PaymentId, PaymentSendFailure}; + use ln::channelmanager::inbound_payment; + use ln::features::InitFeatures; use ln::functional_test_utils::*; use ln::msgs; use ln::msgs::ChannelMessageHandler; - use routing::router::{get_keysend_route, get_route}; + use routing::router::{Payee, RouteParameters, find_route}; use util::errors::APIError; use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; use util::test_utils; @@ -5560,7 +6661,7 @@ mod tests { fn test_wait_timeout() { use ln::channelmanager::PersistenceNotifier; use sync::Arc; - use core::sync::atomic::{AtomicBool, Ordering}; + use core::sync::atomic::AtomicBool; use std::thread; let persistence_notifier = Arc::new(PersistenceNotifier::new()); @@ -5690,17 +6791,14 @@ mod tests { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // First, send a partial MPP payment. - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); - let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]); - let mpp_id = MppId([42; 32]); + let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000); + let payment_id = PaymentId([42; 32]); // Use the utility function send_payment_along_path to send the payment with MPP data which // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. 
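// (The path-level sender now also takes the route's `payee` and the renamed `PaymentId`,
// presumably so per-payment retry state can be tracked; the calls below thread
// `&route.payee` and `payment_id` through accordingly.)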
- nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap(); + nodes[0].node.send_payment_along_path(&route.paths[0], &route.payee, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5730,7 +6828,7 @@ mod tests { expect_payment_failed!(nodes[0], our_payment_hash, true); // Send the second half of the original MPP payment. - nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap(); + nodes[0].node.send_payment_along_path(&route.paths[0], &route.payee, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5768,12 +6866,31 @@ mod tests { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa); check_added_monitors!(nodes[0], 1); - // Note that successful MPP payments will generate 1 event upon the first path's success. No - // further events will be generated for subsequence path successes. + // Note that successful MPP payments will generate a single PaymentSent event upon the first + // path's success and a PaymentPathSuccessful event for each path's success. let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 3); match events[0] { - Event::PaymentSent { payment_preimage: ref preimage } => { + Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. } => { + assert_eq!(Some(payment_id), *id); assert_eq!(payment_preimage, *preimage); + assert_eq!(our_payment_hash, *hash); + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => { + assert_eq!(payment_id, *actual_payment_id); + assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap()); + assert_eq!(route.paths[0], *path); + }, + _ => panic!("Unexpected event"), + } + match events[2] { + Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => { + assert_eq!(payment_id, *actual_payment_id); + assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap()); + assert_eq!(route.paths[0], *path); }, _ => panic!("Unexpected event"), } @@ -5790,14 +6907,22 @@ mod tests { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); + let scorer = test_utils::TestScorer::with_fixed_penalty(0); // To start (1), send a regular payment but don't claim it. let expected_route = [&nodes[1]]; let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000); // Next, attempt a keysend payment and make sure it fails. 
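// Routing in the tests moves from `get_route`/`get_keysend_route` with a long positional
// argument list to `find_route`, which takes a `RouteParameters` (describing the `Payee`,
// amount, and final CLTV delta) plus a `Scorer` used for channel selection.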
- let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let params = RouteParameters { + payee: Payee::for_keysend(expected_route.last().unwrap().node.get_our_node_id()), + final_value_msat: 100_000, + final_cltv_expiry_delta: TEST_FINAL_CLTV, + }; + let route = find_route( + &nodes[0].node.get_our_node_id(), ¶ms, nodes[0].network_graph, None, + nodes[0].logger, &scorer + ).unwrap(); nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -5825,8 +6950,11 @@ mod tests { // To start (2), send a keysend payment but don't claim it. let payment_preimage = PaymentPreimage([42; 32]); - let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); - let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + let route = find_route( + &nodes[0].node.get_our_node_id(), ¶ms, nodes[0].network_graph, None, + nodes[0].logger, &scorer + ).unwrap(); + let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5877,15 +7005,22 @@ mod tests { nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() }); let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); - let network_graph = &nodes[0].net_graph_msg_handler.network_graph; + let params = RouteParameters { + payee: Payee::for_keysend(payee_pubkey), + final_value_msat: 10000, + final_cltv_expiry_delta: 40, + }; + let network_graph = nodes[0].network_graph; let first_hops = nodes[0].node.list_usable_channels(); - let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey, - Some(&first_hops.iter().collect::>()), &vec![], 10000, 40, - nodes[0].logger).unwrap(); + let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let route = find_route( + &payer_pubkey, ¶ms, network_graph, Some(&first_hops.iter().collect::>()), + nodes[0].logger, &scorer + ).unwrap(); let test_preimage = PaymentPreimage([42; 32]); let mismatch_payment_hash = PaymentHash([43; 32]); - let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap(); + let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -5913,16 +7048,23 @@ mod tests { nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() }); let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); - let network_graph = &nodes[0].net_graph_msg_handler.network_graph; + let params = RouteParameters { + payee: Payee::for_keysend(payee_pubkey), + final_value_msat: 10000, + final_cltv_expiry_delta: 40, + }; + let network_graph = nodes[0].network_graph; let first_hops = 
nodes[0].node.list_usable_channels(); - let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey, - Some(&first_hops.iter().collect::>()), &vec![], 10000, 40, - nodes[0].logger).unwrap(); + let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let route = find_route( + &payer_pubkey, ¶ms, network_graph, Some(&first_hops.iter().collect::>()), + nodes[0].logger, &scorer + ).unwrap(); let test_preimage = PaymentPreimage([42; 32]); let test_secret = PaymentSecret([43; 32]); let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner()); - let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap(); + let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -5947,12 +7089,9 @@ mod tests { let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let logger = test_utils::TestLogger::new(); // Marshall an MPP route. - let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); @@ -5968,20 +7107,49 @@ mod tests { _ => panic!("unexpected error") } } + + #[test] + fn bad_inbound_payment_hash() { + // Add coverage for checking that a user-provided payment hash matches the payment secret. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]); + let payment_data = msgs::FinalOnionHopData { + payment_secret, + total_msat: 100_000, + }; + + // Ensure that if the payment hash given to `inbound_payment::verify` differs from the original, + // payment verification fails as expected. + let mut bad_payment_hash = payment_hash.clone(); + bad_payment_hash.0[0] += 1; + match inbound_payment::verify(bad_payment_hash, payment_data.clone(), nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) { + Ok(_) => panic!("Unexpected ok"), + Err(()) => { + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1); + } + } + + // Check that using the original payment hash succeeds. 
+ assert!(inbound_payment::verify(payment_hash, payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok()); + } } #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))] pub mod bench { use chain::Listen; - use chain::chainmonitor::ChainMonitor; - use chain::channelmonitor::Persist; + use chain::chainmonitor::{ChainMonitor, Persist}; use chain::keysinterface::{KeysManager, InMemorySigner}; use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage}; use ln::features::{InitFeatures, InvoiceFeatures}; use ln::functional_test_utils::*; use ln::msgs::{ChannelMessageHandler, Init}; use routing::network_graph::NetworkGraph; - use routing::router::get_route; + use routing::router::{Payee, get_route}; + use routing::scoring::Scorer; use util::test_utils; use util::config::UserConfig; use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; @@ -6089,14 +7257,17 @@ pub mod bench { macro_rules! send_payment { ($node_a: expr, $node_b: expr) => { let usable_channels = $node_a.list_usable_channels(); - let route = get_route(&$node_a.get_our_node_id(), &dummy_graph, &$node_b.get_our_node_id(), Some(InvoiceFeatures::known()), - Some(&usable_channels.iter().map(|r| r).collect::>()), &[], 10_000, TEST_FINAL_CLTV, &logger_a).unwrap(); + let payee = Payee::from_node_id($node_b.get_our_node_id()) + .with_features(InvoiceFeatures::known()); + let scorer = Scorer::with_fixed_penalty(0); + let route = get_route(&$node_a.get_our_node_id(), &payee, &dummy_graph, + Some(&usable_channels.iter().map(|r| r).collect::>()), 10_000, TEST_FINAL_CLTV, &logger_a, &scorer).unwrap(); let mut payment_preimage = PaymentPreimage([0; 32]); payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes()); payment_count += 1; let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()); - let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap(); + let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap(); $node_a.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap()); diff --git a/lightning/src/ln/features.rs b/lightning/src/ln/features.rs index 8f251d8478c..dab84132780 100644 --- a/lightning/src/ln/features.rs +++ b/lightning/src/ln/features.rs @@ -22,7 +22,7 @@ //! [BOLT #9]: https://github.com/lightningnetwork/lightning-rfc/blob/master/09-features.md //! [messages]: crate::ln::msgs -use io; +use {io, io_extras}; use prelude::*; use core::{cmp, fmt}; use core::hash::{Hash, Hasher}; @@ -126,6 +126,10 @@ mod sealed { , // Byte 3 , + // Byte 4 + , + // Byte 5 + , ], optional_features: [ // Byte 0 @@ -136,6 +140,10 @@ mod sealed { BasicMPP, // Byte 3 ShutdownAnySegwit, + // Byte 4 + , + // Byte 5 + ChannelType, ], }); define_context!(NodeContext { @@ -167,7 +175,7 @@ mod sealed { // Byte 4 , // Byte 5 - , + ChannelType, // Byte 6 Keysend, ], @@ -194,12 +202,36 @@ mod sealed { BasicMPP, ], }); + // This isn't a "real" feature context, and is only used in the channel_type field in an + // `OpenChannel` message. 
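// The new ChannelTypeContext below intentionally has no meaningful optional side: a
// channel_type is a fixed set of required bits proposed by the opener, so (as the
// ChannelTypeFeatures docs added later in this diff note) only required features make
// sense in this context.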
+ define_context!(ChannelTypeContext { + required_features: [ + // Byte 0 + , + // Byte 1 + StaticRemoteKey, + // Byte 2 + , + // Byte 3 + , + ], + optional_features: [ + // Byte 0 + , + // Byte 1 + , + // Byte 2 + , + // Byte 3 + , + ], + }); /// Defines a feature with the given bits for the specified [`Context`]s. The generated trait is /// useful for manipulating feature flags. macro_rules! define_feature { ($odd_bit: expr, $feature: ident, [$($context: ty),+], $doc: expr, $optional_setter: ident, - $required_setter: ident) => { + $required_setter: ident, $supported_getter: ident) => { #[doc = $doc] /// /// See [BOLT #9] for details. @@ -296,6 +328,11 @@ mod sealed { ::set_required_bit(&mut self.flags); self } + + /// Checks if this feature is supported. + pub fn $supported_getter(&self) -> bool { + ::supports_feature(&self.flags) + } } $( @@ -307,41 +344,60 @@ mod sealed { const ASSERT_ODD_BIT_PARITY: usize = (::ODD_BIT % 2) - 1; } )* - + }; + ($odd_bit: expr, $feature: ident, [$($context: ty),+], $doc: expr, $optional_setter: ident, + $required_setter: ident, $supported_getter: ident, $required_getter: ident) => { + define_feature!($odd_bit, $feature, [$($context),+], $doc, $optional_setter, $required_setter, $supported_getter); + impl Features { + /// Checks if this feature is required. + pub fn $required_getter(&self) -> bool { + ::requires_feature(&self.flags) + } + } } } define_feature!(1, DataLossProtect, [InitContext, NodeContext], "Feature flags for `option_data_loss_protect`.", set_data_loss_protect_optional, - set_data_loss_protect_required); + set_data_loss_protect_required, supports_data_loss_protect, requires_data_loss_protect); // NOTE: Per Bolt #9, initial_routing_sync has no even bit. define_feature!(3, InitialRoutingSync, [InitContext], "Feature flags for `initial_routing_sync`.", - set_initial_routing_sync_optional, set_initial_routing_sync_required); + set_initial_routing_sync_optional, set_initial_routing_sync_required, + initial_routing_sync); define_feature!(5, UpfrontShutdownScript, [InitContext, NodeContext], "Feature flags for `option_upfront_shutdown_script`.", set_upfront_shutdown_script_optional, - set_upfront_shutdown_script_required); + set_upfront_shutdown_script_required, supports_upfront_shutdown_script, + requires_upfront_shutdown_script); define_feature!(7, GossipQueries, [InitContext, NodeContext], - "Feature flags for `gossip_queries`.", set_gossip_queries_optional, set_gossip_queries_required); + "Feature flags for `gossip_queries`.", set_gossip_queries_optional, set_gossip_queries_required, + supports_gossip_queries, requires_gossip_queries); define_feature!(9, VariableLengthOnion, [InitContext, NodeContext, InvoiceContext], "Feature flags for `var_onion_optin`.", set_variable_length_onion_optional, - set_variable_length_onion_required); - define_feature!(13, StaticRemoteKey, [InitContext, NodeContext], + set_variable_length_onion_required, supports_variable_length_onion, + requires_variable_length_onion); + define_feature!(13, StaticRemoteKey, [InitContext, NodeContext, ChannelTypeContext], "Feature flags for `option_static_remotekey`.", set_static_remote_key_optional, - set_static_remote_key_required); + set_static_remote_key_required, supports_static_remote_key, requires_static_remote_key); define_feature!(15, PaymentSecret, [InitContext, NodeContext, InvoiceContext], - "Feature flags for `payment_secret`.", set_payment_secret_optional, set_payment_secret_required); + "Feature flags for `payment_secret`.", set_payment_secret_optional, 
set_payment_secret_required, + supports_payment_secret, requires_payment_secret); define_feature!(17, BasicMPP, [InitContext, NodeContext, InvoiceContext], - "Feature flags for `basic_mpp`.", set_basic_mpp_optional, set_basic_mpp_required); + "Feature flags for `basic_mpp`.", set_basic_mpp_optional, set_basic_mpp_required, + supports_basic_mpp, requires_basic_mpp); define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext], "Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional, - set_shutdown_any_segwit_required); + set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit); + define_feature!(45, ChannelType, [InitContext, NodeContext], + "Feature flags for `option_channel_type`.", set_channel_type_optional, + set_channel_type_required, supports_channel_type, requires_channel_type); define_feature!(55, Keysend, [NodeContext], - "Feature flags for keysend payments.", set_keysend_optional, set_keysend_required); + "Feature flags for keysend payments.", set_keysend_optional, set_keysend_required, + supports_keysend, requires_keysend); #[cfg(test)] define_feature!(123456789, UnknownFeature, [NodeContext, ChannelContext, InvoiceContext], "Feature flags for an unknown feature used in testing.", set_unknown_feature_optional, - set_unknown_feature_required); + set_unknown_feature_required, supports_unknown_test_feature, requires_unknown_test_feature); } /// Tracks the set of features which a node implements, templated by the context in which it @@ -388,6 +444,18 @@ pub type ChannelFeatures = Features; /// Features used within an invoice. pub type InvoiceFeatures = Features; +/// Features used within the channel_type field in an OpenChannel message. +/// +/// A channel is always of some known "type", describing the transaction formats used and the exact +/// semantics of our interaction with our peer. +/// +/// Note that because a channel is a specific type which is proposed by the opener and accepted by +/// the counterparty, only required features are allowed here. +/// +/// This is serialized differently from other feature types - it is not prefixed by a length, and +/// thus must only appear inside a TLV where its length is known in advance. +pub type ChannelTypeFeatures = Features; + impl InitFeatures { /// Writes all features present up to, and including, 13. pub(crate) fn write_up_to_13(&self, w: &mut W) -> Result<(), io::Error> { @@ -432,16 +500,38 @@ impl InvoiceFeatures { /// Getting a route for a keysend payment to a private node requires providing the payee's /// features (since they were not announced in a node announcement). However, keysend payments /// don't have an invoice to pull the payee's features from, so this method is provided for use in - /// [`get_keysend_route`], thus omitting the need for payers to manually construct an - /// `InvoiceFeatures` for [`get_route`]. + /// [`Payee::for_keysend`], thus omitting the need for payers to manually construct an + /// `InvoiceFeatures` for [`find_route`]. 
/// - /// [`get_keysend_route`]: crate::routing::router::get_keysend_route - /// [`get_route`]: crate::routing::router::get_route + /// [`Payee::for_keysend`]: crate::routing::router::Payee::for_keysend + /// [`find_route`]: crate::routing::router::find_route pub(crate) fn for_keysend() -> InvoiceFeatures { InvoiceFeatures::empty().set_variable_length_onion_optional() } } +impl ChannelTypeFeatures { + /// Constructs the implicit channel type based on the common supported types between us and our + /// counterparty + pub(crate) fn from_counterparty_init(counterparty_init: &InitFeatures) -> Self { + let mut ret = counterparty_init.to_context_internal(); + // ChannelTypeFeatures must only contain required bits, so we OR the required forms of all + // optional bits and then AND out the optional ones. + for byte in ret.flags.iter_mut() { + *byte |= (*byte & 0b10_10_10_10) >> 1; + *byte &= 0b01_01_01_01; + } + ret + } + + /// Constructs a ChannelTypeFeatures with only static_remotekey set + pub(crate) fn only_static_remote_key() -> Self { + let mut ret = Self::empty(); + ::set_required_bit(&mut ret.flags); + ret + } +} + impl ToBase32 for InvoiceFeatures { fn write_base32(&self, writer: &mut W) -> Result<(), ::Err> { // Explanation for the "4": the normal way to round up when dividing is to add the divisor @@ -553,6 +643,25 @@ impl Features { &self.flags } + fn write_be(&self, w: &mut W) -> Result<(), io::Error> { + for f in self.flags.iter().rev() { // Swap back to big-endian + f.write(w)?; + } + Ok(()) + } + + fn from_be_bytes(mut flags: Vec) -> Features { + flags.reverse(); // Swap to little-endian + Self { + flags, + mark: PhantomData, + } + } + + pub(crate) fn supports_any_optional_bits(&self) -> bool { + self.flags.iter().any(|&byte| (byte & 0b10_10_10_10) != 0) + } + /// Returns true if this `Features` object contains unknown feature flags which are set as /// "required". 
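// A quick note on the bit layout used above: feature bits come in even/odd pairs, where
// the even bit of each pair means "required" and the odd bit means "optional". That is
// why `from_counterparty_init` ORs the optional bits (mask 0b10_10_10_10) down onto their
// even neighbours and then keeps only the even bits (mask 0b01_01_01_01), producing a
// purely-required channel type.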
pub fn requires_unknown_bits(&self) -> bool { @@ -585,25 +694,7 @@ impl Features { } } -impl Features { - #[cfg(test)] - pub(crate) fn requires_data_loss_protect(&self) -> bool { - ::requires_feature(&self.flags) - } - #[cfg(test)] - pub(crate) fn supports_data_loss_protect(&self) -> bool { - ::supports_feature(&self.flags) - } -} - impl Features { - #[cfg(test)] - pub(crate) fn requires_upfront_shutdown_script(&self) -> bool { - ::requires_feature(&self.flags) - } - pub(crate) fn supports_upfront_shutdown_script(&self) -> bool { - ::supports_feature(&self.flags) - } #[cfg(test)] pub(crate) fn clear_upfront_shutdown_script(mut self) -> Self { ::clear_bits(&mut self.flags); @@ -613,13 +704,6 @@ impl Features { impl Features { - #[cfg(test)] - pub(crate) fn requires_gossip_queries(&self) -> bool { - ::requires_feature(&self.flags) - } - pub(crate) fn supports_gossip_queries(&self) -> bool { - ::supports_feature(&self.flags) - } #[cfg(test)] pub(crate) fn clear_gossip_queries(mut self) -> Self { ::clear_bits(&mut self.flags); @@ -627,30 +711,7 @@ impl Features { } } -impl Features { - #[cfg(test)] - pub(crate) fn requires_variable_length_onion(&self) -> bool { - ::requires_feature(&self.flags) - } - pub(crate) fn supports_variable_length_onion(&self) -> bool { - ::supports_feature(&self.flags) - } -} - -impl Features { - pub(crate) fn supports_static_remote_key(&self) -> bool { - ::supports_feature(&self.flags) - } - #[cfg(test)] - pub(crate) fn requires_static_remote_key(&self) -> bool { - ::requires_feature(&self.flags) - } -} - impl Features { - pub(crate) fn initial_routing_sync(&self) -> bool { - ::supports_feature(&self.flags) - } // We are no longer setting initial_routing_sync now that gossip_queries // is enabled. This feature is ignored by a peer when gossip_queries has // been negotiated. @@ -660,63 +721,51 @@ impl Features { } } -impl Features { - #[cfg(test)] - pub(crate) fn requires_payment_secret(&self) -> bool { - ::requires_feature(&self.flags) - } - /// Returns whether the `payment_secret` feature is supported. - pub fn supports_payment_secret(&self) -> bool { - ::supports_feature(&self.flags) - } -} - -impl Features { - #[cfg(test)] - pub(crate) fn requires_basic_mpp(&self) -> bool { - ::requires_feature(&self.flags) - } - // We currently never test for this since we don't actually *generate* multipath routes. - pub(crate) fn supports_basic_mpp(&self) -> bool { - ::supports_feature(&self.flags) - } -} - impl Features { - pub(crate) fn supports_shutdown_anysegwit(&self) -> bool { - ::supports_feature(&self.flags) - } #[cfg(test)] pub(crate) fn clear_shutdown_anysegwit(mut self) -> Self { ::clear_bits(&mut self.flags); self } } - -impl Writeable for Features { - fn write(&self, w: &mut W) -> Result<(), io::Error> { - (self.flags.len() as u16).write(w)?; - for f in self.flags.iter().rev() { // Swap back to big-endian - f.write(w)?; +macro_rules! 
impl_feature_len_prefixed_write { + ($features: ident) => { + impl Writeable for $features { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + (self.flags.len() as u16).write(w)?; + self.write_be(w) + } + } + impl Readable for $features { + fn read(r: &mut R) -> Result { + Ok(Self::from_be_bytes(Vec::::read(r)?)) + } } - Ok(()) } } - -impl Readable for Features { +impl_feature_len_prefixed_write!(InitFeatures); +impl_feature_len_prefixed_write!(ChannelFeatures); +impl_feature_len_prefixed_write!(NodeFeatures); +impl_feature_len_prefixed_write!(InvoiceFeatures); + +// Because ChannelTypeFeatures only appears inside of TLVs, it doesn't have a length prefix when +// serialized. Thus, we can't use `impl_feature_len_prefixed_write`, above, and have to write our +// own serialization. +impl Writeable for ChannelTypeFeatures { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + self.write_be(w) + } +} +impl Readable for ChannelTypeFeatures { fn read(r: &mut R) -> Result { - let mut flags: Vec = Readable::read(r)?; - flags.reverse(); // Swap to little-endian - Ok(Self { - flags, - mark: PhantomData, - }) + let v = io_extras::read_to_end(r)?; + Ok(Self::from_be_bytes(v)) } } #[cfg(test)] mod tests { - use super::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures}; + use super::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, InvoiceFeatures, NodeFeatures}; use bitcoin::bech32::{Base32Len, FromBase32, ToBase32, u5}; #[test] @@ -769,6 +818,11 @@ mod tests { assert!(!NodeFeatures::known().requires_basic_mpp()); assert!(!InvoiceFeatures::known().requires_basic_mpp()); + assert!(InitFeatures::known().supports_channel_type()); + assert!(NodeFeatures::known().supports_channel_type()); + assert!(!InitFeatures::known().requires_channel_type()); + assert!(!NodeFeatures::known().requires_channel_type()); + assert!(InitFeatures::known().supports_shutdown_anysegwit()); assert!(NodeFeatures::known().supports_shutdown_anysegwit()); @@ -807,11 +861,15 @@ mod tests { // - var_onion_optin (req) | static_remote_key (req) | payment_secret(req) // - basic_mpp // - opt_shutdown_anysegwit - assert_eq!(node_features.flags.len(), 4); + // - + // - option_channel_type + assert_eq!(node_features.flags.len(), 6); assert_eq!(node_features.flags[0], 0b00000010); assert_eq!(node_features.flags[1], 0b01010001); assert_eq!(node_features.flags[2], 0b00000010); assert_eq!(node_features.flags[3], 0b00001000); + assert_eq!(node_features.flags[4], 0b00000000); + assert_eq!(node_features.flags[5], 0b00100000); } // Check that cleared flags are kept blank when converting back: @@ -875,4 +933,15 @@ mod tests { let features_deserialized = InvoiceFeatures::from_base32(&features_as_u5s).unwrap(); assert_eq!(features, features_deserialized); } + + #[test] + fn test_channel_type_mapping() { + // If we map an InvoiceFeatures with StaticRemoteKey optional, it should map into a + // required-StaticRemoteKey ChannelTypeFeatures. 
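// Concretely: starting from an `InitFeatures` with static_remote_key set optional, the
// conversion should yield a `ChannelTypeFeatures` in which static_remote_key is required
// and no optional bits survive.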
+ let init_features = InitFeatures::empty().set_static_remote_key_optional(); + let converted_features = ChannelTypeFeatures::from_counterparty_init(&init_features); + assert_eq!(converted_features, ChannelTypeFeatures::only_static_remote_key()); + assert!(!converted_features.supports_any_optional_bits()); + assert!(converted_features.requires_static_remote_key()); + } } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index dd1fedd9733..2e8994b5880 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -14,15 +14,15 @@ use chain::{BestBlock, Confirm, Listen, Watch}; use chain::channelmonitor::ChannelMonitor; use chain::transaction::OutPoint; use ln::{PaymentPreimage, PaymentHash, PaymentSecret}; -use ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure}; -use routing::router::{Route, get_route}; +use ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, PaymentId}; use routing::network_graph::{NetGraphMsgHandler, NetworkGraph}; +use routing::router::{Payee, Route, get_route}; use ln::features::{InitFeatures, InvoiceFeatures}; use ln::msgs; use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler}; use util::enforcing_trait_impls::EnforcingSigner; use util::test_utils; -use util::test_utils::TestChainMonitor; +use util::test_utils::{panicking, TestChainMonitor}; use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; use util::errors::APIError; use util::config::UserConfig; @@ -33,8 +33,6 @@ use bitcoin::blockdata::constants::genesis_block; use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::network::constants::Network; -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; use bitcoin::hash_types::BlockHash; use bitcoin::secp256k1::key::PublicKey; @@ -42,7 +40,7 @@ use bitcoin::secp256k1::key::PublicKey; use io; use prelude::*; use core::cell::RefCell; -use std::rc::Rc; +use alloc::rc::Rc; use sync::{Arc, Mutex}; use core::mem; @@ -189,6 +187,7 @@ pub struct TestChanMonCfg { pub persister: test_utils::TestPersister, pub logger: test_utils::TestLogger, pub keys_manager: test_utils::TestKeysInterface, + pub network_graph: NetworkGraph, } pub struct NodeCfg<'a> { @@ -198,6 +197,7 @@ pub struct NodeCfg<'a> { pub chain_monitor: test_utils::TestChainMonitor<'a>, pub keys_manager: &'a test_utils::TestKeysInterface, pub logger: &'a test_utils::TestLogger, + pub network_graph: &'a NetworkGraph, pub node_seed: [u8; 32], pub features: InitFeatures, } @@ -208,7 +208,8 @@ pub struct Node<'a, 'b: 'a, 'c: 'b> { pub chain_monitor: &'b test_utils::TestChainMonitor<'c>, pub keys_manager: &'b test_utils::TestKeysInterface, pub node: &'a ChannelManager, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>, - pub net_graph_msg_handler: NetGraphMsgHandler<&'c test_utils::TestChainSource, &'c test_utils::TestLogger>, + pub network_graph: &'c NetworkGraph, + pub net_graph_msg_handler: NetGraphMsgHandler<&'c NetworkGraph, &'c test_utils::TestChainSource, &'c test_utils::TestLogger>, pub node_seed: [u8; 32], pub network_payment_count: Rc>, pub network_chan_count: Rc>, @@ -230,7 +231,7 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> { impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { fn drop(&mut self) { - if !::std::thread::panicking() { + if !panicking() { 
// Check that we processed all pending events assert!(self.node.get_and_clear_pending_msg_events().is_empty()); assert!(self.node.get_and_clear_pending_events().is_empty()); @@ -239,12 +240,11 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { // Check that if we serialize the Router, we can deserialize it again. { let mut w = test_utils::TestVecWriter(Vec::new()); - let network_graph_ser = &self.net_graph_msg_handler.network_graph; - network_graph_ser.write(&mut w).unwrap(); + self.network_graph.write(&mut w).unwrap(); let network_graph_deser = ::read(&mut io::Cursor::new(&w.0)).unwrap(); - assert!(network_graph_deser == self.net_graph_msg_handler.network_graph); + assert!(network_graph_deser == *self.network_graph); let net_graph_msg_handler = NetGraphMsgHandler::new( - network_graph_deser, Some(self.chain_source), self.logger + &network_graph_deser, Some(self.chain_source), self.logger ); let mut chan_progress = 0; loop { @@ -274,10 +274,9 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; let mut deserialized_monitors = Vec::new(); { - let old_monitors = self.chain_monitor.chain_monitor.monitors.read().unwrap(); - for (_, old_monitor) in old_monitors.iter() { + for outpoint in self.chain_monitor.chain_monitor.list_monitors() { let mut w = test_utils::TestVecWriter(Vec::new()); - old_monitor.write(&mut w).unwrap(); + self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap(); let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), self.keys_manager).unwrap(); deserialized_monitors.push(deserialized_monitor); @@ -336,9 +335,12 @@ pub fn create_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, (announcement, as_update, bs_update, channel_id, tx) } +#[macro_export] +/// Gets an RAA and CS which were sent in response to a commitment update macro_rules! get_revoke_commit_msgs { ($node: expr, $node_id: expr) => { { + use $crate::util::events::MessageSendEvent; let events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); (match events[0] { @@ -399,14 +401,15 @@ macro_rules! get_event { } } -#[cfg(test)] +#[macro_export] +/// Gets an UpdateHTLCs MessageSendEvent macro_rules! get_htlc_update_msgs { ($node: expr, $node_id: expr) => { { let events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + $crate::util::events::MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { assert_eq!(*node_id, $node_id); (*updates).clone() }, @@ -437,20 +440,46 @@ macro_rules! get_feerate { } } -/// Returns any local commitment transactions for the channel. +#[cfg(test)] +macro_rules! get_opt_anchors { + ($node: expr, $channel_id: expr) => { + { + let mut lock; + let chan = get_channel_ref!($node, lock, $channel_id); + chan.opt_anchors() + } + } +} + +/// Returns a channel monitor given a channel id, making some naive assumptions #[macro_export] -macro_rules! get_local_commitment_txn { +macro_rules! 
get_monitor { ($node: expr, $channel_id: expr) => { { - let monitors = $node.chain_monitor.chain_monitor.monitors.read().unwrap(); - let mut commitment_txn = None; - for (funding_txo, monitor) in monitors.iter() { - if funding_txo.to_channel_id() == $channel_id { - commitment_txn = Some(monitor.unsafe_get_latest_holder_commitment_txn(&$node.logger)); + use bitcoin::hashes::Hash; + let mut monitor = None; + // Assume funding vout is either 0 or 1 blindly + for index in 0..2 { + if let Ok(mon) = $node.chain_monitor.chain_monitor.get_monitor( + $crate::chain::transaction::OutPoint { + txid: bitcoin::Txid::from_slice(&$channel_id[..]).unwrap(), index + }) + { + monitor = Some(mon); break; } } - commitment_txn.unwrap() + monitor.unwrap() + } + } +} + +/// Returns any local commitment transactions for the channel. +#[macro_export] +macro_rules! get_local_commitment_txn { + ($node: expr, $channel_id: expr) => { + { + $crate::get_monitor!($node, $channel_id).unsafe_get_latest_holder_commitment_txn(&$node.logger) } } } @@ -467,9 +496,9 @@ macro_rules! unwrap_send_err { _ => panic!(), } }, - &Err(PaymentSendFailure::PartialFailure(ref fails)) if !$all_failed => { - assert_eq!(fails.len(), 1); - match fails[0] { + &Err(PaymentSendFailure::PartialFailure { ref results, .. }) if !$all_failed => { + assert_eq!(results.len(), 1); + match results[0] { Err($type) => { $check }, _ => panic!(), } @@ -510,18 +539,16 @@ pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_ _ => panic!("Unexpected event"), } } - -pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> Transaction { - node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap(); - node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())); - node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())); - +pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: [u8; 32]) -> Transaction { let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, channel_value, 42); + assert_eq!(temporary_channel_id, expected_temporary_channel_id); - node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap(); + assert!(node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).is_ok()); check_added_monitors!(node_a, 0); - node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())); + let funding_created_msg = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()); + assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id); + node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &funding_created_msg); { let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); @@ -544,9 +571,26 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, ' assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); 
node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + // Ensure that funding_transaction_generated is idempotent. + assert!(node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).is_err()); + assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(node_a, 0); + tx } +pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> Transaction { + let create_chan_id = node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap(); + let open_channel_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()); + assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id); + node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &open_channel_msg); + let accept_channel_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()); + assert_eq!(accept_channel_msg.temporary_channel_id, create_chan_id); + node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &accept_channel_msg); + + sign_funding_transaction(node_a, node_b, channel_value, create_chan_id) +} + pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_recv: &'a Node<'b, 'c, 'c>, node_conf: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) { confirm_transaction_at(node_conf, tx, conf_height); connect_blocks(node_conf, CHAN_CONFIRM_DEPTH - 1); @@ -687,9 +731,18 @@ pub fn update_nodes_with_chan_announce<'a, 'b, 'c, 'd>(nodes: &'a Vec { { + $( + for outp in $spends_txn.output.iter() { + assert!(outp.value >= outp.script_pubkey.dust_value().as_sat(), "Input tx output didn't meet dust limit"); + } + )* + for outp in $tx.output.iter() { + assert!(outp.value >= outp.script_pubkey.dust_value().as_sat(), "Spending tx output didn't meet dust limit"); + } let get_output = |out_point: &bitcoin::blockdata::transaction::OutPoint| { $( if out_point.txid == $spends_txn.txid() { @@ -738,21 +791,40 @@ macro_rules! get_closing_signed_broadcast { } } +#[cfg(test)] +macro_rules! check_warn_msg { + ($node: expr, $recipient_node_id: expr, $chan_id: expr) => {{ + let msg_events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + match msg_events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendWarningMessage { ref msg, log_level: _ }, node_id } => { + assert_eq!(node_id, $recipient_node_id); + assert_eq!(msg.channel_id, $chan_id); + msg.data.clone() + }, + _ => panic!("Unexpected event"), + } + }} +} + /// Check that a channel's closing channel update has been broadcasted, and optionally /// check whether an error message event has occurred. #[macro_export] macro_rules! 
check_closed_broadcast { ($node: expr, $with_error_msg: expr) => {{ - let events = $node.node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), if $with_error_msg { 2 } else { 1 }); - match events[0] { + use $crate::util::events::MessageSendEvent; + use $crate::ln::msgs::ErrorAction; + + let msg_events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), if $with_error_msg { 2 } else { 1 }); + match msg_events[0] { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { assert_eq!(msg.contents.flags & 2, 2); }, _ => panic!("Unexpected event"), } if $with_error_msg { - match events[1] { + match msg_events[1] { MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { // TODO: Check node_id Some(msg.clone()) @@ -763,6 +835,34 @@ macro_rules! check_closed_broadcast { }} } +/// Check that a channel's closing channel event has been issued +#[macro_export] +macro_rules! check_closed_event { + ($node: expr, $events: expr, $reason: expr) => { + check_closed_event!($node, $events, $reason, false); + }; + ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr) => {{ + use $crate::util::events::Event; + + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), $events); + let expected_reason = $reason; + let mut issues_discard_funding = false; + for event in events { + match event { + Event::ChannelClosed { ref reason, .. } => { + assert_eq!(*reason, expected_reason); + }, + Event::DiscardFunding { .. } => { + issues_discard_funding = true; + } + _ => panic!("Unexpected event"), + } + } + assert_eq!($is_check_discard_funding, issues_discard_funding); + }} +} + pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) { let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) }; let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) }; @@ -861,6 +961,9 @@ impl SendEvent { } } +#[macro_export] +/// Performs the "commitment signed dance" - the series of message exchanges which occur after a +/// commitment update. macro_rules! commitment_signed_dance { ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => { { @@ -927,7 +1030,7 @@ macro_rules! commitment_signed_dance { { commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true); if $fail_backwards { - expect_pending_htlcs_forwardable!($node_a); + $crate::expect_pending_htlcs_forwardable!($node_a); check_added_monitors!($node_a, 1); let channel_state = $node_a.node.channel_state.lock().unwrap(); @@ -947,27 +1050,46 @@ macro_rules! commitment_signed_dance { macro_rules!
get_payment_preimage_hash { ($dest_node: expr) => { { - let payment_preimage = PaymentPreimage([*$dest_node.network_payment_count.borrow(); 32]); - *$dest_node.network_payment_count.borrow_mut() += 1; - let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()); - let payment_secret = $dest_node.node.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap(); + get_payment_preimage_hash!($dest_node, None) + } + }; + ($dest_node: expr, $min_value_msat: expr) => { + { + use bitcoin::hashes::Hash as _; + let mut payment_count = $dest_node.network_payment_count.borrow_mut(); + let payment_preimage = $crate::ln::PaymentPreimage([*payment_count; 32]); + *payment_count += 1; + let payment_hash = $crate::ln::PaymentHash( + bitcoin::hashes::sha256::Hash::hash(&payment_preimage.0[..]).into_inner()); + let payment_secret = $dest_node.node.create_inbound_payment_for_hash(payment_hash, $min_value_msat, 7200).unwrap(); (payment_preimage, payment_hash, payment_secret) } } } #[cfg(test)] +#[macro_export] macro_rules! get_route_and_payment_hash { ($send_node: expr, $recv_node: expr, $recv_value: expr) => {{ - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!($recv_node); - let net_graph_msg_handler = &$send_node.net_graph_msg_handler; - let route = get_route(&$send_node.node.get_our_node_id(), - &net_graph_msg_handler.network_graph, - &$recv_node.node.get_our_node_id(), None, None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, $send_node.logger).unwrap(); + $crate::get_route_and_payment_hash!($send_node, $recv_node, vec![], $recv_value, TEST_FINAL_CLTV) + }}; + ($send_node: expr, $recv_node: expr, $last_hops: expr, $recv_value: expr, $cltv: expr) => {{ + let (payment_preimage, payment_hash, payment_secret) = $crate::get_payment_preimage_hash!($recv_node, Some($recv_value)); + let payee = $crate::routing::router::Payee::from_node_id($recv_node.node.get_our_node_id()) + .with_features($crate::ln::features::InvoiceFeatures::known()) + .with_route_hints($last_hops); + let scorer = $crate::util::test_utils::TestScorer::with_fixed_penalty(0); + let route = $crate::routing::router::get_route( + &$send_node.node.get_our_node_id(), &payee, $send_node.network_graph, + Some(&$send_node.node.list_usable_channels().iter().collect::>()), + $recv_value, $cltv, $send_node.logger, &scorer + ).unwrap(); (route, payment_hash, payment_preimage, payment_secret) }} } +#[macro_export] +/// Clears (and ignores) a PendingHTLCsForwardable event macro_rules! expect_pending_htlcs_forwardable_ignore { ($node: expr) => {{ let events = $node.node.get_and_clear_pending_events(); @@ -979,10 +1101,32 @@ macro_rules! expect_pending_htlcs_forwardable_ignore { }} } +#[macro_export] +/// Handles a PendingHTLCsForwardable event macro_rules! expect_pending_htlcs_forwardable { ($node: expr) => {{ - expect_pending_htlcs_forwardable_ignore!($node); + $crate::expect_pending_htlcs_forwardable_ignore!($node); $node.node.process_pending_htlc_forwards(); + + // Ensure process_pending_htlc_forwards is idempotent. + $node.node.process_pending_htlc_forwards(); + }} +} + +#[cfg(test)] +macro_rules! expect_pending_htlcs_forwardable_from_events { + ($node: expr, $events: expr, $ignore: expr) => {{ + assert_eq!($events.len(), 1); + match $events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + if $ignore { + $node.node.process_pending_htlc_forwards(); + + // Ensure process_pending_htlc_forwards is idempotent. 
+ $node.node.process_pending_htlc_forwards(); + } }} } @@ -1008,15 +1152,70 @@ macro_rules! expect_payment_received { } } +#[cfg(test)] +#[macro_export] +macro_rules! expect_payment_sent_without_paths { + ($node: expr, $expected_payment_preimage: expr) => { + expect_payment_sent!($node, $expected_payment_preimage, None::, false); + }; + ($node: expr, $expected_payment_preimage: expr, $expected_fee_msat_opt: expr) => { + expect_payment_sent!($node, $expected_payment_preimage, $expected_fee_msat_opt, false); + } +} + +#[macro_export] macro_rules! expect_payment_sent { ($node: expr, $expected_payment_preimage: expr) => { + $crate::expect_payment_sent!($node, $expected_payment_preimage, None::, true); + }; + ($node: expr, $expected_payment_preimage: expr, $expected_fee_msat_opt: expr) => { + $crate::expect_payment_sent!($node, $expected_payment_preimage, $expected_fee_msat_opt, true); + }; + ($node: expr, $expected_payment_preimage: expr, $expected_fee_msat_opt: expr, $expect_paths: expr) => { { + use bitcoin::hashes::Hash as _; let events = $node.node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage } => { + let expected_payment_hash = $crate::ln::PaymentHash( + bitcoin::hashes::sha256::Hash::hash(&$expected_payment_preimage.0).into_inner()); + if $expect_paths { + assert!(events.len() > 1); + } else { + assert_eq!(events.len(), 1); + } + let expected_payment_id = match events[0] { + $crate::util::events::Event::PaymentSent { ref payment_id, ref payment_preimage, ref payment_hash, ref fee_paid_msat } => { assert_eq!($expected_payment_preimage, *payment_preimage); + assert_eq!(expected_payment_hash, *payment_hash); + assert!(fee_paid_msat.is_some()); + if $expected_fee_msat_opt.is_some() { + assert_eq!(*fee_paid_msat, $expected_fee_msat_opt); + } + payment_id.unwrap() }, _ => panic!("Unexpected event"), + }; + if $expect_paths { + for i in 1..events.len() { + match events[i] { + $crate::util::events::Event::PaymentPathSuccessful { payment_id, payment_hash, .. } => { + assert_eq!(payment_id, expected_payment_id); + assert_eq!(payment_hash, Some(expected_payment_hash)); + }, + _ => panic!("Unexpected event"), + } + } + } + } } +} + +#[cfg(test)] +#[macro_export] +macro_rules! expect_payment_path_successful { + ($node: expr) => { + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + $crate::util::events::Event::PaymentPathSuccessful { .. } => {}, + _ => panic!("Unexpected event"), } } } @@ -1035,60 +1234,123 @@ macro_rules! 
expect_payment_forwarded { } } +pub struct PaymentFailedConditions<'a> { + pub(crate) expected_htlc_error_data: Option<(u16, &'a [u8])>, + pub(crate) expected_blamed_scid: Option, + pub(crate) expected_blamed_chan_closed: Option, + pub(crate) expected_mpp_parts_remain: bool, +} + +impl<'a> PaymentFailedConditions<'a> { + pub fn new() -> Self { + Self { + expected_htlc_error_data: None, + expected_blamed_scid: None, + expected_blamed_chan_closed: None, + expected_mpp_parts_remain: false, + } + } + pub fn mpp_parts_remain(mut self) -> Self { + self.expected_mpp_parts_remain = true; + self + } + pub fn blamed_scid(mut self, scid: u64) -> Self { + self.expected_blamed_scid = Some(scid); + self + } + pub fn blamed_chan_closed(mut self, closed: bool) -> Self { + self.expected_blamed_chan_closed = Some(closed); + self + } + pub fn expected_htlc_error_data(mut self, code: u16, data: &'a [u8]) -> Self { + self.expected_htlc_error_data = Some((code, data)); + self + } +} + #[cfg(test)] macro_rules! expect_payment_failed_with_update { ($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr, $scid: expr, $chan_closed: expr) => { - let events = $node.node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { ref payment_hash, rejected_by_dest, ref network_update, ref error_code, ref error_data, .. } => { - assert_eq!(*payment_hash, $expected_payment_hash, "unexpected payment_hash"); - assert_eq!(rejected_by_dest, $rejected_by_dest, "unexpected rejected_by_dest value"); - assert!(error_code.is_some(), "expected error_code.is_some() = true"); - assert!(error_data.is_some(), "expected error_data.is_some() = true"); - match network_update { - &Some(NetworkUpdate::ChannelUpdateMessage { ref msg }) if !$chan_closed => { - assert_eq!(msg.contents.short_channel_id, $scid); - assert_eq!(msg.contents.flags & 2, 0); - }, - &Some(NetworkUpdate::ChannelClosed { short_channel_id, is_permanent }) if $chan_closed => { - assert_eq!(short_channel_id, $scid); - assert!(is_permanent); - }, - Some(_) => panic!("Unexpected update type"), - None => panic!("Expected update"), - } - }, - _ => panic!("Unexpected event"), - } + expect_payment_failed_conditions!($node, $expected_payment_hash, $rejected_by_dest, + $crate::ln::functional_test_utils::PaymentFailedConditions::new().blamed_scid($scid).blamed_chan_closed($chan_closed)); } } #[cfg(test)] macro_rules! expect_payment_failed { ($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => { + #[allow(unused_mut)] + let mut conditions = $crate::ln::functional_test_utils::PaymentFailedConditions::new(); + $( + conditions = conditions.expected_htlc_error_data($expected_error_code, &$expected_error_data); + )* + expect_payment_failed_conditions!($node, $expected_payment_hash, $rejected_by_dest, conditions); + }; +} + +#[cfg(test)] +macro_rules! expect_payment_failed_conditions { + ($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr, $conditions: expr) => { let events = $node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentFailed { ref payment_hash, rejected_by_dest, network_update: _, ref error_code, ref error_data, .. } => { + let expected_payment_id = match events[0] { + Event::PaymentPathFailed { ref payment_hash, rejected_by_dest, ref error_code, ref error_data, ref path, ref retry, ref payment_id, ref network_update, .. 
} => { assert_eq!(*payment_hash, $expected_payment_hash, "unexpected payment_hash"); assert_eq!(rejected_by_dest, $rejected_by_dest, "unexpected rejected_by_dest value"); + assert!(retry.is_some(), "expected retry.is_some()"); + assert_eq!(retry.as_ref().unwrap().final_value_msat, path.last().unwrap().fee_msat, "Retry amount should match last hop in path"); + assert_eq!(retry.as_ref().unwrap().payee.pubkey, path.last().unwrap().pubkey, "Retry payee node_id should match last hop in path"); + assert!(error_code.is_some(), "expected error_code.is_some() = true"); assert!(error_data.is_some(), "expected error_data.is_some() = true"); - $( - assert_eq!(error_code.unwrap(), $expected_error_code, "unexpected error code"); - assert_eq!(&error_data.as_ref().unwrap()[..], $expected_error_data, "unexpected error data"); - )* + if let Some((code, data)) = $conditions.expected_htlc_error_data { + assert_eq!(error_code.unwrap(), code, "unexpected error code"); + assert_eq!(&error_data.as_ref().unwrap()[..], data, "unexpected error data"); + } + + if let Some(chan_closed) = $conditions.expected_blamed_chan_closed { + match network_update { + &Some($crate::routing::network_graph::NetworkUpdate::ChannelUpdateMessage { ref msg }) if !chan_closed => { + if let Some(scid) = $conditions.expected_blamed_scid { + assert_eq!(msg.contents.short_channel_id, scid); + } + assert_eq!(msg.contents.flags & 2, 0); + }, + &Some($crate::routing::network_graph::NetworkUpdate::ChannelClosed { short_channel_id, is_permanent }) if chan_closed => { + if let Some(scid) = $conditions.expected_blamed_scid { + assert_eq!(short_channel_id, scid); + } + assert!(is_permanent); + }, + Some(_) => panic!("Unexpected update type"), + None => panic!("Expected update"), + } + } + + payment_id.unwrap() }, _ => panic!("Unexpected event"), + }; + if !$conditions.expected_mpp_parts_remain { + $node.node.abandon_payment(expected_payment_id); + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { ref payment_hash, ref payment_id } => { + assert_eq!(*payment_hash, $expected_payment_hash, "unexpected second payment_hash"); + assert_eq!(*payment_id, expected_payment_id); + } + _ => panic!("Unexpected second event"), + } } } } -pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_paths: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) { - origin_node.node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); +pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_paths: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) -> PaymentId { + let payment_id = origin_node.node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(origin_node, expected_paths.len()); pass_along_route(origin_node, expected_paths, recv_value, our_payment_hash, our_payment_secret); + payment_id } pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, expected_preimage: Option) { @@ -1151,19 +1413,21 @@ pub fn pass_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_rou } } -pub fn send_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 
'c>, route: Route, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) { +pub fn send_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret, PaymentId) { let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(expected_route.last().unwrap()); - send_along_route_with_secret(origin_node, route, &[expected_route], recv_value, our_payment_hash, our_payment_secret); - (our_payment_preimage, our_payment_hash, our_payment_secret) + let payment_id = send_along_route_with_secret(origin_node, route, &[expected_route], recv_value, our_payment_hash, our_payment_secret); + (our_payment_preimage, our_payment_hash, our_payment_secret, payment_id) } -pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_preimage: PaymentPreimage) { +pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_preimage: PaymentPreimage) -> u64 { for path in expected_paths.iter() { assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); } assert!(expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage)); check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len()); + let mut expected_total_fee_msat = 0; + macro_rules! msgs_from_ev { ($ev: expr) => { match $ev { @@ -1206,6 +1470,7 @@ pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, exp $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); let fee = $node.node.channel_state.lock().unwrap().by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap().config.forwarding_fee_base_msat; expect_payment_forwarded!($node, Some(fee as u64), false); + expected_total_fee_msat += fee as u64; check_added_monitors!($node, 1); let new_next_msgs = if $new_msgs { let events = $node.node.get_and_clear_pending_msg_events(); @@ -1244,8 +1509,18 @@ pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, exp last_update_fulfill_dance!(origin_node, expected_route.first().unwrap()); } } + + // Ensure that claim_funds is idempotent. 
+ assert!(!expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage)); + assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(expected_paths[0].last().unwrap(), 0); + + expected_total_fee_msat +} +pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_preimage: PaymentPreimage) { + let expected_total_fee_msat = do_claim_payment_along_route(origin_node, expected_paths, skip_last, our_payment_preimage); if !skip_last { - expect_payment_sent!(origin_node, our_payment_preimage); + expect_payment_sent!(origin_node, our_payment_preimage, Some(expected_total_fee_msat)); } } @@ -1256,23 +1531,28 @@ pub fn claim_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: pub const TEST_FINAL_CLTV: u32 = 70; pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) { - let net_graph_msg_handler = &origin_node.net_graph_msg_handler; - let route = get_route(&origin_node.node.get_our_node_id(), &net_graph_msg_handler.network_graph, - &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), - Some(&origin_node.node.list_usable_channels().iter().collect::>()), &[], - recv_value, TEST_FINAL_CLTV, origin_node.logger).unwrap(); + let payee = Payee::from_node_id(expected_route.last().unwrap().node.get_our_node_id()) + .with_features(InvoiceFeatures::known()); + let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let route = get_route( + &origin_node.node.get_our_node_id(), &payee, &origin_node.network_graph, + Some(&origin_node.node.list_usable_channels().iter().collect::>()), + recv_value, TEST_FINAL_CLTV, origin_node.logger, &scorer).unwrap(); assert_eq!(route.paths.len(), 1); assert_eq!(route.paths[0].len(), expected_route.len()); for (node, hop) in expected_route.iter().zip(route.paths[0].iter()) { assert_eq!(hop.pubkey, node.node.get_our_node_id()); } - send_along_route(origin_node, route, expected_route, recv_value) + let res = send_along_route(origin_node, route, expected_route, recv_value); + (res.0, res.1, res.2) } pub fn route_over_limit<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) { - let net_graph_msg_handler = &origin_node.net_graph_msg_handler; - let route = get_route(&origin_node.node.get_our_node_id(), &net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), recv_value, TEST_FINAL_CLTV, origin_node.logger).unwrap(); + let payee = Payee::from_node_id(expected_route.last().unwrap().node.get_our_node_id()) + .with_features(InvoiceFeatures::known()); + let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let route = get_route(&origin_node.node.get_our_node_id(), &payee, origin_node.network_graph, None, recv_value, TEST_FINAL_CLTV, origin_node.logger, &scorer).unwrap(); assert_eq!(route.paths.len(), 1); assert_eq!(route.paths[0].len(), expected_route.len()); for (node, hop) in expected_route.iter().zip(route.paths[0].iter()) { @@ -1366,16 +1646,38 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe commitment_signed_dance!(origin_node, prev_node, next_msgs.as_ref().unwrap().1, false); let events = origin_node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - match events[0] 
{ - Event::PaymentFailed { payment_hash, rejected_by_dest, all_paths_failed, .. } => { + let expected_payment_id = match events[0] { + Event::PaymentPathFailed { payment_hash, rejected_by_dest, all_paths_failed, ref path, ref payment_id, .. } => { assert_eq!(payment_hash, our_payment_hash); assert!(rejected_by_dest); assert_eq!(all_paths_failed, i == expected_paths.len() - 1); + for (idx, hop) in expected_route.iter().enumerate() { + assert_eq!(hop.node.get_our_node_id(), path[idx].pubkey); + } + payment_id.unwrap() }, _ => panic!("Unexpected event"), + }; + if i == expected_paths.len() - 1 { + origin_node.node.abandon_payment(expected_payment_id); + let events = origin_node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { ref payment_hash, ref payment_id } => { + assert_eq!(*payment_hash, our_payment_hash, "unexpected second payment_hash"); + assert_eq!(*payment_id, expected_payment_id); + } + _ => panic!("Unexpected second event"), + } } } } + + // Ensure that fail_htlc_backwards is idempotent. + assert!(!expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash)); + assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_events().is_empty()); + assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(expected_paths[0].last().unwrap(), 0); } pub fn fail_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], our_payment_hash: PaymentHash) { @@ -1395,8 +1697,9 @@ pub fn create_chanmon_cfgs(node_count: usize) -> Vec { let persister = test_utils::TestPersister::new(); let seed = [i as u8; 32]; let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); + let network_graph = NetworkGraph::new(chain_source.genesis_hash); - chan_mon_cfgs.push(TestChanMonCfg{ tx_broadcaster, fee_estimator, chain_source, logger, persister, keys_manager }); + chan_mon_cfgs.push(TestChanMonCfg{ tx_broadcaster, fee_estimator, chain_source, logger, persister, keys_manager, network_graph }); } chan_mon_cfgs @@ -1417,6 +1720,7 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec(node_count: usize, cfgs: &'b Vec(nodes: &Vec(nodes: &Vec>, a: usize, b: usize) { - handle_announce_close_broadcast_events(nodes, a, b, false, "Commitment or closing transaction was confirmed on chain."); + handle_announce_close_broadcast_events(nodes, a, b, false, "Channel closed because commitment or closing transaction was confirmed on chain."); } #[cfg(test)] diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 85eef456c21..0993caf4c1d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -18,33 +18,30 @@ use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PER use chain::transaction::OutPoint; use chain::keysinterface::BaseSign; use ln::{PaymentPreimage, PaymentSecret, PaymentHash}; -use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC}; -use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, MppId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA}; +use ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; +use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, 
BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, PAYMENT_EXPIRY_BLOCKS }; use ln::channel::{Channel, ChannelError}; use ln::{chan_utils, onion_utils}; -use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT; -use routing::router::{Route, RouteHop, RouteHint, RouteHintHop, get_route, get_keysend_route}; -use routing::network_graph::{NetworkUpdate, RoutingFees}; +use ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; +use routing::network_graph::RoutingFees; +use routing::router::{Payee, Route, RouteHop, RouteHint, RouteHintHop, RouteParameters, find_route, get_route}; use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures}; use ln::msgs; use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; use util::enforcing_trait_impls::EnforcingSigner; use util::{byte_utils, test_utils}; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason}; use util::errors::APIError; use util::ser::{Writeable, ReadableArgs}; use util::config::UserConfig; -use bitcoin::hash_types::{Txid, BlockHash}; +use bitcoin::hash_types::BlockHash; use bitcoin::blockdata::block::{Block, BlockHeader}; use bitcoin::blockdata::script::Builder; use bitcoin::blockdata::opcodes; use bitcoin::blockdata::constants::genesis_block; use bitcoin::network::constants::Network; -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; - use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::key::{PublicKey,SecretKey}; @@ -108,8 +105,6 @@ fn test_insane_channel_opens() { insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg }); - insane_open_helper(r"Bogus; channel reserve \(\d+\) is less than dust limit \(\d+\)", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg }); - insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg }); insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg }); @@ -119,6 +114,68 @@ fn test_insane_channel_opens() { insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg }); } +fn do_test_counterparty_no_reserve(send_from_initiator: bool) { + // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, + // but only for them. Because some LSPs do it with some level of trust of the clients (for a + // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often + // in normal testing, we test it explicitly here. 
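// [Editor's sketch, not part of the diff] Rough arithmetic behind the `push_amt` computed in the
// test body below, assuming the BOLT 3 non-anchor weights (724 WU base, 172 WU per HTLC) and the
// 1% holder-selected reserve these tests quote elsewhere (1,000 sat on a 100,000-sat channel).
// The helper name is hypothetical; the real constants live in `ln::channel`.
fn sketch_no_reserve_push_amt_msat() -> u64 {
    let feerate_per_kw: u64 = 253; // default test feerate, as set explicitly in the test below
    let commit_fee_msat = feerate_per_kw * (724 + 4 * 172) / 1000 * 1000; // 357_000 msat
    let reserve_msat: u64 = (100_000 / 100) * 1000;                       // 1_000_000 msat
    100_000_000 - commit_fee_msat - reserve_msat                          // 98_643_000 msat
}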
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Have node0 initiate a channel to node1 with aforementioned parameters + let mut push_amt = 100_000_000; + let feerate_per_kw = 253; + let opt_anchors = false; + push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; + + let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap(); + let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + if !send_from_initiator { + open_channel_message.channel_reserve_satoshis = 0; + open_channel_message.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_message); + + // Extract the channel accept message from node1 to node0 + let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + if send_from_initiator { + accept_channel_message.channel_reserve_satoshis = 0; + accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel_message); + { + let mut lock; + let mut chan = get_channel_ref!(if send_from_initiator { &nodes[1] } else { &nodes[0] }, lock, temp_channel_id); + chan.holder_selected_channel_reserve_satoshis = 0; + chan.holder_max_htlc_value_in_flight_msat = 100_000_000; + } + + let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); + let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0); + + // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s + // security model if it ever tries to send funds back to nodes[0] (but that's not our problem). + if send_from_initiator { + send_payment(&nodes[0], &[&nodes[1]], 100_000_000 + // Note that for outbound channels we have to consider the commitment tx fee and the + // "fee spike buffer", which is currently a multiple of the total commitment tx fee as + // well as an additional HTLC. 
+ - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, opt_anchors)); + } else { + send_payment(&nodes[1], &[&nodes[0]], push_amt); + } +} + +#[test] +fn test_counterparty_no_reserve() { + do_test_counterparty_no_reserve(true); + do_test_counterparty_no_reserve(false); +} + #[test] fn test_async_inbound_update_fee() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -126,7 +183,6 @@ fn test_async_inbound_update_fee() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); @@ -168,9 +224,8 @@ fn test_async_inbound_update_fee() { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[0]); - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - nodes[1].node.send_payment(&get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 40000, TEST_FINAL_CLTV, &logger).unwrap(), our_payment_hash, &Some(our_payment_secret)).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); + nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[1], 1); let payment_event = { @@ -244,7 +299,6 @@ fn test_update_fee_unordered_raa() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); @@ -269,9 +323,8 @@ fn test_update_fee_unordered_raa() { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
- let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[0]); - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - nodes[1].node.send_payment(&get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 40000, TEST_FINAL_CLTV, &logger).unwrap(), our_payment_hash, &Some(our_payment_secret)).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); + nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[1], 1); let payment_event = { @@ -588,11 +641,21 @@ fn test_update_fee_that_funder_cannot_afford() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let channel_value = 1888; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000, InitFeatures::known(), InitFeatures::known()); + let channel_value = 5000; + let push_sats = 700; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000, InitFeatures::known(), InitFeatures::known()); let channel_id = chan.2; + let secp_ctx = Secp256k1::new(); + let bs_channel_reserve_sats = Channel::::get_holder_selected_channel_reserve_satoshis(channel_value); + + let opt_anchors = false; - let feerate = 260; + // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee + // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we + // calculate two different feerates here - the expected local limit as well as the expected + // remote limit. + let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(opt_anchors) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; + let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(opt_anchors)) as u32; { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); *feerate_lock = feerate; @@ -605,39 +668,92 @@ fn test_update_fee_that_funder_cannot_afford() { commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false); - //Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate of 260 set above. - //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve) + // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above. 
{ let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone(); - //We made sure neither party's funds are below the dust limit so -2 non-HTLC txns from number of outputs - let num_htlcs = commitment_tx.output.len() - 2; - let total_fee: u64 = feerate as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000; + //We made sure neither party's funds are below the dust limit and there are no HTLCs here + assert_eq!(commitment_tx.output.len(), 2); + let total_fee: u64 = commit_tx_fee_msat(feerate, 0, opt_anchors) / 1000; let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value); actual_fee = channel_value - actual_fee; assert_eq!(total_fee, actual_fee); } - //Add 2 to the previous fee rate to the final fee increases by 1 (with no HTLCs the fee is essentially - //fee_rate*(724/1000) so the increment of 1*0.724 is rounded back down) { + // Increment the feerate by a small constant, accounting for rounding errors let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = feerate + 2; + *feerate_lock += 4; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1); + check_added_monitors!(nodes[0], 0); + + const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; + + // Get the EnforcingSigner for each channel, which will be used to (1) get the keys + // needed to sign the new commitment tx and (2) sign the new commitment tx. + let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = { + let chan_lock = nodes[0].node.channel_state.lock().unwrap(); + let local_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let chan_signer = local_chan.get_signer(); + let pubkeys = chan_signer.pubkeys(); + (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, + pubkeys.funding_pubkey) + }; + let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = { + let chan_lock = nodes[1].node.channel_state.lock().unwrap(); + let remote_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let chan_signer = remote_chan.get_signer(); + let pubkeys = chan_signer.pubkeys(); + (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, + chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + pubkeys.funding_pubkey) + }; + + // Assemble the set of keys we can use for signatures for our commitment_signed message. 
+ let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint, + &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap(); + + let res = { + let local_chan_lock = nodes[0].node.channel_state.lock().unwrap(); + let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap(); + let local_chan_signer = local_chan.get_signer(); + let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![]; + let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( + INITIAL_COMMITMENT_NUMBER - 1, + push_sats, + channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, opt_anchors) / 1000, + opt_anchors, local_funding, remote_funding, + commit_tx_keys.clone(), + non_buffer_feerate + 4, + &mut htlcs, + &local_chan.channel_transaction_parameters.as_counterparty_broadcastable() + ); + local_chan_signer.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan.2, + signature: res.0, + htlc_signatures: res.1 + }; - let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_fee = msgs::UpdateFee { + channel_id: chan.2, + feerate_per_kw: non_buffer_feerate + 4, + }; - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap()); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee); //While producing the commitment_signed response after handling a received update_fee request, the //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve) //should produce an error. - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }); } #[test] @@ -647,7 +763,6 @@ fn test_update_fee_with_fundee_update_add_htlc() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); @@ -672,9 +787,7 @@ fn test_update_fee_with_fundee_update_add_htlc() { let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[0]); - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 800000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000); // nothing happens since node[1] is in AwaitingRemoteRevoke
nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); @@ -738,6 +851,8 @@ fn test_update_fee_with_fundee_update_add_htlc() { send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); } #[test] @@ -850,6 +965,8 @@ fn test_update_fee() { assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30); assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); } #[test] @@ -920,7 +1037,7 @@ fn fake_network_test() { }); hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops] }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0; + let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops], payee: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0; let mut hops = Vec::with_capacity(3); hops.push(RouteHop { @@ -949,7 +1066,7 @@ fn fake_network_test() { }); hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops] }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1; + let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops], payee: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1; // Claim the rebalances... fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2); @@ -977,10 +1094,20 @@ fn fake_network_test() { // Close down the channels... 
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure); close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); + check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); } #[test] @@ -994,13 +1121,10 @@ fn holding_cell_htlc_counting() { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); let mut payments = Vec::new(); for _ in 0..::ln::channel::OUR_MAX_HTLCS { - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); payments.push((payment_preimage, payment_hash)); } @@ -1014,10 +1138,8 @@ fn holding_cell_htlc_counting() { // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in // the holding cell waiting on B's RAA to send. At this point we should not be able to add // another HTLC. - let (_, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[2]); + let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); { - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap(); unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable { ref err }, assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err))); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1025,10 +1147,8 @@ fn holding_cell_htlc_counting() { } // This should also be true if we try to forward a payment. 
- let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[2]); + let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -1144,16 +1264,14 @@ fn test_duplicate_htlc_different_direction_onchain() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000); - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 800_000, TEST_FINAL_CLTV, &logger).unwrap(); - let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000); + let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap(); send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret); // Provide preimage to node 0 by claiming payment @@ -1176,6 +1294,7 @@ fn test_duplicate_htlc_different_direction_onchain() { mine_transaction(&nodes[0], &remote_txn[0]); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires // Check we only broadcast 1 timeout tx @@ -1207,7 +1326,7 @@ fn test_duplicate_htlc_different_direction_onchain() { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()); - assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain."); + assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain."); }, MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { assert!(update_add_htlcs.is_empty()); @@ -1228,17 +1347,14 @@ fn test_basic_channel_reserve() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); let chan_stat = get_channel_value_stat!(nodes[0], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; // The 2* and +1 are for the fee spike reserve. 
- let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1); + let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1, get_opt_anchors!(nodes[0], chan.2)); let max_can_send = 5000000 - channel_reserve - commit_tx_fee; - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), max_can_send + 1, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send + 1); let err = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).err().unwrap(); match err { PaymentSendFailure::AllFailedRetrySafe(ref fails) => { @@ -1344,7 +1460,7 @@ fn test_fee_spike_violation_fails_htlc() { commitment_number, 95000, local_chan_balance, - false, local_funding, remote_funding, + local_chan.opt_anchors(), local_funding, remote_funding, commit_tx_keys.clone(), feerate_per_kw, &mut vec![(accepted_htlc_info, ())], @@ -1394,21 +1510,23 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { // sending any above-dust amount would result in a channel reserve violation. // In this test we check that we would be prevented from sending an HTLC in // this situation. - let feerate_per_kw = 253; - chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; - chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let opt_anchors = false; + let mut push_amt = 100_000_000; - push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known()); // Sending exactly enough to hit the reserve amount should be accepted - let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } // However one more HTLC should be significantly over the reserve amount and fail. let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); @@ -1421,30 +1539,38 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { #[test] fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let mut chanmon_cfgs = create_chanmon_cfgs(2); - // Set the fee rate for the channel very high, to the point where the funder - // receiving 1 update_add_htlc would result in them closing the channel due - // to channel reserve violation. This close could also happen if the fee went - // up a more realistic amount, but many HTLCs were outstanding at the time of - // the update_add_htlc. 
- chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) }; - chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) }; + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000); + let opt_anchors = false; + + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a + // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment + // transaction fee with 0 HTLCs (183 sats)). + let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); + push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known()); + + // Send four HTLCs to cover the initial push_msat buffer we're required to include + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 700_000); // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1; let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 1000, &Some(payment_secret), cur_height, &None).unwrap(); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 700_000, &Some(payment_secret), cur_height, &None).unwrap(); let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, - htlc_id: 1, - amount_msat: htlc_msat + 1, + htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64, + amount_msat: htlc_msat, payment_hash: payment_hash, cltv_expiry: htlc_cltv, onion_routing_packet: onion_packet, @@ -1457,6 +1583,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }); } #[test] @@ -1464,35 +1591,77 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { // Test that if we receive many dust HTLCs over an outbound channel, they don't count when // calculating our commitment transaction fee (this was previously broken). 
let mut chanmon_cfgs = create_chanmon_cfgs(2); - let feerate_per_kw = 253; - chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; - chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let opt_anchors = false; + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment // transaction fee with 0 HTLCs (183 sats)). let mut push_amt = 100_000_000; - push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT) / 1000 * 1000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known()); - let dust_amt = crate::ln::channel::MIN_DUST_LIMIT_SATOSHIS * 1000 - + feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000 - 1; + let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 + + feerate_per_kw as u64 * htlc_success_tx_weight(opt_anchors) / 1000 * 1000 - 1; // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the // commitment transaction fee. let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt); + // Send four HTLCs to cover the initial push_msat buffer we're required to include + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + // One more than the dust amt should fail, however. let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1); unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value")); } +#[test] +fn test_chan_init_feerate_unaffordability() { + // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to + // channel reserve and feerate requirements. + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let opt_anchors = false; + + // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single + // HTLC. 
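The `dust_amt` expression above is the largest received HTLC that still counts as dust on the holder's commitment transaction: the dust limit plus the fee of the HTLC-success transaction that would otherwise be needed to claim it on chain, minus one msat. A hedged sketch of that calculation; the 703-weight HTLC-success figure and the 354 sat dust limit are illustrative assumptions, not the crate's exported constants.

```rust
// Sketch of the dust-threshold arithmetic; the HTLC-success weight and the dust limit
// used below are assumptions chosen for illustration.
const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;

/// Largest received-HTLC value (msat) that is still dust on the holder's commitment:
/// anything at or below the dust limit plus the fee of the HTLC-success transaction
/// that would otherwise be required to claim it on-chain.
fn max_dust_htlc_msat(holder_dust_limit_sats: u64, feerate_per_kw: u64) -> u64 {
    holder_dust_limit_sats * 1000
        + feerate_per_kw * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000
        - 1
}

fn main() {
    let dust_amt = max_dust_htlc_msat(354, 253);
    // 354_000 msat dust limit + 177_000 msat HTLC-success fee - 1 msat
    assert_eq!(dust_amt, 530_999);
    println!("largest dust HTLC: {} msat", dust_amt);
}
```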
+ let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); + assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(), + APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() }); + + // During open, we don't have a "counterparty channel reserve" to check against, so that + // requirement only comes into play on the open_channel handling side. + push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap(); + let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel_msg.push_msat += 1; + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_msg); + + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + match msg_events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { + assert_eq!(msg.data, "Insufficient funding amount for initial reserve"); + }, + _ => panic!("Unexpected event"), + } +} + #[test] fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() { // Test that if we receive many dust HTLCs over an inbound channel, they don't count when @@ -1536,9 +1705,10 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; let chan_stat = get_channel_value_stat!(nodes[0], chan.2); let feerate = get_feerate!(nodes[0], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan.2); // Add a 2* and +1 for the fee spike reserve. - let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1); + let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors); let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2; let amt_msat_1 = recv_value_1 + total_routing_fee_msat; @@ -1555,7 +1725,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]); // Attempt to trigger a channel reserve violation --> payment failure. 
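The figures in the error string asserted above ("356" and "357") fall straight out of the reserve arithmetic, as does the "183 sats" zero-HTLC fee quoted in the earlier reserve comments. A quick back-of-the-envelope check; all constants are assumptions consistent with the values these tests expect (non-anchor weights, the 253 sat/kW test feerate, and a minimum "affordable" HTLC count of four).

```rust
// Back-of-the-envelope check of the figures asserted above.
fn main() {
    let feerate_per_kw: u64 = 253;
    let base_weight: u64 = 724;
    let weight_per_htlc: u64 = 172;
    let min_affordable_htlc_count: u64 = 4;

    // Zero-HTLC commitment fee: the "183 sats" quoted in an earlier comment.
    assert_eq!(base_weight * feerate_per_kw / 1000, 183);

    // Fee for a commitment carrying the minimum "affordable" HTLC count: the "357".
    let fee_sats = (base_weight + min_affordable_htlc_count * weight_per_htlc)
        * feerate_per_kw / 1000;
    assert_eq!(fee_sats, 357);

    // Pushing one msat more than (100_000_000 - fee) leaves the funder only 356 whole
    // sats, one short of the 357 sat fee, so create_channel rejects the open.
    let push_msat = 100_000_000 - fee_sats * 1000 + 1;
    let funder_balance_sats = (100_000_000 - push_msat) / 1000;
    assert_eq!(funder_balance_sats, 356);
}
```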
- let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2); + let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, opt_anchors); let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1; let amt_msat_2 = recv_value_2 + total_routing_fee_msat; let (route_2, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_2); @@ -1583,6 +1753,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() }); } #[test] @@ -1605,8 +1776,8 @@ fn test_inbound_outbound_capacity_is_not_zero() { assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000); } -fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64) -> u64 { - (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000 +fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, opt_anchors: bool) -> u64 { + (commitment_tx_base_weight(opt_anchors) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000 } #[test] @@ -1641,6 +1812,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let feemsat = 239; // set above let total_fee_msat = (nodes.len() - 2) as u64 * feemsat; let feerate = get_feerate!(nodes[0], chan_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan_1.2); let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat; @@ -1662,7 +1834,7 @@ fn test_channel_reserve_holding_cell_htlcs() { // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve. // Also, ensure that each payment has enough to be over the dust limit to // ensure it'll be included in each commit tx fee calculation. - let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1); + let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors); let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000); if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat { break; @@ -1694,7 +1866,7 @@ fn test_channel_reserve_holding_cell_htlcs() { // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee // policy. 
- let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1); + let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors); let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2; let amt_msat_1 = recv_value_1 + total_fee_msat; @@ -1719,7 +1891,7 @@ fn test_channel_reserve_holding_cell_htlcs() { } // split the rest to test holding cell - let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1); + let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors); let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs; let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2; let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat; @@ -1830,11 +2002,11 @@ fn test_channel_reserve_holding_cell_htlcs() { claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21); claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22); - let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1); + let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, opt_anchors); let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat; send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3); - let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1); + let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); let stat0 = get_channel_value_stat!(nodes[0], chan_1.2); assert_eq!(stat0.value_to_self_msat, expected_value_to_self); @@ -1873,7 +2045,6 @@ fn channel_reserve_in_flight_removes() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2); // Route the first two HTLCs. @@ -1881,10 +2052,8 @@ fn channel_reserve_in_flight_removes() { let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 20000); // Start routing the third HTLC (this is just used to get everyone in the right state). 
- let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); let send_1 = { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1908,7 +2077,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent!(nodes[0], payment_preimage_1); + expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg); @@ -1937,7 +2106,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent!(nodes[0], payment_preimage_2); + expect_payment_sent_without_paths!(nodes[0], payment_preimage_2); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -1950,14 +2119,13 @@ fn channel_reserve_in_flight_removes() { // resolve the second HTLC from A's point of view. nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing. 
- let (payment_preimage_4, payment_hash_4, payment_secret_4) = get_payment_preimage_hash!(nodes[0]); + let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000); let send_2 = { - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 10000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[1].node.send_payment(&route, payment_hash_4, &Some(payment_secret_4)).unwrap(); check_added_monitors!(nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1981,6 +2149,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); @@ -2039,6 +2208,8 @@ fn channel_monitor_network_test() { check_closed_broadcast!(nodes[0], true); assert_eq!(nodes[0].node.list_channels().len(), 0); assert_eq!(nodes[1].node.list_channels().len(), 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer); // One pending HTLC is discarded by the force-close: let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0; @@ -2059,6 +2230,8 @@ fn channel_monitor_network_test() { check_closed_broadcast!(nodes[2], true); assert_eq!(nodes[1].node.list_channels().len(), 0); assert_eq!(nodes[2].node.list_channels().len(), 1); + check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); macro_rules! claim_funds { ($node: expr, $prev_node: expr, $preimage: expr) => { @@ -2101,10 +2274,12 @@ fn channel_monitor_network_test() { check_closed_broadcast!(nodes[3], true); assert_eq!(nodes[2].node.list_channels().len(), 0); assert_eq!(nodes[3].node.list_channels().len(), 1); + check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer); + check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed); // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and // confusing us in the following tests. 
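`check_closed_event!` is used throughout the closure assertions added in this file but is defined in the shared test utilities rather than in this diff. A rough, hypothetical sketch of the shape of check it performs, inferred only from how it is invoked here; the real macro may differ, for instance in how it handles multiple pending events.

```rust
// Hypothetical sketch of the assertion check_closed_event! performs; assumes the usual
// test-harness node handle and Event/ClosureReason types are in scope.
macro_rules! check_closed_event {
    ($node: expr, $events_count: expr, $reason: expr) => {{
        let events = $node.node.get_and_clear_pending_events();
        assert_eq!(events.len(), $events_count);
        for event in events {
            match event {
                Event::ChannelClosed { ref reason, .. } => assert_eq!(*reason, $reason),
                _ => panic!("Unexpected event"),
            }
        }
    }}
}
```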
- let chan_3_mon = nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().remove(&OutPoint { txid: chan_3.3.txid(), index: 0 }).unwrap(); + let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 }); // One pending HTLC to time out: let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0; @@ -2171,7 +2346,9 @@ fn channel_monitor_network_test() { assert_eq!(nodes[3].node.list_channels().len(), 0); assert_eq!(nodes[4].node.list_channels().len(), 0); - nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon); + nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon).unwrap(); + check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed); } #[test] @@ -2221,6 +2398,7 @@ fn test_justice_tx() { node_txn.truncate(1); } check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE); mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -2228,6 +2406,7 @@ fn test_justice_tx() { // Verify broadcast of revoked HTLC-timeout let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); // Broadcast revoked HTLC-timeout on node 1 mine_transaction(&nodes[1], &node_txn[1]); test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone()); @@ -2269,9 +2448,11 @@ fn test_justice_tx() { test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE); mine_transaction(&nodes[1], &revoked_local_txn[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS); check_added_monitors!(nodes[1], 1); mine_transaction(&nodes[0], &node_txn[1]); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone()); } get_announce_close_broadcast_events(&nodes, 0, 1); @@ -2299,7 +2480,8 @@ fn revoked_output_claim() { // Inform nodes[1] that nodes[0] broadcast a stale tx mine_transaction(&nodes[1], &revoked_local_txn[0]); check_added_monitors!(nodes[1], 1); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx check_spends!(node_txn[0], revoked_local_txn[0]); @@ -2308,7 +2490,8 @@ fn revoked_output_claim() { // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan mine_transaction(&nodes[0], &revoked_local_txn[0]); get_announce_close_broadcast_events(&nodes, 0, 1); - check_added_monitors!(nodes[0], 1) + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); } #[test] @@ -2345,8 +2528,10 @@ fn claim_htlc_outputs_shared_tx() { { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_added_monitors!(nodes[0], 1); + 
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); mine_transaction(&nodes[1], &revoked_local_txn[0]); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], payment_hash_2, true); @@ -2403,7 +2588,13 @@ fn claim_htlc_outputs_single_tx() { check_added_monitors!(nodes[0], 1); confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_ignore!(nodes[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + let mut events = nodes[0].node.get_and_clear_pending_events(); + expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); + match events[1] { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + _ => panic!("Unexpected event"), + } connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], payment_hash_2, true); @@ -2481,8 +2672,8 @@ fn test_htlc_on_chain_success() { send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - let (our_payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); - let (our_payment_preimage_2, _payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); // Broadcast legit commitment tx from C on B's chain // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain @@ -2501,6 +2692,7 @@ fn test_htlc_on_chain_success() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx) assert_eq!(node_txn.len(), 5); assert_eq!(node_txn[0], node_txn[3]); @@ -2526,11 +2718,15 @@ fn test_htlc_on_chain_success() { added_monitors.clear(); } let forwarded_events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(forwarded_events.len(), 2); - if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] { - } else { panic!(); } + assert_eq!(forwarded_events.len(), 3); + match forwarded_events[0] { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} + _ => panic!("Unexpected event"), + } if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] { } else { panic!(); } + if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[2] { + } else { panic!(); } let events = nodes[1].node.get_and_clear_pending_msg_events(); { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); @@ -2597,6 +2793,7 @@ fn test_htlc_on_chain_success() { mine_transaction(&nodes[1], &node_a_commitment_tx[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Sucess * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn) let commitment_spend = @@ -2632,18 +2829,21 @@ fn test_htlc_on_chain_success() { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 5); let mut first_claimed = false; for event in events { match event { - Event::PaymentSent { payment_preimage } => { - if payment_preimage == our_payment_preimage { + Event::PaymentSent { payment_preimage, payment_hash, .. } => { + if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 { assert!(!first_claimed); first_claimed = true; } else { assert_eq!(payment_preimage, our_payment_preimage_2); + assert_eq!(payment_hash, payment_hash_2); } }, + Event::PaymentPathSuccessful { .. } => {}, + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {}, _ => panic!("Unexpected event"), } } @@ -2700,6 +2900,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx) assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan_2.3); @@ -2709,6 +2910,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1); mine_transaction(&nodes[1], &commitment_tx[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let timeout_tx; { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -2776,6 +2978,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx assert_eq!(node_txn.len(), 2); check_spends!(node_txn[0], chan_1.3); @@ -2814,6 +3017,7 @@ fn test_simple_commitment_revoked_fail_backward() { let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); mine_transaction(&nodes[1], &revoked_local_txn[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); @@ -2931,10 +3135,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting // on nodes[2]'s RAA. - let (_, fourth_payment_hash, fourth_payment_secret) = get_payment_preimage_hash!(nodes[2]); - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let logger = test_utils::TestLogger::new(); - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000); nodes[1].node.send_payment(&route, fourth_payment_hash, &Some(fourth_payment_secret)).unwrap(); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -2961,17 +3162,28 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use mine_transaction(&nodes[1], &revoked_local_txn[0]); check_added_monitors!(nodes[1], 1); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 }); + assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 }); match events[0] { - Event::PaymentFailed { ref payment_hash, .. } => { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => { }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentPathFailed { ref payment_hash, .. } => { assert_eq!(*payment_hash, fourth_payment_hash); }, _ => panic!("Unexpected event"), } if !deliver_bs_raa { - match events[1] { + match events[2] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert_eq!(*payment_hash, fourth_payment_hash); + }, + _ => panic!("Unexpected event"), + } + match events[3] { Event::PendingHTLCsForwardable { .. } => { }, _ => panic!("Unexpected event"), }; } @@ -2988,7 +3200,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use match events[if deliver_bs_raa { 2 } else { 1 }] { MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => { assert_eq!(channel_id, chan_2.2); - assert_eq!(data.as_str(), "Commitment or closing transaction was confirmed on chain."); + assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain."); }, _ => panic!("Unexpected event"), } @@ -3021,7 +3233,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 3); match events[0] { - Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { + Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { assert!(failed_htlcs.insert(payment_hash.0)); // If we delivered B's RAA we got an unknown preimage error, not something // that we should update our routing table for. @@ -3032,14 +3244,14 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { + Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { assert!(failed_htlcs.insert(payment_hash.0)); assert!(network_update.is_some()); }, _ => panic!("Unexpected event"), } match events[2] { - Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { + Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { assert!(failed_htlcs.insert(payment_hash.0)); assert!(network_update.is_some()); }, @@ -3077,13 +3289,10 @@ fn fail_backward_pending_htlc_upon_channel_failure() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
{ - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -3097,10 +3306,8 @@ fn fail_backward_pending_htlc_upon_channel_failure() { } // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack. - let (_, failed_payment_hash, failed_payment_secret) = get_payment_preimage_hash!(nodes[1]); + let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, failed_payment_hash, &Some(failed_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 0); @@ -3109,13 +3316,11 @@ fn fail_backward_pending_htlc_upon_channel_failure() { // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel. { - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]); + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000); let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); let current_height = nodes[1].node.best_block.read().unwrap().height() + 1; - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap(); let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(&route.paths[0], 50_000, &Some(payment_secret), current_height, &None).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash); @@ -3131,9 +3336,21 @@ fn fail_backward_pending_htlc_upon_channel_failure() { }; nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc); } - + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); // Check that Alice fails backward the pending HTLC from the second payment. - expect_payment_failed!(nodes[0], failed_payment_hash, true); + match events[0] { + Event::PaymentPathFailed { payment_hash, .. } => { + assert_eq!(payment_hash, failed_payment_hash); + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. 
} => { + assert_eq!(err, "Remote side tried to send a 0-msat HTLC"); + }, + _ => panic!("Unexpected event {:?}", events[1]), + } check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); } @@ -3153,6 +3370,7 @@ fn test_htlc_ignore_latest_remote_commitment() { connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 3); @@ -3162,6 +3380,7 @@ fn test_htlc_ignore_latest_remote_commitment() { connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); // Duplicate the connect_block call since this may happen due to other listeners // registering new transactions @@ -3178,13 +3397,10 @@ fn test_force_close_fail_back() { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); let mut payment_event = { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, 42, &logger).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -3216,6 +3432,7 @@ fn test_force_close_fail_back() { nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed); let tx = { let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't @@ -3230,12 +3447,12 @@ fn test_force_close_fail_back() { // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. 
{ - let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.read().unwrap(); - monitors.get(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap() - .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &&logger); + get_monitor!(nodes[2], payment_event.commitment_msg.channel_id) + .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &node_cfgs[2].logger); } mine_transaction(&nodes[2], &tx); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -3267,13 +3484,13 @@ fn test_dup_events_on_peer_disconnect() { check_added_monitors!(nodes[1], 1); let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent_without_paths!(nodes[0], payment_preimage); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false)); - assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + expect_payment_path_successful!(nodes[0]); } #[test] @@ -3299,7 +3516,7 @@ fn test_simple_peer_disconnect() { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; + let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000); let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; @@ -3313,20 +3530,25 @@ fn test_simple_peer_disconnect() { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); { let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 3); match events[0] { - Event::PaymentSent { payment_preimage } => { + Event::PaymentSent { payment_preimage, payment_hash, .. } => { assert_eq!(payment_preimage, payment_preimage_3); + assert_eq!(payment_hash, payment_hash_3); }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => { + Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } => { assert_eq!(payment_hash, payment_hash_5); assert!(rejected_by_dest); }, _ => panic!("Unexpected event"), } + match events[2] { + Event::PaymentPathSuccessful { .. 
} => {}, + _ => panic!("Unexpected event"), + } } claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); @@ -3351,14 +3573,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); } - let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - let logger = test_utils::TestLogger::new(); let payment_event = { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, - &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), Some(&nodes[0].node.list_usable_channels().iter().collect::>()), - &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -3483,8 +3700,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 1); match events_4[0] { - Event::PaymentSent { ref payment_preimage } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => { assert_eq!(payment_preimage_1, *payment_preimage); + assert_eq!(payment_hash_1, *payment_hash); }, _ => panic!("Unexpected event"), } @@ -3520,14 +3738,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken if messages_delivered < 2 { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false)); if messages_delivered < 1 { - let events_4 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_4.len(), 1); - match events_4[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(payment_preimage_1, *payment_preimage); - }, - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage_1); } else { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -3545,15 +3756,20 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } + if messages_delivered == 1 || messages_delivered == 2 { + expect_payment_path_successful!(nodes[0]); + } + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + if messages_delivered > 2 { + expect_payment_path_successful!(nodes[0]); + } + // Channel should still work fine... 
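Several hunks above replace `expect_payment_sent!` with `expect_payment_sent_without_paths!` and a later `expect_payment_path_successful!`: a successful payment now surfaces as one `PaymentSent` event plus a `PaymentPathSuccessful` event per path once the HTLC is fully, irrevocably resolved. A hedged sketch of asserting on that pair of events; the field names come from the events matched elsewhere in this diff, while `node`, `expected_preimage`, and `expected_hash` are stand-ins for the usual test-harness values.

```rust
// Sketch only; the real assertions live in the expect_* macros in the shared test
// utilities, and the two events may also arrive in separate batches.
let events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
    // PaymentSent still fires once per payment, now carrying the payment hash as well.
    Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
        assert_eq!(*payment_preimage, expected_preimage);
        assert_eq!(*payment_hash, expected_hash);
    },
    _ => panic!("Unexpected event"),
}
match events[1] {
    // One PaymentPathSuccessful follows per path once the HTLC is fully resolved.
    Event::PaymentPathSuccessful { .. } => {},
    _ => panic!("Unexpected event"),
}
```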
- let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, - &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), Some(&nodes[0].node.list_usable_channels().iter().collect::>()), - &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } @@ -3591,10 +3807,12 @@ fn test_funding_peer_disconnect() { confirm_transaction(&nodes[0], &tx); let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); + let chan_id; assert_eq!(events_1.len(), 1); match events_1[0] { - MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => { + MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => { assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + chan_id = msg.channel_id; }, _ => panic!("Unexpected event"), } @@ -3656,10 +3874,8 @@ fn test_funding_peer_disconnect() { nodes[0].net_graph_msg_handler.handle_channel_update(&bs_update).unwrap(); nodes[0].net_graph_msg_handler.handle_channel_update(&as_update).unwrap(); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let logger = test_utils::TestLogger::new(); - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); - let (payment_preimage, _, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); // Check that after deserialization and reconnection we can still generate an identical @@ -3668,7 +3884,7 @@ fn test_funding_peer_disconnect() { let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap(); persister = test_utils::TestPersister::new(); let keys_manager = &chanmon_cfgs[0].keys_manager; @@ -3727,14 +3943,11 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); // Now try to send a second payment which will fail to send - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, payment_hash_2, payment_preimage_2, 
payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -3763,8 +3976,9 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentSent { ref payment_preimage } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => { assert_eq!(*payment_preimage, payment_preimage_1); + assert_eq!(*payment_hash, payment_hash_1); }, _ => panic!("Unexpected event"), } @@ -3865,6 +4079,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } @@ -3877,17 +4092,14 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); let our_payment_hash = if send_partial_mpp { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap(); - let (_, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]); + let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000); // Use the utility function send_payment_along_path to send the payment with MPP data which // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. - let mpp_id = MppId([42; 32]); - nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, mpp_id, &None).unwrap(); + let payment_id = PaymentId([42; 32]); + nodes[0].node.send_payment_along_path(&route.paths[0], &route.payee, &our_payment_hash, &Some(payment_secret), 200000, cur_height, payment_id, &None).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3949,38 +4161,28 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); - let logger = test_utils::TestLogger::new(); - // Route a first payment to get the 1 -> 2 channel in awaiting_raa... 
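The serialization-and-reload hunks above and below all follow the same pattern: write the `ChannelMonitor` out through the new `get_monitor!` helper, rebuild the persister and chain monitor, read the monitor back as a `(BlockHash, ChannelMonitor)` pair, and re-register it before using the deserialized `ChannelManager`. A condensed, hedged sketch of that round-trip; the `EnforcingSigner` type parameter is an assumption, and error handling and the `ChannelManagerReadArgs` step are elided.

```rust
// Condensed sketch of the monitor round-trip used by the reload tests; assumes the
// test-harness `nodes`, `chan_id`, and `keys_manager` values are in scope.
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();

let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
let (_block_hash, chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
    &mut chan_0_monitor_read, keys_manager).unwrap();
assert!(chan_0_monitor_read.is_empty()); // the whole buffer should have been consumed

// Re-register the freshly read monitor with the (new) chain monitor before driving the
// deserialized ChannelManager.
assert!(nodes[0].chain_monitor.watch_channel(
    chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
```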
- let (_, first_payment_hash, first_payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); { - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[1].node.send_payment(&route, first_payment_hash, &Some(first_payment_secret)).unwrap(); } assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); check_added_monitors!(nodes[1], 1); // Now attempt to route a second payment, which should be placed in the holding cell - let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[2]); + let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] }; + let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000); + sending_node.node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap(); if forwarded_htlc { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap(); - nodes[0].node.send_payment(&route, second_payment_hash, &Some(first_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - check_added_monitors!(nodes[1], 0); - } else { - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap(); - nodes[1].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap(); - check_added_monitors!(nodes[1], 0); } + check_added_monitors!(nodes[1], 0); - connect_blocks(&nodes[1], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS); + connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); connect_blocks(&nodes[1], 1); @@ -3999,7 +4201,14 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { } expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false); } else { - expect_payment_failed!(nodes[1], second_payment_hash, true); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] { + assert_eq!(*payment_hash, second_payment_hash); + } else { panic!("Unexpected event"); } + if let Event::PaymentFailed { ref payment_hash, .. 
} = events[1] { + assert_eq!(*payment_hash, second_payment_hash); + } else { panic!("Unexpected event"); } } } @@ -4027,7 +4236,8 @@ fn test_no_txn_manager_serialize_deserialize() { let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()) + .write(&mut chan_0_monitor_serialized).unwrap(); logger = test_utils::TestLogger::new(); fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; @@ -4084,141 +4294,6 @@ fn test_no_txn_manager_serialize_deserialize() { send_payment(&nodes[0], &[&nodes[1]], 1000000); } -#[test] -fn mpp_failure() { - let chanmon_cfgs = create_chanmon_cfgs(4); - let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); - let nodes = create_network(4, &node_cfgs, &node_chanmgrs); - - let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let logger = test_utils::TestLogger::new(); - - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); - let path = route.paths[0].clone(); - route.paths.push(path); - route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); - route.paths[0][0].short_channel_id = chan_1_id; - route.paths[0][1].short_channel_id = chan_3_id; - route.paths[1][0].pubkey = nodes[2].node.get_our_node_id(); - route.paths[1][0].short_channel_id = chan_2_id; - route.paths[1][1].short_channel_id = chan_4_id; - send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); - fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash); -} - -#[test] -fn test_dup_htlc_onchain_fails_on_reload() { - // When a Channel is closed, any outbound HTLCs which were relayed through it are simply - // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor - // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when - // the ChannelMonitor tells it to. - // - // If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the - // ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a - // PaymentFailed event appearing). However, because we may not serialize the relevant - // ChannelMonitor at the same time, this isn't strictly guaranteed. 
In order to provide this - // consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs - // and de-duplicates ChannelMonitor events. - // - // This tests that explicit tracking behavior. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let persister: test_utils::TestPersister; - let new_chain_monitor: test_utils::TestChainMonitor; - let nodes_0_deserialized: ChannelManager; - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - - // Route a payment, but force-close the channel before the HTLC fulfill message arrives at - // nodes[0]. - let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000); - nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap(); - check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - - // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction - connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert_eq!(node_txn.len(), 3); - assert_eq!(node_txn[0], node_txn[1]); - - assert!(nodes[1].node.claim_funds(payment_preimage)); - check_added_monitors!(nodes[1], 1); - - let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]}); - check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - - header.prev_blockhash = nodes[0].best_block_hash(); - connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]}); - - // Serialize out the ChannelMonitor before connecting the on-chain claim transactions. This is - // fairly normal behavior as ChannelMonitor(s) are often not re-serialized when on-chain events - // happen, unlike ChannelManager which tends to be re-serialized after any relevant event(s). - let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); - - header.prev_blockhash = nodes[0].best_block_hash(); - let claim_block = Block { header, txdata: claim_txn}; - connect_block(&nodes[0], &claim_block); - expect_payment_sent!(nodes[0], payment_preimage); - - // ChannelManagers generally get re-serialized after any relevant event(s). Since we just - // connected a highly-relevant block, it likely gets serialized out now. - let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].node.write(&mut chan_manager_serialized).unwrap(); - - // Now reload nodes[0]... 
- persister = test_utils::TestPersister::new(); - let keys_manager = &chanmon_cfgs[0].keys_manager; - new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager); - nodes[0].chain_monitor = &new_chain_monitor; - let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; - let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor)>::read( - &mut chan_0_monitor_read, keys_manager).unwrap(); - assert!(chan_0_monitor_read.is_empty()); - - let (_, nodes_0_deserialized_tmp) = { - let mut channel_monitors = HashMap::new(); - channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor); - <(BlockHash, ChannelManager)> - ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs { - default_config: Default::default(), - keys_manager, - fee_estimator: node_cfgs[0].fee_estimator, - chain_monitor: nodes[0].chain_monitor, - tx_broadcaster: nodes[0].tx_broadcaster.clone(), - logger: nodes[0].logger, - channel_monitors, - }).unwrap() - }; - nodes_0_deserialized = nodes_0_deserialized_tmp; - - assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); - check_added_monitors!(nodes[0], 1); - nodes[0].node = &nodes_0_deserialized; - - // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but - // which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of - // payment events should kick in, leaving us with no pending events here. - let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1; - nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height); - assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); -} - #[test] fn test_manager_serialize_deserialize_events() { // This test makes sure the events field in ChannelManager survives de/serialization @@ -4256,7 +4331,8 @@ fn test_manager_serialize_deserialize_events() { added_monitors.clear(); } - node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())); + let bs_funding_signed = get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()); + node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &bs_funding_signed); { let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); @@ -4271,7 +4347,7 @@ fn test_manager_serialize_deserialize_events() { // Start the de/serialization process mid-channel creation to check that the channel manager will hold onto events that are serialized let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + get_monitor!(nodes[0], bs_funding_signed.channel_id).write(&mut chan_0_monitor_serialized).unwrap(); fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; logger = test_utils::TestLogger::new(); @@ -4349,7 +4425,7 @@ fn test_simple_manager_serialize_deserialize() { let new_chain_monitor: test_utils::TestChainMonitor; let nodes_0_deserialized: ChannelManager; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(),
InitFeatures::known()); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -4358,7 +4434,7 @@ fn test_simple_manager_serialize_deserialize() { let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap(); logger = test_utils::TestLogger::new(); fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; @@ -4410,14 +4486,14 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let new_chain_monitor: test_utils::TestChainMonitor; let nodes_0_deserialized: ChannelManager; let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known()); + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known()).2; let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known()); let mut node_0_stale_monitors_serialized = Vec::new(); - for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() { + for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] { let mut writer = test_utils::TestVecWriter(Vec::new()); - monitor.1.write(&mut writer).unwrap(); + get_monitor!(nodes[0], chan_id_iter).write(&mut writer).unwrap(); node_0_stale_monitors_serialized.push(writer.0); } @@ -4434,9 +4510,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { // Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/ // nodes[3]) let mut node_0_monitors_serialized = Vec::new(); - for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() { + for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] { let mut writer = test_utils::TestVecWriter(Vec::new()); - monitor.1.write(&mut writer).unwrap(); + get_monitor!(nodes[0], chan_id_iter).write(&mut writer).unwrap(); node_0_monitors_serialized.push(writer.0); } @@ -4504,6 +4580,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { check_added_monitors!(nodes[0], 1); } nodes[0].node = &nodes_0_deserialized; + check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager); // nodes[1] and nodes[2] have no lost state with nodes[0]... 
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); @@ -4563,11 +4640,12 @@ fn test_claim_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known()); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known()); nodes[1].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan.3); assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening @@ -4591,10 +4669,11 @@ fn test_claim_on_remote_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known()); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known()); nodes[0].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); @@ -4604,6 +4683,7 @@ fn test_claim_on_remote_sizeable_push_msat() { mine_transaction(&nodes[1], &node_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4631,8 +4711,9 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); mine_transaction(&nodes[1], &node_txn[0]); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -4675,7 +4756,7 @@ fn test_static_spendable_outputs_preimage_tx() { } // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: preimage tx + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: preimage tx assert_eq!(node_txn.len(), 3); check_spends!(node_txn[0], commitment_tx[0]); assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); @@ -4683,6
+4764,7 @@ fn test_static_spendable_outputs_preimage_tx() { check_spends!(node_txn[2], node_txn[1]); mine_transaction(&nodes[1], &node_txn[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4727,6 +4809,7 @@ fn test_static_spendable_outputs_timeout_tx() { assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[1]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], our_payment_hash, true); @@ -4757,8 +4840,9 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 2); assert_eq!(node_txn[0].input.len(), 2); check_spends!(node_txn[0], revoked_local_txn[0]); @@ -4793,6 +4877,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -4808,8 +4893,9 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0] // including the one already spent by revoked_htlc_txn[1]. 
That's OK, we'll spend with valid @@ -4864,7 +4950,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(revoked_htlc_txn.len(), 2); assert_eq!(revoked_htlc_txn[0].input.len(), 1); @@ -4880,8 +4967,9 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0] @@ -4960,6 +5048,7 @@ fn test_onchain_to_onchain_claim() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx) assert_eq!(c_txn.len(), 3); @@ -4976,7 +5065,19 @@ fn test_onchain_to_onchain_claim() { let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]}); check_added_monitors!(nodes[1], 1); - expect_payment_forwarded!(nodes[1], Some(1000), true); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => { + assert_eq!(fee_earned_msat, Some(1000)); + assert_eq!(claim_from_onchain_tx, true); + }, + _ => panic!("Unexpected event"), + } { let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelMonitor: claim tx @@ -4984,9 +5085,9 @@ fn test_onchain_to_onchain_claim() { check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager b_txn.clear(); } + check_added_monitors!(nodes[1], 1); let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - check_added_monitors!(nodes[1], 1); match msg_events[0] { MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, _ => panic!("Unexpected event"), @@ -5008,7 +5109,8 @@ fn test_onchain_to_onchain_claim() { // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); mine_transaction(&nodes[1], &commitment_tx[0]); - let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx assert_eq!(b_txn.len(), 3); check_spends!(b_txn[1], chan_1.3); @@ -5050,12 +5152,11 @@ fn test_duplicate_payment_hash_one_failure_one_success() { let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000); - let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, 0).unwrap(); + let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200).unwrap(); // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte // script push size limit so that the below script length checks match // ACCEPTED_HTLC_SCRIPT_WEIGHT. - let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, - &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 900000, TEST_FINAL_CLTV - 40, nodes[0].logger).unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], vec![], 900000, TEST_FINAL_CLTV - 40); send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 900000, duplicate_payment_hash, payment_secret); let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2); @@ -5065,6 +5166,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[1], &commitment_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires let htlc_timeout_tx; @@ -5091,6 +5193,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { nodes[2].node.claim_funds(our_payment_preimage); mine_transaction(&nodes[2], &commitment_txn[0]); check_added_monitors!(nodes[2], 2); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); let events = nodes[2].node.get_and_clear_pending_msg_events(); match events[0] { MessageSendEvent::UpdateHTLCs { .. } => {}, @@ -5150,8 +5253,9 @@ fn test_duplicate_payment_hash_one_failure_one_success() { let events = nodes[0].node.get_and_clear_pending_events(); match events[0] { - Event::PaymentSent { ref payment_preimage } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash, .. 
} => { assert_eq!(*payment_preimage, our_payment_preimage); + assert_eq!(*payment_hash, duplicate_payment_hash); } _ => panic!("Unexpected event"), } @@ -5178,6 +5282,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { check_added_monitors!(nodes[1], 1); mine_transaction(&nodes[1], &local_txn[0]); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let events = nodes[1].node.get_and_clear_pending_msg_events(); match events[0] { MessageSendEvent::UpdateHTLCs { .. } => {}, @@ -5231,7 +5336,6 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]); let nodes = create_network(6, &node_cfgs, &node_chanmgrs); - let logger = test_utils::TestLogger::new(); create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); @@ -5249,34 +5353,32 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 1st HTLC: let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let our_node_id = &nodes[1].node.get_our_node_id(); - let route = get_route(our_node_id, &net_graph_msg_handler.network_graph, &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); // 2nd HTLC: - send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee + send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200).unwrap()); // not added < dust limit + HTLC tx fee // 3rd HTLC: - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200).unwrap()); // not added < dust limit + HTLC tx fee // 4th HTLC: let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); // 5th HTLC: let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); - let route = get_route(our_node_id, &net_graph_msg_handler.network_graph, &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); // 6th 
HTLC: - send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, 0).unwrap()); + send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200).unwrap()); // 7th HTLC: - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, 0).unwrap()); + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200).unwrap()); // 8th HTLC: let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); // 9th HTLC: - let route = get_route(our_node_id, &net_graph_msg_handler.network_graph, &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV, &logger).unwrap(); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200).unwrap()); // not added < dust limit + HTLC tx fee // 10th HTLC: let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 11th HTLC: - let route = get_route(our_node_id, &net_graph_msg_handler.network_graph, &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, 0).unwrap()); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200).unwrap()); // Double-check that six of the new HTLC were added // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie, @@ -5348,9 +5450,26 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } else { mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]); } + let events = nodes[2].node.get_and_clear_pending_events(); + let close_event = if deliver_last_raa { + assert_eq!(events.len(), 2); + events[1].clone() + } else { + assert_eq!(events.len(), 1); + events[0].clone() + }; + match close_event { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} + _ => panic!("Unexpected event"), + } + connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1); check_closed_broadcast!(nodes[2], true); - expect_pending_htlcs_forwardable!(nodes[2]); + if deliver_last_raa { + expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true); + } else { + expect_pending_htlcs_forwardable!(nodes[2]); + } check_added_monitors!(nodes[2], 3); let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events(); @@ -5399,7 +5518,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let mut as_failds = HashSet::new(); let mut as_updates = 0; for event in as_events.iter() { - if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event { + if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event { assert!(as_failds.insert(*payment_hash)); if *payment_hash != payment_hash_2 { assert_eq!(*rejected_by_dest, deliver_last_raa); @@ -5424,7 +5543,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let mut bs_failds = HashSet::new(); let mut bs_updates = 0; for event in bs_events.iter() { - if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event { + if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event { assert!(bs_failds.insert(*payment_hash)); if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 { assert_eq!(*rejected_by_dest, deliver_last_raa); @@ -5487,6 +5606,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { mine_transaction(&nodes[0], &local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires let htlc_timeout = { @@ -5528,7 +5648,7 @@ fn test_key_derivation_params() { let seed = [42; 32]; let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager); - let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, node_seed: seed, features: InitFeatures::known() }; + let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, network_graph: &chanmon_cfgs[0].network_graph, node_seed: seed, features: InitFeatures::known() }; let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); node_cfgs.remove(0); node_cfgs.insert(0, node); @@ -5570,6 +5690,7 @@ fn test_key_derivation_params() { connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -5610,6 +5731,7 @@ fn test_static_output_closing_tx() { let closing_tx = 
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; mine_transaction(&nodes[0], &closing_tx); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); @@ -5617,6 +5739,7 @@ fn test_static_output_closing_tx() { check_spends!(spend_txn[0], closing_tx); mine_transaction(&nodes[1], &closing_tx); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -5631,23 +5754,16 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 }); + let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 }); // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being // present in B's local commitment transaction, but none of A's commitment transactions. - assert!(nodes[1].node.claim_funds(our_payment_preimage)); + assert!(nodes[1].node.claim_funds(payment_preimage)); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { payment_preimage } => { - assert_eq!(payment_preimage, our_payment_preimage); - }, - _ => panic!("Unexpected event"), - } + expect_payment_sent_without_paths!(nodes[0], payment_preimage); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); @@ -5667,6 +5783,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); } fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { @@ -5675,11 +5792,8 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), if use_dust { 50000 } else { 3000000 }, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 }); nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); 
@@ -5699,6 +5813,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); } fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { @@ -5747,8 +5862,16 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); } else { - expect_payment_failed!(nodes[0], our_payment_hash, true); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] { + assert_eq!(*payment_hash, our_payment_hash); + } else { panic!("Unexpected event"); } + if let Event::PaymentFailed { ref payment_hash, .. } = events[1] { + assert_eq!(*payment_hash, our_payment_hash); + } else { panic!("Unexpected event"); } } } @@ -5859,7 +5982,7 @@ fn bolt2_open_channel_sane_dust_limit() { let push_msat=10001; nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap(); let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - node0_to_1_send_open_channel.dust_limit_satoshis = 661; + node0_to_1_send_open_channel.dust_limit_satoshis = 547; node0_to_1_send_open_channel.channel_reserve_satoshis = 100001; nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel); @@ -5870,7 +5993,7 @@ fn bolt2_open_channel_sane_dust_limit() { }, _ => panic!("Unexpected event"), }; - assert_eq!(err_msg.data, "dust_limit_satoshis (661) is greater than the implementation limit (660)"); + assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)"); } // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC @@ -5884,7 +6007,6 @@ fn test_fail_holding_cell_htlc_upon_free() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // First nodes[0] generates an update_fee, setting the channel's // pending_update_fee. @@ -5909,15 +6031,14 @@ fn test_fail_holding_cell_htlc_upon_free() { let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; let feerate = get_feerate!(nodes[0], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. 
- let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap(); + let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); // Send a payment which passes reserve checks but gets stuck in the holding cell. - nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); + let our_payment_id = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); chan_stat = get_channel_value_stat!(nodes[0], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); @@ -5942,11 +6063,13 @@ fn test_fail_holding_cell_htlc_upon_free() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match &events[0] { - &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed } => { + &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref rejected_by_dest, ref network_update, ref all_paths_failed, ref short_channel_id, ref error_code, ref error_data, .. } => { + assert_eq!(our_payment_id, *payment_id.as_ref().unwrap()); assert_eq!(our_payment_hash.clone(), *payment_hash); assert_eq!(*rejected_by_dest, false); assert_eq!(*all_paths_failed, true); assert_eq!(*network_update, None); + assert_eq!(*short_channel_id, None); assert_eq!(*error_code, None); assert_eq!(*error_data, None); }, @@ -5964,7 +6087,6 @@ fn test_free_and_fail_holding_cell_htlcs() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // First nodes[0] generates an update_fee, setting the channel's // pending_update_fee. @@ -5989,21 +6111,19 @@ fn test_free_and_fail_holding_cell_htlcs() { let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; let feerate = get_feerate!(nodes[0], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. 
- let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]); let amt_1 = 20000; - let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); - let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1) - amt_1; - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], amt_1, TEST_FINAL_CLTV, &logger).unwrap(); - let route_2 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], amt_2, TEST_FINAL_CLTV, &logger).unwrap(); + let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors) - amt_1; + let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1); + let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2); // Send 2 payments which pass reserve checks but get stuck in the holding cell. nodes[0].node.send_payment(&route_1, payment_hash_1, &Some(payment_secret_1)).unwrap(); chan_stat = get_channel_value_stat!(nodes[0], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1); - nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap(); + let payment_id_2 = nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap(); chan_stat = get_channel_value_stat!(nodes[0], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2); @@ -6029,11 +6149,13 @@ fn test_free_and_fail_holding_cell_htlcs() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match &events[0] { - &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed } => { + &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref rejected_by_dest, ref network_update, ref all_paths_failed, ref short_channel_id, ref error_code, ref error_data, .. 
} => { + assert_eq!(payment_id_2, *payment_id.as_ref().unwrap()); assert_eq!(payment_hash_2.clone(), *payment_hash); assert_eq!(*rejected_by_dest, false); assert_eq!(*all_paths_failed, true); assert_eq!(*network_update, None); + assert_eq!(*short_channel_id, None); assert_eq!(*error_code, None); assert_eq!(*error_data, None); }, @@ -6072,14 +6194,7 @@ fn test_free_and_fail_holding_cell_htlcs() { let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(*payment_preimage, payment_preimage_1); - } - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage_1); } // Test that if we fail to forward an HTLC that is being freed from the holding cell that the @@ -6098,7 +6213,6 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); // First nodes[1] generates an update_fee, setting the channel's // pending_update_fee. @@ -6123,15 +6237,14 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2); let channel_reserve = chan_stat.channel_reserve_msat; let feerate = get_feerate!(nodes[0], chan_0_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan_0_1.2); // Send a payment which passes reserve checks but gets stuck in the holding cell. 
let feemsat = 239; let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); - let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1) - total_routing_fee_msat; + let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors) - total_routing_fee_msat; + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send); let payment_event = { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -6231,10 +6344,7 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let logger = test_utils::TestLogger::new(); - let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0][0].fee_msat = 100; unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err }, @@ -6251,11 +6361,8 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let logger = test_utils::TestLogger::new(); - let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0][0].fee_msat = 0; unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send 0-msat HTLC")); @@ -6273,10 +6380,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let logger = 
test_utils::TestLogger::new(); - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -6286,6 +6390,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1); check_closed_broadcast!(nodes[1], true).unwrap(); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }); } #[test] @@ -6297,12 +6402,8 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000000, 500000001, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], vec![], 100000000, 500000001); unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::RouteError { ref err }, assert_eq!(err, &"Channel CLTV overflowed?")); } @@ -6319,12 +6420,9 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known()); let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64; - let logger = test_utils::TestLogger::new(); for i in 0..max_accepted_htlcs { - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); let payment_event = { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -6344,9 +6442,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 100000); } - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = 
&nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err))); @@ -6367,14 +6463,10 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight); // Manually create a route over our max in flight (which our router normally automatically // limits us to). - let route = Route { paths: vec![vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), node_features: NodeFeatures::known(), channel_features: ChannelFeatures::known(), - short_channel_id: nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(), - fee_msat: max_in_flight + 1, cltv_expiry_delta: TEST_FINAL_CLTV - }]] }; + route.paths[0][0].fee_msat = max_in_flight + 1; unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err))); @@ -6400,10 +6492,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { htlc_minimum_msat = channel.get_holder_htlc_minimum_msat(); } - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let logger = test_utils::TestLogger::new(); - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], htlc_minimum_msat, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -6413,6 +6502,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\.
Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6423,18 +6513,16 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); let chan_stat = get_channel_value_stat!(nodes[0], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; let feerate = get_feerate!(nodes[0], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan.2); // The 2* and +1 are for the fee spike reserve. - let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1); + let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound; - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -6449,6 +6537,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6460,14 +6549,9 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 3999999); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 3999999, TEST_FINAL_CLTV, &logger).unwrap(); - let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1; let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap(); let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3999999, &Some(our_payment_secret), cur_height, &None).unwrap(); @@ -6493,6 +6577,7 @@ fn 
test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6503,11 +6588,8 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -6518,6 +6600,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6527,12 +6610,9 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let logger = test_utils::TestLogger::new(); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -6543,6 +6623,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height"); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6554,12 +6635,9 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { let node_cfgs = create_node_cfgs(2, 
&chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let logger = test_utils::TestLogger::new(); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -6592,6 +6670,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6602,11 +6681,8 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let logger = test_utils::TestLogger::new(); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -6625,6 +6701,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6636,11 +6713,8 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, 
&nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -6658,6 +6732,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6669,11 +6744,8 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -6691,6 +6763,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6732,6 +6805,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6773,6 +6847,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6784,11 +6859,8 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); - 
let logger = test_utils::TestLogger::new(); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -6821,6 +6893,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6834,14 +6907,11 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000); //First hop let mut payment_event = { - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -6963,16 +7033,17 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let events = nodes[0].node.get_and_clear_pending_events(); - // Only 2 PaymentFailed events should show up, over-dust HTLC has to be failed by timeout tx + // Only 2 PaymentPathFailed events should show up, over-dust HTLC has to be failed by timeout tx assert_eq!(events.len(), 2); let mut first_failed = false; for event in events { match event { - Event::PaymentFailed { payment_hash, .. } => { + Event::PaymentPathFailed { payment_hash, .. 
} => { if payment_hash == payment_hash_1 { assert!(!first_failed); first_failed = true; @@ -7023,6 +7094,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { if local { // We fail dust-HTLC 1 by broadcast of local commitment tx mine_transaction(&nodes[0], &as_commitment_tx[0]); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[0], dust_hash, true); @@ -7042,6 +7114,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { mine_transaction(&nodes[0], &bs_commitment_tx[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone()); @@ -7060,14 +7133,14 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { assert_eq!(events.len(), 2); let first; match events[0] { - Event::PaymentFailed { payment_hash, .. } => { + Event::PaymentPathFailed { payment_hash, .. } => { if payment_hash == dust_hash { first = true; } else { first = false; } }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { payment_hash, .. } => { + Event::PaymentPathFailed { payment_hash, .. } => { if first { assert_eq!(payment_hash, non_dust_hash); } else { assert_eq!(payment_hash, dust_hash); } }, @@ -7099,7 +7172,7 @@ fn test_user_configurable_csv_delay() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound() - if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), 1000000, 1000000, 0, &low_our_to_self_config) { + if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), 1000000, 1000000, 0, &low_our_to_self_config, 0) { match error { APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); }, _ => panic!("Unexpected event"), @@ -7110,7 +7183,7 @@ fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) { + if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config, 0, &nodes[0].logger) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); }, _ => 
panic!("Unexpected event"), @@ -7123,20 +7196,23 @@ fn test_user_configurable_csv_delay() { let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); accept_channel.to_self_delay = 200; nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel); + let reason_msg; if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] { match action { &ErrorAction::SendErrorMessage { ref msg } => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str())); + reason_msg = msg.data.clone(); }, - _ => { assert!(false); } + _ => { panic!(); } } - } else { assert!(false); } + } else { panic!(); } + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }); // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req() nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) { + if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config, 0, &nodes[0].logger) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. 
Actual: \d+").unwrap().is_match(err.as_str())); }, _ => panic!("Unexpected event"), @@ -7173,7 +7249,7 @@ fn test_data_loss_protect() { // Cache node A state before any channel update let previous_node_state = nodes[0].node.encode(); let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut previous_chain_monitor_state).unwrap(); + get_monitor!(nodes[0], chan.2).write(&mut previous_chain_monitor_state).unwrap(); send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); @@ -7243,10 +7319,10 @@ fn test_data_loss_protect() { // Check we close channel detecting A is fallen-behind nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() }); assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction"); check_added_monitors!(nodes[1], 1); - // Check A is able to claim to_remote output let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); @@ -7254,6 +7330,7 @@ fn test_data_loss_protect() { assert_eq!(node_txn[0].output.len(), 2); mine_transaction(&nodes[0], &node_txn[0]); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() }); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], node_txn[0]); @@ -7273,9 +7350,11 @@ fn test_check_htlc_underpaying() { // Create some initial channels create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 10_000, TEST_FINAL_CLTV, nodes[0].logger).unwrap(); + let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let payee = Payee::from_node_id(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known()); + let route = get_route(&nodes[0].node.get_our_node_id(), &payee, nodes[0].network_graph, None, 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer).unwrap(); let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]); - let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, 0).unwrap(); + let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -7328,9 +7407,9 @@ fn test_announce_disable_channels() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let short_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let short_id_2 = create_announced_chan_between_nodes(&nodes, 1, 0, 
InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let short_id_3 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); // Disconnect peers nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); @@ -7340,13 +7419,13 @@ fn test_announce_disable_channels() { nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - let mut chans_disabled: HashSet = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect(); + let mut chans_disabled = HashMap::new(); for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set // Check that each channel gets updated exactly once - if !chans_disabled.remove(&msg.contents.short_channel_id) { + if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() { panic!("Generated ChannelUpdate for wrong chan!"); } }, @@ -7382,19 +7461,22 @@ fn test_announce_disable_channels() { nodes[0].node.timer_tick_occurred(); let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - chans_disabled = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect(); for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off - // Check that each channel gets updated exactly once - if !chans_disabled.remove(&msg.contents.short_channel_id) { - panic!("Generated ChannelUpdate for wrong chan!"); + match chans_disabled.remove(&msg.contents.short_channel_id) { + // Each update should have a higher timestamp than the previous one, replacing + // the old one. + Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp), + None => panic!("Generated ChannelUpdate for wrong chan!"), } }, _ => panic!("Unexpected event"), } } + // Check that each channel gets updated exactly once + assert!(chans_disabled.is_empty()); } #[test] @@ -7413,7 +7495,7 @@ fn test_priv_forwarding_rejection() { let nodes_1_deserialized: ChannelManager; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()); + let chan_id_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()).2; // Note that the create_*_chan functions in utils requires announcement_signatures, which we do // not send for private channels. 
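The hunks above apply the same two-step refactor across the BOLT 2 sender/receiver tests: hand-rolled `get_route(...)` calls (plus a separate `get_payment_preimage_hash!`) collapse into the `get_route_and_payment_hash!` test macro, and every path that force-closes a channel gains an explicit assertion on the recorded `ClosureReason` via the new `check_closed_event!` macro. A condensed sketch of the resulting shape, built only from the test-harness macros already visible in these hunks (it assumes the crate's functional-test fixtures and is meant for orientation rather than as standalone code):

	// One macro call now yields the route together with the payment hash/preimage/secret.
	let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
	nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
	// New in this patch: when the peer force-closes, assert the reason that was recorded.
	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });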
@@ -7428,7 +7510,8 @@ fn test_priv_forwarding_rejection() { nodes[2].node.handle_funding_created(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, nodes[2].node.get_our_node_id())); check_added_monitors!(nodes[2], 1); - nodes[1].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id())); + let cs_funding_signed = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &cs_funding_signed); check_added_monitors!(nodes[1], 1); let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1); @@ -7452,18 +7535,16 @@ fn test_priv_forwarding_rejection() { // ... however, if we send to nodes[2], we will have to pass the private channel from nodes[1] // to nodes[2], which should be rejected: - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); - let route = get_route(&nodes[0].node.get_our_node_id(), - &nodes[0].net_graph_msg_handler.network_graph, - &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, - &[&RouteHint(vec![RouteHintHop { - src_node_id: nodes[1].node.get_our_node_id(), - short_channel_id: nodes[2].node.list_channels()[0].short_channel_id.unwrap(), - fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 }, - cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA, - htlc_minimum_msat: None, - htlc_maximum_msat: None, - }])], 10_000, TEST_FINAL_CLTV, nodes[0].logger).unwrap(); + let route_hint = RouteHint(vec![RouteHintHop { + src_node_id: nodes[1].node.get_our_node_id(), + short_channel_id: nodes[2].node.list_channels()[0].short_channel_id.unwrap(), + fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 }, + cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA, + htlc_minimum_msat: None, + htlc_maximum_msat: None, + }]); + let last_hops = vec![route_hint]; + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], last_hops, 10_000, TEST_FINAL_CLTV); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -7490,12 +7571,8 @@ fn test_priv_forwarding_rejection() { let nodes_1_serialized = nodes[1].node.encode(); let mut monitor_a_serialized = test_utils::TestVecWriter(Vec::new()); let mut monitor_b_serialized = test_utils::TestVecWriter(Vec::new()); - { - let mons = nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap(); - let mut mon_iter = mons.iter(); - mon_iter.next().unwrap().1.write(&mut monitor_a_serialized).unwrap(); - mon_iter.next().unwrap().1.write(&mut monitor_b_serialized).unwrap(); - } + get_monitor!(nodes[1], chan_id_1).write(&mut monitor_a_serialized).unwrap(); + get_monitor!(nodes[1], cs_funding_signed.channel_id).write(&mut monitor_b_serialized).unwrap(); persister = test_utils::TestPersister::new(); let keys_manager = &chanmon_cfgs[1].keys_manager; @@ -7569,11 +7646,9 @@ fn test_bump_penalty_txn_on_revoked_commitment() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known()); - let logger = test_utils::TestLogger::new(); let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; - let net_graph_msg_handler = 
&nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3000000, 30, &logger).unwrap(); + let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], vec![], 3000000, 30); send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000); let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2); @@ -7677,11 +7752,14 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known()); // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps) - let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, - &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger).unwrap(); + let payee = Payee::from_node_id(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known()); + let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let route = get_route(&nodes[0].node.get_our_node_id(), &payee, &nodes[0].network_graph, None, + 3_000_000, 50, nodes[0].logger, &scorer).unwrap(); let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0; - let route = get_route(&nodes[1].node.get_our_node_id(), &nodes[1].net_graph_msg_handler.network_graph, - &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger).unwrap(); + let payee = Payee::from_node_id(nodes[0].node.get_our_node_id()).with_features(InvoiceFeatures::known()); + let route = get_route(&nodes[1].node.get_our_node_id(), &payee, nodes[1].network_graph, None, + 3_000_000, 50, nodes[0].logger, &scorer).unwrap(); send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000); let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2); @@ -7696,6 +7774,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above) let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -7717,7 +7796,12 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] }); let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] }); - expect_pending_htlcs_forwardable_ignore!(nodes[0]); + let events = nodes[0].node.get_and_clear_pending_events(); + expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); + match events[1] { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} + _ => panic!("Unexpected event"), + } let first; let feerate_1; let penalty_txn; @@ -7963,6 +8047,7 @@ fn test_counterparty_raa_skip_no_crash() { &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point }); assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack"); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }); } #[test] @@ -7995,6 +8080,7 @@ fn test_bump_txn_sanitize_tracking_maps() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); let penalty_txn = { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx @@ -8009,11 +8095,45 @@ fn test_bump_txn_sanitize_tracking_maps() { connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); { - let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap(); - if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) { - assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty()); - assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty()); - } + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap(); + assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty()); + assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty()); + } +} + +#[test] +fn test_channel_conf_timeout() { + // Tests that, for inbound channels, we give up on them if the funding transaction does not + // confirm within 2016 blocks, as recommended by BOLT 2. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000, InitFeatures::known(), InitFeatures::known()); + + // The outbound node should wait forever for confirmation: + // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is + // copied here instead of directly referencing the constant. 
+ connect_blocks(&nodes[0], 2016); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + // The inbound node should fail the channel after exactly 2016 blocks + connect_blocks(&nodes[1], 2015); + check_added_monitors!(nodes[1], 0); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + connect_blocks(&nodes[1], 1); + check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut); + let close_ev = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(close_ev.len(), 1); + match close_ev[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(msg.data, "Channel closed because funding transaction failed to confirm within 2016 blocks"); + }, + _ => panic!("Unexpected event"), } } @@ -8066,11 +8186,8 @@ fn test_simple_mpp() { let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let logger = test_utils::TestLogger::new(); - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); @@ -8094,11 +8211,8 @@ fn test_preimage_storage() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; { - let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, 42); - - let logger = test_utils::TestLogger::new(); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200).unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -8114,8 +8228,7 @@ fn test_preimage_storage() { match events[0] { Event::PaymentReceived { ref purpose, .. } => { match &purpose { - PaymentPurpose::InvoicePayment { payment_preimage, user_payment_id, .. } => { - assert_eq!(*user_payment_id, 42); + PaymentPurpose::InvoicePayment { payment_preimage, .. 
} => { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap()); }, _ => panic!("expected PaymentPurpose::InvoicePayment") @@ -8126,8 +8239,10 @@ fn test_preimage_storage() { } #[test] +#[allow(deprecated)] fn test_secret_timeout() { - // Simple test of payment secret storage time outs + // Simple test of payment secret storage time outs. After + // `create_inbound_payment(_for_hash)_legacy` is removed, this test will be removed as well. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -8135,11 +8250,11 @@ fn test_secret_timeout() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0); + let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment_legacy(Some(100_000), 2).unwrap(); // We should fail to register the same payment hash twice, at least until we've connected a // block with time 7200 + CHAN_CONFIRM_DEPTH + 1. - if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) { + if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) { assert_eq!(err, "Duplicate payment hash"); } else { panic!(); } let mut block = { @@ -8154,22 +8269,20 @@ fn test_secret_timeout() { } }; connect_block(&nodes[1], &block); - if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) { + if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) { assert_eq!(err, "Duplicate payment hash"); } else { panic!(); } // If we then connect the second block, we should be able to register the same payment hash - // again with a different user_payment_id (this time getting a new payment secret). + // again (this time getting a new payment secret). block.header.prev_blockhash = block.header.block_hash(); block.header.time += 1; connect_block(&nodes[1], &block); - let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 42).unwrap(); + let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2).unwrap(); assert_ne!(payment_secret_1, our_payment_secret); { - let logger = test_utils::TestLogger::new(); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); nodes[0].node.send_payment(&route, payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -8183,9 +8296,8 @@ fn test_secret_timeout() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, user_payment_id }, .. } => { + Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. 
} => { assert!(payment_preimage.is_none()); - assert_eq!(user_payment_id, 42); assert_eq!(payment_secret, our_payment_secret); // We don't actually have the payment preimage with which to claim this payment! }, @@ -8205,11 +8317,8 @@ fn test_bad_secret_hash() { let random_payment_hash = PaymentHash([42; 32]); let random_payment_secret = PaymentSecret([43; 32]); - let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0); - - let logger = test_utils::TestLogger::new(); - let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2).unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); // All the below cases should end up being handled exactly identically, so we macro the // resulting events. @@ -8289,8 +8398,7 @@ fn test_update_err_monitor_lockdown() { let logger = test_utils::TestLogger::with_id(format!("node {}", 0)); let persister = test_utils::TestPersister::new(); let watchtower = { - let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap(); - let monitor = monitors.get(&outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); let mut w = test_utils::TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( @@ -8351,8 +8459,7 @@ fn test_concurrent_monitor_claim() { let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice")); let persister = test_utils::TestPersister::new(); let watchtower_alice = { - let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap(); - let monitor = monitors.get(&outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); let mut w = test_utils::TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( @@ -8380,8 +8487,7 @@ fn test_concurrent_monitor_claim() { let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob")); let persister = test_utils::TestPersister::new(); let watchtower_bob = { - let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap(); - let monitor = monitors.get(&outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); let mut w = test_utils::TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( @@ -8395,10 +8501,8 @@ fn test_concurrent_monitor_claim() { watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); // Route another payment to generate another update with still previous HTLC pending - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]); + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000); { - let net_graph_msg_handler = &nodes[1].net_graph_msg_handler; - let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 
3000000 , TEST_FINAL_CLTV, &logger).unwrap(); nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); } check_added_monitors!(nodes[1], 1); @@ -8478,6 +8582,7 @@ fn test_pre_lockin_no_chan_closed_update() { let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() }); assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty()); + check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() }, true); } #[test] @@ -8512,6 +8617,7 @@ fn test_htlc_no_detection() { chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); let htlc_timeout = { @@ -8571,6 +8677,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap(); check_closed_broadcast!(nodes[force_closing_node], true); check_added_monitors!(nodes[force_closing_node], 1); + check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed); if go_onchain_before_fulfill { let txn_to_broadcast = match broadcast_alice { true => alice_txn.clone(), @@ -8578,10 +8685,11 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain }; let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}); - let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); } assert_eq!(bob_txn.len(), 1); check_spends!(bob_txn[0], chan_ab.3); @@ -8662,6 +8770,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); } let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); if broadcast_alice { @@ -8877,6 +8986,7 @@ fn test_error_chans_closed() { nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() }); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], false); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() }); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); assert_eq!(nodes[0].node.list_usable_channels().len(), 2); assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2); @@ -8886,6 +8996,7 @@ fn test_error_chans_closed() { let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, 
InitFeatures::known(), InitFeatures::known()); nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() }); check_added_monitors!(nodes[0], 2); + check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() }); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); match events[0] { @@ -8949,14 +9060,16 @@ fn test_invalid_funding_tx() { assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + let expected_err = "funding tx had wrong script/value or output index"; confirm_transaction_at(&nodes[1], &tx, 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() }); check_added_monitors!(nodes[1], 1); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); if let msgs::ErrorAction::SendErrorMessage { msg } = action { - assert_eq!(msg.data, "funding tx had wrong script/value or output index"); + assert_eq!(msg.data, "Channel closed because of an exception: ".to_owned() + expected_err); } else { panic!(); } } else { panic!(); } assert_eq!(nodes[1].node.list_channels().len(), 0); @@ -8993,6 +9106,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t nodes[1].node.force_close_channel(&channel_id).unwrap(); check_closed_broadcast!(nodes[1], true); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); check_added_monitors!(nodes[1], 1); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -9045,6 +9159,121 @@ fn test_tx_confirmed_skipping_blocks_immediate_broadcast() { do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true); } +#[test] +fn test_forwardable_regen() { + // Tests that if we reload a ChannelManager while forwards are pending we will regenerate the + // PendingHTLCsForwardable event automatically, ensuring we don't forget to forward/receive + // HTLCs. + // We test it for both payment receipt and payment forwarding. 
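The `test_forwardable_regen` hunk that follows exercises the reload flow these tests rely on: serialize the `ChannelManager` and each `ChannelMonitor`, stand up a fresh `TestChainMonitor`/`TestPersister`, read everything back through `ChannelManagerReadArgs`, and re-register the monitors so the deserialized manager regenerates its `PendingHTLCsForwardable` event. A condensed sketch of that sequence using the helpers that appear in the following hunk (generic parameters elided as in the surrounding text; orientation only, not standalone code):

	// Persist nodes[1]: the manager bytes plus one serialized ChannelMonitor per channel.
	let nodes_1_serialized = nodes[1].node.encode();
	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
	get_monitor!(nodes[1], chan_id_1).write(&mut chan_0_monitor_serialized).unwrap();
	// Reload: deserialize the monitors, hand them to ChannelManagerReadArgs, read the manager
	// back, then watch_channel() each monitor again on the fresh TestChainMonitor.
	// Once peers reconnect, the reloaded node re-emits the pending forward:
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000);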
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let persister: test_utils::TestPersister; + let new_chain_monitor: test_utils::TestChainMonitor; + let nodes_1_deserialized: ChannelManager; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2; + + // First send a payment to nodes[1] + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + + // Next send a payment which is forwarded by nodes[1] + let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000); + nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + // There is already a PendingHTLCsForwardable event "pending" so another one will not be + // generated + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + + let nodes_1_serialized = nodes[1].node.encode(); + let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); + let mut chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new()); + get_monitor!(nodes[1], chan_id_1).write(&mut chan_0_monitor_serialized).unwrap(); + get_monitor!(nodes[1], chan_id_2).write(&mut chan_1_monitor_serialized).unwrap(); + + persister = test_utils::TestPersister::new(); + let keys_manager = &chanmon_cfgs[1].keys_manager; + new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager); + nodes[1].chain_monitor = &new_chain_monitor; + + let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; + let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor)>::read( + &mut chan_0_monitor_read, keys_manager).unwrap(); + assert!(chan_0_monitor_read.is_empty()); + let mut chan_1_monitor_read = &chan_1_monitor_serialized.0[..]; + let (_, mut chan_1_monitor) = <(BlockHash, ChannelMonitor)>::read( + &mut chan_1_monitor_read, keys_manager).unwrap(); + 
assert!(chan_1_monitor_read.is_empty()); + + let mut nodes_1_read = &nodes_1_serialized[..]; + let (_, nodes_1_deserialized_tmp) = { + let mut channel_monitors = HashMap::new(); + channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor); + channel_monitors.insert(chan_1_monitor.get_funding_txo().0, &mut chan_1_monitor); + <(BlockHash, ChannelManager)>::read(&mut nodes_1_read, ChannelManagerReadArgs { + default_config: UserConfig::default(), + keys_manager, + fee_estimator: node_cfgs[1].fee_estimator, + chain_monitor: nodes[1].chain_monitor, + tx_broadcaster: nodes[1].tx_broadcaster.clone(), + logger: nodes[1].logger, + channel_monitors, + }).unwrap() + }; + nodes_1_deserialized = nodes_1_deserialized_tmp; + assert!(nodes_1_read.is_empty()); + + assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); + assert!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor).is_ok()); + nodes[1].node = &nodes_1_deserialized; + check_added_monitors!(nodes[1], 2); + + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + // Note that nodes[1] and nodes[2] resend their funding_locked here since they haven't updated + // the commitment state. + reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000); + check_added_monitors!(nodes[1], 1); + + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000); + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); +} + #[test] fn test_keysend_payments_to_public_node() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -9053,15 +9282,19 @@ fn test_keysend_payments_to_public_node() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known()); - let network_graph = &nodes[0].net_graph_msg_handler.network_graph; + let network_graph = nodes[0].network_graph; let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); - let route = get_route(&payer_pubkey, network_graph, &payee_pubkey, None, - None, &vec![], 10000, 40, - nodes[0].logger).unwrap(); + let params = RouteParameters { + payee: Payee::for_keysend(payee_pubkey), + final_value_msat: 10000, + final_cltv_expiry_delta: 40, + }; + let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let route = find_route(&payer_pubkey, &params, network_graph, None, nodes[0].logger, &scorer).unwrap(); let test_preimage = PaymentPreimage([42; 32]); - let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap(); + let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -9084,14 +9317,21 @@ fn test_keysend_payments_to_private_node() { nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() }); let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); - let network_graph = &nodes[0].net_graph_msg_handler.network_graph; + let params = RouteParameters { + payee: Payee::for_keysend(payee_pubkey), + final_value_msat: 10000, + final_cltv_expiry_delta: 40, + }; + let network_graph = nodes[0].network_graph; let first_hops = nodes[0].node.list_usable_channels(); - let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey, - Some(&first_hops.iter().collect::>()), &vec![], 10000, 40, - nodes[0].logger).unwrap(); + let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let route = find_route( + &payer_pubkey, ¶ms, network_graph, Some(&first_hops.iter().collect::>()), + nodes[0].logger, &scorer + ).unwrap(); let test_preimage = PaymentPreimage([42; 32]); - let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap(); + let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -9101,41 +9341,55 @@ fn test_keysend_payments_to_private_node() { claim_payment(&nodes[0], &path, test_preimage); } -fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, at_forward: bool, on_holder_tx: bool) { - // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat` policy. +/// The possible events which may trigger a `max_dust_htlc_exposure` breach +#[derive(Clone, Copy, PartialEq)] +enum ExposureEvent { + /// Breach occurs at HTLC forwarding (see `send_htlc`) + AtHTLCForward, + /// Breach occurs at HTLC reception (see `update_add_htlc`) + AtHTLCReception, + /// Breach occurs at outbound update_fee (see `send_update_fee`) + AtUpdateFeeOutbound, +} + +fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) { + // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat` + // policy. // // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and - // trimmed-to-dust HTLC outbound balance and this new payment as included on next counterparty - // commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the update. - // At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC inbound - // and trimmed-to-dust HTLC outbound balance and this new received HTLC as included on next - // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail the update. - // Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel might be - // available again for HTLC processing once the dust bandwidth has cleared up. + // trimmed-to-dust HTLC outbound balance and this new payment as included on next + // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the + // update. 
At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC + // inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included + // on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail + // the update. Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel + // might be available again for HTLC processing once the dust bandwidth has cleared up. let chanmon_cfgs = create_chanmon_cfgs(2); let mut config = test_default_channel_config(); config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); open_channel.max_htlc_value_in_flight_msat = 50_000_000; open_channel.max_accepted_htlcs = 60; - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); - let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); if on_holder_tx { - accept_channel.dust_limit_satoshis = 660; + open_channel.dust_limit_satoshis = 546; } + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel); + let opt_anchors = false; + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 1_000_000, 42); if on_holder_tx { - if let Some(mut chan) = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) { - chan.holder_dust_limit_satoshis = 660; + if let Some(mut chan) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) { + chan.holder_dust_limit_satoshis = 546; } } @@ -9146,70 +9400,117 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, at_forward: bool, nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); check_added_monitors!(nodes[0], 1); - let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked); update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); + let dust_buffer_feerate = { + let chan_lock = nodes[0].node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_dust_buffer_feerate(None) as u64 + }; + let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; + let dust_outbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / 
dust_outbound_htlc_on_holder_tx_msat; + + let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; + let dust_inbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat; + + let dust_htlc_on_counterparty_tx: u64 = 25; + let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx; + if on_holder_tx { if dust_outbound_balance { - for i in 0..2 { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 2_300_000); - if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } + // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`) + // Outbound dust balance: 4372 sats + // Note, we need the sent payment to be above the outbound dust threshold on the counterparty_tx of 2132 sats + for i in 0..dust_outbound_htlc_on_holder_tx { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat); + if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } } } else { - for _ in 0..2 { - route_payment(&nodes[0], &[&nodes[1]], 2_300_000); + // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`) + // Inbound dust balance: 4372 sats + // Note, we need the sent payment to be above the outbound dust threshold on the counterparty_tx of 2031 sats + for _ in 0..dust_inbound_htlc_on_holder_tx { + route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat); } } } else { if dust_outbound_balance { - for i in 0..25 { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 200_000); // + 177_000 msat of HTLC-success tx at 253 sats/kWU - if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } + // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`) + // Outbound dust balance: 5000 sats + for i in 0..dust_htlc_on_counterparty_tx { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat); + if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } } - } else { - for _ in 0..25 { - route_payment(&nodes[0], &[&nodes[1]], 200_000); // + 167_000 msat of HTLC-timeout tx at 253 sats/kWU + } else { + // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`) + // Inbound dust balance: 5000 sats + for _ in 0..dust_htlc_on_counterparty_tx { + route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat); } } } - if at_forward { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { 2_300_000 } else { 200_000 }); + let dust_overflow = dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx + 1); + if exposure_breach_event == ExposureEvent::AtHTLCForward { + let (route, payment_hash, _,
payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); let mut config = UserConfig::default(); + // With default dust exposure: 5000 sats if on_holder_tx { - unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat))); + let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1); + let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat; + unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat))); } else { - unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat))); + unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat))); } - } else { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1 ], if on_holder_tx { 2_300_000 } else { 200_000 }); - nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + } else if exposure_breach_event == ExposureEvent::AtHTLCReception { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); + nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.remove(0)); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + // With default dust exposure: 5000 sats if on_holder_tx { - nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat), 1); + // Outbound dust balance: 6399 sats + let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1); + let 
dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat; + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat), 1); } else { - nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat), 1); + // Outbound dust balance: 5200 sats + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat), 1); + } + } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000); + if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at update_fee-swallowed HTLC", ); } + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = *feerate_lock * 10; } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + nodes[0].logger.assert_log_contains("lightning::ln::channel".to_string(), "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure".to_string(), 1); } - let _ = nodes[1].node.get_and_clear_pending_msg_events(); - let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); + let _ = nodes[0].node.get_and_clear_pending_msg_events(); + let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); added_monitors.clear(); } #[test] fn test_max_dust_htlc_exposure() { - do_test_max_dust_htlc_exposure(true, true, true); - do_test_max_dust_htlc_exposure(false, true, true); - do_test_max_dust_htlc_exposure(false, false, true); - do_test_max_dust_htlc_exposure(false, false, false); - do_test_max_dust_htlc_exposure(true, true, false); - do_test_max_dust_htlc_exposure(true, false, false); - do_test_max_dust_htlc_exposure(true, false, true); - do_test_max_dust_htlc_exposure(false, true, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true); } diff --git a/lightning/src/ln/mod.rs 
b/lightning/src/ln/mod.rs index b5e433270a5..41d978e8d04 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -37,7 +37,7 @@ pub(crate) mod peer_channel_encryptor; #[cfg(feature = "fuzztarget")] pub mod channel; #[cfg(not(feature = "fuzztarget"))] -mod channel; +pub(crate) mod channel; mod onion_utils; pub mod wire; @@ -51,6 +51,9 @@ pub mod wire; mod functional_tests; #[cfg(test)] #[allow(unused_mut)] +mod payment_tests; +#[cfg(test)] +#[allow(unused_mut)] mod chanmon_update_fail_tests; #[cfg(test)] #[allow(unused_mut)] diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index c621f4eba22..2b582fe877b 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -11,16 +11,11 @@ use chain::channelmonitor::{ANTI_REORG_DELAY, Balance}; use chain::transaction::OutPoint; -use ln::{channel, PaymentPreimage, PaymentHash}; +use ln::channel; use ln::channelmanager::BREAKDOWN_TIMEOUT; use ln::features::InitFeatures; -use ln::msgs::{ChannelMessageHandler, ErrorAction}; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; -use routing::network_graph::NetworkUpdate; -use routing::router::get_route; - -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; +use ln::msgs::ChannelMessageHandler; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use bitcoin::blockdata::script::Builder; use bitcoin::blockdata::opcodes; @@ -74,6 +69,7 @@ fn chanmon_fail_from_stale_commitment() { mine_transaction(&nodes[1], &bs_txn[0]); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -103,13 +99,14 @@ fn chanmon_claim_value_coop_close() { assert_eq!(funding_outpoint.to_channel_id(), chan_id); let chan_feerate = get_feerate!(nodes[0], chan_id) as u64; + let opt_anchors = get_opt_anchors!(nodes[0], chan_id); assert_eq!(vec![Balance::ClaimableOnChannelClose { - claimable_amount_satoshis: 1_000_000 - 1_000 - chan_feerate * channel::COMMITMENT_TX_BASE_WEIGHT / 1000 + claimable_amount_satoshis: 1_000_000 - 1_000 - chan_feerate * channel::commitment_tx_base_weight(opt_anchors) / 1000 }], - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); assert_eq!(vec![Balance::ClaimableOnChannelClose { claimable_amount_satoshis: 1_000, }], - nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); nodes[0].node.close_channel(&chan_id).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); @@ -140,23 +137,23 @@ fn chanmon_claim_value_coop_close() { assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); assert_eq!(vec![Balance::ClaimableAwaitingConfirmations { - claimable_amount_satoshis: 1_000_000 - 1_000 - chan_feerate * channel::COMMITMENT_TX_BASE_WEIGHT / 1000, + claimable_amount_satoshis: 1_000_000 - 1_000 - chan_feerate * channel::commitment_tx_base_weight(opt_anchors) / 1000, confirmation_height: nodes[0].best_block_info().1 + 
ANTI_REORG_DELAY - 1, }], - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); assert_eq!(vec![Balance::ClaimableAwaitingConfirmations { claimable_amount_satoshis: 1000, confirmation_height: nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1, }], - nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); assert_eq!(Vec::::new(), - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); assert_eq!(Vec::::new(), - nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(node_a_spendable.len(), 1); @@ -175,6 +172,8 @@ fn chanmon_claim_value_coop_close() { Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap(); check_spends!(spend_tx, shutdown_tx[0]); } + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); } fn sorted_vec(mut v: Vec) -> Vec { @@ -214,13 +213,14 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety let chan_feerate = get_feerate!(nodes[0], chan_id) as u64; + let opt_anchors = get_opt_anchors!(nodes[0], chan_id); let remote_txn = get_local_commitment_txn!(nodes[1], chan_id); // Before B receives the payment preimage, it only suggests the push_msat value of 1_000 sats // as claimable. A lists both its to-self balance and the (possibly-claimable) HTLCs. 
assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { claimable_amount_satoshis: 1_000_000 - 3_000 - 4_000 - 1_000 - 3 - chan_feerate * - (channel::COMMITMENT_TX_BASE_WEIGHT + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000, + (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000, }, Balance::MaybeClaimableHTLCAwaitingTimeout { claimable_amount_satoshis: 3_000, claimable_height: htlc_cltv_timeout, @@ -228,11 +228,11 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, claimable_height: htlc_cltv_timeout, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); assert_eq!(vec![Balance::ClaimableOnChannelClose { claimable_amount_satoshis: 1_000, }], - nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); nodes[1].node.claim_funds(payment_preimage); check_added_monitors!(nodes[1], 1); @@ -247,7 +247,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { if prev_commitment_tx { // To build a previous commitment transaction, deliver one round of commitment messages. nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent_without_paths!(nodes[0], payment_preimage); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -268,7 +268,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { 1_000 - // The push_msat value in satoshis 3 - // The dust HTLC value in satoshis // The commitment transaction fee with two HTLC outputs: - chan_feerate * (channel::COMMITMENT_TX_BASE_WEIGHT + + chan_feerate * (channel::commitment_tx_base_weight(opt_anchors) + if prev_commitment_tx { 1 } else { 2 } * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000, }, Balance::MaybeClaimableHTLCAwaitingTimeout { @@ -282,11 +282,11 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { }); } assert_eq!(sorted_vec(a_expected_balances), - sorted_vec(nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); assert_eq!(vec![Balance::ClaimableOnChannelClose { claimable_amount_satoshis: 1_000 + 3_000 + 4_000, }], - nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); // Broadcast the closing transaction (which has both pending HTLCs in it) and get B's // broadcasted HTLC claim transaction with preimage. 
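As a side note on how the first asserted figure above is put together, the claimable close amount for node A is simply the channel value less everything that is not A's to claim on the next commitment transaction. The binding below is only an illustrative sketch built from this test's own values and helpers, not code from the patch:

let a_claimable_sat = 1_000_000                                // channel value, in sats
    - 3_000 - 4_000                                            // the two pending non-dust HTLCs
    - 1_000                                                    // push_msat paid to B at open, in sats
    - 3                                                        // the dust HTLC, in sats
    - chan_feerate * (channel::commitment_tx_base_weight(opt_anchors)
        + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;  // commitment tx fee with two HTLC outputs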
@@ -314,9 +314,11 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); assert!(nodes[1].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -329,7 +331,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { claimable_amount_satoshis: 1_000_000 - 3_000 - 4_000 - 1_000 - 3 - chan_feerate * - (channel::COMMITMENT_TX_BASE_WEIGHT + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000, + (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000, confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, }, Balance::MaybeClaimableHTLCAwaitingTimeout { claimable_amount_satoshis: 3_000, @@ -338,7 +340,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, claimable_height: htlc_cltv_timeout, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); // The main non-HTLC balance is just awaiting confirmations, but the claimable height is the // CSV delay, not ANTI_REORG_DELAY. assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { @@ -354,7 +356,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, timeout_height: htlc_cltv_timeout, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[0], dust_payment_hash, true); @@ -369,7 +371,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, claimable_height: htlc_cltv_timeout, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { claimable_amount_satoshis: 1_000, confirmation_height: node_b_commitment_claimable, @@ -380,7 +382,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, timeout_height: htlc_cltv_timeout, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(node_a_spendable.len(), 1); @@ -396,7 +398,9 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { // After broadcasting the HTLC 
claim transaction, node A will still consider the HTLC // possibly-claimable up to ANTI_REORG_DELAY, at which point it will drop it. mine_transaction(&nodes[0], &b_broadcast_txn[0]); - if !prev_commitment_tx { + if prev_commitment_tx { + expect_payment_path_successful!(nodes[0]); + } else { expect_payment_sent!(nodes[0], payment_preimage); } assert_eq!(sorted_vec(vec![Balance::MaybeClaimableHTLCAwaitingTimeout { @@ -406,13 +410,13 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, claimable_height: htlc_cltv_timeout, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); assert_eq!(vec![Balance::MaybeClaimableHTLCAwaitingTimeout { claimable_amount_satoshis: 4_000, claimable_height: htlc_cltv_timeout, }], - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); // When the HTLC timeout output is spendable in the next block, A should broadcast it connect_blocks(&nodes[0], htlc_cltv_timeout - nodes[0].best_block_info().1 - 1); @@ -437,12 +441,12 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, }], - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); // After ANTI_REORG_DELAY, A will generate a SpendableOutputs event and drop the claimable // balance entry. connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); assert_eq!(Vec::::new(), - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); expect_payment_failed!(nodes[0], timeout_payment_hash, true); let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); @@ -470,7 +474,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, timeout_height: htlc_cltv_timeout, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); // After reaching the commitment output CSV, we'll get a SpendableOutputs event for it and have // only the HTLCs claimable on node B. @@ -492,7 +496,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, timeout_height: htlc_cltv_timeout, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); // After reaching the claimed HTLC output CSV, we'll get a SpendableOutptus event for it and // have only one HTLC output left spendable. 
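The mechanical change running through these hunks is the move from reaching into the monitor map via `monitors.read().unwrap().get(&funding_outpoint)` to the `get_monitor` accessor, which avoids touching the internal `monitors` map directly. A minimal sketch of the call pattern as used throughout this file, assuming `funding_outpoint` is the channel's funding `OutPoint`:

let balances = nodes[1].chain_monitor.chain_monitor
    .get_monitor(funding_outpoint).unwrap()
    .get_claimable_balances();
assert!(balances.is_empty()); // or compare against the expected `Balance` entries, as above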
@@ -511,7 +515,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, timeout_height: htlc_cltv_timeout, }], - nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); // Finally, mine the HTLC timeout transaction that A broadcasted (even though B should be able // to claim this HTLC with the preimage it knows!). It will remain listed as a claimable HTLC @@ -521,10 +525,10 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { claimable_amount_satoshis: 4_000, timeout_height: htlc_cltv_timeout, }], - nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); assert_eq!(Vec::::new(), - nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap().get(&funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); } #[test] diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 122dd7737b9..9007f3f2ea7 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -30,10 +30,10 @@ use bitcoin::secp256k1; use bitcoin::blockdata::script::Script; use bitcoin::hash_types::{Txid, BlockHash}; -use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures}; +use ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; use prelude::*; -use core::{cmp, fmt}; +use core::fmt; use core::fmt::Debug; use io::{self, Read}; use io_extras::read_to_end; @@ -80,12 +80,29 @@ pub struct Init { /// An error message to be sent or received from a peer #[derive(Clone, Debug, PartialEq)] pub struct ErrorMessage { - /// The channel ID involved in the error + /// The channel ID involved in the error. + /// + /// All-0s indicates a general error unrelated to a specific channel, after which all channels + /// with the sending peer should be closed. pub channel_id: [u8; 32], /// A possibly human-readable error description. - /// The string should be sanitized before it is used (e.g. emitted to logs - /// or printed to stdout). Otherwise, a well crafted error message may trigger a security - /// vulnerability in the terminal emulator or the logging subsystem. + /// The string should be sanitized before it is used (e.g. emitted to logs or printed to + /// stdout). Otherwise, a well crafted error message may trigger a security vulnerability in + /// the terminal emulator or the logging subsystem. + pub data: String, +} + +/// A warning message to be sent or received from a peer +#[derive(Clone, Debug, PartialEq)] +pub struct WarningMessage { + /// The channel ID involved in the warning. + /// + /// All-0s indicates a warning unrelated to a specific channel. + pub channel_id: [u8; 32], + /// A possibly human-readable warning description. + /// The string should be sanitized before it is used (e.g. emitted to logs or printed to + /// stdout). Otherwise, a well crafted error message may trigger a security vulnerability in + /// the terminal emulator or the logging subsystem. 
pub data: String, } @@ -148,6 +165,10 @@ pub struct OpenChannel { pub channel_flags: u8, /// Optionally, a request to pre-set the to-sender output's scriptPubkey for when we collaboratively close pub shutdown_scriptpubkey: OptionalField
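To make the all-zeros convention above concrete, a warning that is not tied to any particular channel might be built as in the sketch below. The field names come from the `WarningMessage` struct declared here; the message text is illustrative and how the message gets queued for sending is not shown:

let warning = msgs::WarningMessage {
    // All-0s indicates the warning concerns the peer connection rather than a specific channel.
    channel_id: [0u8; 32],
    // Free-form description; a receiver should sanitize this before logging or displaying it.
    data: "unrecognized genesis block hash".to_owned(),
};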