From 7956af05aa8a092a3e546db65f8f0ad6a013e889 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 13:31:21 +0000 Subject: [PATCH 01/42] Move `lightning-transaction-sync` back into the main workspace Now that it has the same MSRV as everything else in the workspace, it doesn't need to live on its own. --- .github/workflows/build.yml | 27 +++++-------------------- Cargo.toml | 2 +- ci/ci-tests.sh | 21 +++++++++++++++++++- ci/ci-tx-sync-tests.sh | 39 ------------------------------------- 4 files changed, 26 insertions(+), 63 deletions(-) delete mode 100755 ci/ci-tx-sync-tests.sh diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3c50b2a0041..2658ff454e9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -62,52 +62,35 @@ jobs: - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == '1.75.0'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - - name: Run CI script - shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh - - build-tx-sync: - strategy: - fail-fast: false - matrix: - platform: [ ubuntu-latest, macos-latest ] - toolchain: [ stable, beta, 1.75.0 ] - runs-on: ${{ matrix.platform }} - steps: - - name: Checkout source code - uses: actions/checkout@v4 - - name: Install Rust ${{ matrix.toolchain }} toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} - - name: Set RUSTFLAGS to deny warnings - if: "matrix.toolchain == '1.75.0'" - run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - name: Enable caching for bitcoind + if: matrix.platform != 'windows-latest' id: cache-bitcoind uses: actions/cache@v4 with: path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} key: bitcoind-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs + if: matrix.platform != 'windows-latest' id: cache-electrs uses: actions/cache@v4 with: path: bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} - name: Download bitcoind/electrs - if: "steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true'" + if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./contrib/download_bitcoind_electrs.sh mkdir bin mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} - name: Set bitcoind/electrs environment variables + if: matrix.platform != 'windows-latest' run: | echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" - name: Run CI script shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tx-sync-tests.sh + run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh coverage: needs: fuzz diff --git a/Cargo.toml b/Cargo.toml index f9f7406339e..a0895fe1641 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,11 +16,11 @@ members = [ "lightning-macros", "lightning-dns-resolver", "lightning-liquidity", + "lightning-transaction-sync", "possiblyrandom", ] exclude = [ - "lightning-transaction-sync", "lightning-tests", "ext-functional-test-demo", "no-std-check", diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh 
index 91ead9903cb..488c5ac4826 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -2,7 +2,6 @@ #shellcheck disable=SC2002,SC2207 set -eox pipefail -# Currently unused as we don't have to pin anything for MSRV: RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') # Some crates require pinning to meet our MSRV even for our downstream users, @@ -20,6 +19,9 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace # proptest 1.9.0 requires rustc 1.82.0 [ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p proptest --precise "1.8.0" --verbose +# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. +[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose + export RUST_BACKTRACE=1 echo -e "\n\nChecking the workspace, except lightning-transaction-sync." @@ -57,6 +59,23 @@ cargo check -p lightning-block-sync --verbose --color always --features rpc-clie cargo test -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio cargo check -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio +echo -e "\n\nChecking Transaction Sync Clients with features." +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-blocking +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async-https +cargo check -p lightning-transaction-sync --verbose --color always --features electrum + +if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then + echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." + cargo check -p lightning-transaction-sync --tests +else + echo -e "\n\nTesting Transaction Sync Clients with features." + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-blocking + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async-https + cargo test -p lightning-transaction-sync --verbose --color always --features electrum +fi + echo -e "\n\nChecking and testing lightning-persister with features" cargo test -p lightning-persister --verbose --color always --features tokio cargo check -p lightning-persister --verbose --color always --features tokio diff --git a/ci/ci-tx-sync-tests.sh b/ci/ci-tx-sync-tests.sh deleted file mode 100755 index 0839e2ced3d..00000000000 --- a/ci/ci-tx-sync-tests.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -eox pipefail - -RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') - -pushd lightning-transaction-sync - -# Some crates require pinning to meet our MSRV even for our downstream users, -# which we do here. -# Further crates which appear only as dev-dependencies are pinned further down. -function PIN_RELEASE_DEPS { - return 0 # Don't fail the script if our rustc is higher than the last check -} - -PIN_RELEASE_DEPS # pin the release dependencies - -# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. -[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose - -export RUST_BACKTRACE=1 - -echo -e "\n\nChecking Transaction Sync Clients with features." 
-cargo check --verbose --color always --features esplora-blocking -cargo check --verbose --color always --features esplora-async -cargo check --verbose --color always --features esplora-async-https -cargo check --verbose --color always --features electrum - -if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then - echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." - cargo check --tests -else - echo -e "\n\nTesting Transaction Sync Clients with features." - cargo test --verbose --color always --features esplora-blocking - cargo test --verbose --color always --features esplora-async - cargo test --verbose --color always --features esplora-async-https - cargo test --verbose --color always --features electrum -fi - -popd From 210528475e876b96b59d2844727b09c28f427e4b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 13:41:47 +0000 Subject: [PATCH 02/42] Trivially replace `Box::pin` with `pin!` in a few places Now that our MSRV is above 1.68 we can use the `pin!` macro to avoid having to `Box` various futures, avoiding some allocations, especially in `lightning-net-tokio`, which happens in a tight loop. --- lightning-liquidity/src/lsps2/service.rs | 17 ++++++++--------- lightning-liquidity/src/manager.rs | 8 ++++---- lightning-net-tokio/src/lib.rs | 13 ++++++------- lightning/src/events/bump_transaction/sync.rs | 3 ++- lightning/src/util/persist.rs | 5 ++--- lightning/src/util/sweep.rs | 5 +++-- 6 files changed, 25 insertions(+), 26 deletions(-) diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index a6736e63eef..5c4bd63bc48 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -9,7 +9,6 @@ //! Contains the main bLIP-52 / LSPS2 server-side object, [`LSPS2ServiceHandler`]. 
-use alloc::boxed::Box; use alloc::string::{String, ToString}; use alloc::vec::Vec; use lightning::util::persist::KVStore; @@ -17,6 +16,7 @@ use lightning::util::persist::KVStore; use core::cmp::Ordering as CmpOrdering; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::sync::atomic::{AtomicUsize, Ordering}; use core::task; @@ -2173,7 +2173,7 @@ where &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, intercept_scid: u64, cltv_expiry_delta: u32, client_trusts_lsp: bool, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.invoice_parameters_generated( + let mut fut = pin!(self.inner.invoice_parameters_generated( counterparty_node_id, request_id, intercept_scid, @@ -2202,7 +2202,7 @@ where &self, intercept_scid: u64, intercept_id: InterceptId, expected_outbound_amount_msat: u64, payment_hash: PaymentHash, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_intercepted( + let mut fut = pin!(self.inner.htlc_intercepted( intercept_scid, intercept_id, expected_outbound_amount_msat, @@ -2228,7 +2228,7 @@ where pub fn htlc_handling_failed( &self, failure_type: HTLCHandlingFailureType, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_handling_failed(failure_type)); + let mut fut = pin!(self.inner.htlc_handling_failed(failure_type)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2249,7 +2249,7 @@ where pub fn payment_forwarded( &self, next_channel_id: ChannelId, skimmed_fee_msat: u64, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); + let mut fut = pin!(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2290,7 +2290,7 @@ where &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { let mut fut = - Box::pin(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); + pin!(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2309,8 +2309,7 @@ where pub fn channel_open_failed( &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = - Box::pin(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); + let mut fut = pin!(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2332,7 +2331,7 @@ where &self, user_channel_id: u128, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) -> Result<(), APIError> { let mut fut = - Box::pin(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); + pin!(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index f0143fc624f..d3822715b8d 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -7,7 +7,6 @@ // You may not use this file except in accordance with one or both of these // licenses. 
-use alloc::boxed::Box; use alloc::string::ToString; use alloc::vec::Vec; @@ -61,6 +60,7 @@ use bitcoin::secp256k1::PublicKey; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::task; const LSPS_FEATURE_BIT: usize = 729; @@ -1106,7 +1106,7 @@ where ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new( + let mut fut = pin!(LiquidityManager::new( entropy_source, node_signer, channel_manager, @@ -1159,7 +1159,7 @@ where client_config: Option, time_provider: TP, ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new_with_custom_time_provider( + let mut fut = pin!(LiquidityManager::new_with_custom_time_provider( entropy_source, node_signer, channel_manager, @@ -1289,7 +1289,7 @@ where pub fn persist(&self) -> Result<(), lightning::io::Error> { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match Box::pin(self.inner.persist()).as_mut().poll(&mut ctx) { + match pin!(self.inner.persist()).as_mut().poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { // In a sync context, we can't wait for the future to complete. diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 068f77a84bb..c6fbd3dc3c5 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -43,7 +43,7 @@ use std::hash::Hash; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; @@ -205,18 +205,17 @@ impl Connection { } us_lock.read_paused }; - // TODO: Drop the Box'ing of the futures once Rust has pin-on-stack support. let select_result = if read_paused { TwoSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), } .await } else { ThreeSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), - c: Box::pin(reader.readable()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), + c: pin!(reader.readable()), } .await }; diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index 653710a3358..cbc686ed8fe 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -11,6 +11,7 @@ use core::future::Future; use core::ops::Deref; +use core::pin::pin; use core::task; use crate::chain::chaininterface::BroadcasterInterface; @@ -289,7 +290,7 @@ where /// Handles all variants of [`BumpTransactionEvent`]. 
pub fn handle_event(&self, event: &BumpTransactionEvent) { - let mut fut = Box::pin(self.bump_transaction_event_handler.handle_event(event)); + let mut fut = pin!(self.bump_transaction_event_handler.handle_event(event)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index d00e29e686a..3ad9b4270c5 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -19,7 +19,7 @@ use bitcoin::{BlockHash, Txid}; use core::future::Future; use core::mem; use core::ops::Deref; -use core::pin::Pin; +use core::pin::{pin, Pin}; use core::str::FromStr; use core::task; @@ -490,8 +490,7 @@ impl FutureSpawner for PanicingSpawner { fn poll_sync_future(future: F) -> F::Output { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - // TODO A future MSRV bump to 1.68 should allow for the pin macro - match Pin::new(&mut Box::pin(future)).poll(&mut ctx) { + match pin!(future).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { // In a sync context, we can't wait for the future to complete. diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 5a1ffad3e04..a3ded6f32b8 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -35,6 +35,7 @@ use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; use core::ops::Deref; +use core::pin::pin; use core::sync::atomic::{AtomicBool, Ordering}; use core::task; @@ -970,7 +971,7 @@ where &self, output_descriptors: Vec, channel_id: Option, exclude_static_outputs: bool, delay_until_height: Option, ) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.track_spendable_outputs( + let mut fut = pin!(self.sweeper.track_spendable_outputs( output_descriptors, channel_id, exclude_static_outputs, @@ -1005,7 +1006,7 @@ where /// /// Wraps [`OutputSweeper::regenerate_and_broadcast_spend_if_necessary`]. pub fn regenerate_and_broadcast_spend_if_necessary(&self) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); + let mut fut = pin!(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { From 0b4e1b5c58a864a2ca7dda7f3b0737df2ec2b8fa Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 31 Oct 2025 14:37:38 +0000 Subject: [PATCH 03/42] Drop required `Box`ing of `KVStore` `Future`s Now that our MSRV is 1.75, we can return `impl Trait` from trait methods. Here we use this to clean up `KVStore` methods, dropping the `Pin>` we had to use to have trait methods return a concrete type. Sadly, there's two places where we can't drop a `Box::pin` until we switch to edition 2024. 
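
As a quick illustration of the pattern this patch applies (a minimal sketch using a hypothetical `KvRead` trait and `InMemory` impl for brevity, not the real `KVStore` API; `impl Trait` in trait return position has been stable since Rust 1.75):

    use std::future::Future;
    use std::pin::Pin;

    // Pre-1.75: a trait method had to name a concrete return type, so async work
    // was handed back as a boxed, type-erased future.
    trait KvReadBoxed {
        fn read(
            &self, key: &str,
        ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, ()>> + Send + 'static>>;
    }

    // With return-position `impl Trait` in traits, the same method can return an
    // unboxed future, avoiding the allocation.
    trait KvRead {
        fn read(&self, key: &str) -> impl Future<Output = Result<Vec<u8>, ()>> + Send + 'static;
    }

    struct InMemory;

    impl KvRead for InMemory {
        fn read(&self, key: &str) -> impl Future<Output = Result<Vec<u8>, ()>> + Send + 'static {
            // Owned data moved into the future keeps it 'static and Send.
            let value = key.as_bytes().to_vec();
            async move { Ok(value) }
        }
    }
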
--- ci/check-lint.sh | 3 +- lightning-background-processor/src/lib.rs | 43 +++++++- lightning-persister/src/fs_store.rs | 113 ++++++++++------------ lightning/src/util/persist.rs | 71 ++++++++------ lightning/src/util/sweep.rs | 27 +++++- lightning/src/util/test_utils.rs | 18 ++-- 6 files changed, 165 insertions(+), 110 deletions(-) diff --git a/ci/check-lint.sh b/ci/check-lint.sh index 39c10692310..c1f1b08a1e1 100755 --- a/ci/check-lint.sh +++ b/ci/check-lint.sh @@ -107,7 +107,8 @@ CLIPPY() { -A clippy::useless_conversion \ -A clippy::manual_repeat_n `# to be removed once we hit MSRV 1.86` \ -A clippy::manual_is_multiple_of `# to be removed once we hit MSRV 1.87` \ - -A clippy::uninlined-format-args + -A clippy::uninlined-format-args \ + -A clippy::manual-async-fn # Not really sure why this is even a warning when there's a Send bound } CLIPPY diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 19333c5823a..bc0d42ac191 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -41,6 +41,8 @@ use lightning::events::ReplayEvent; use lightning::events::{Event, PathFailure}; use lightning::util::ser::Writeable; +#[cfg(not(c_bindings))] +use lightning::io::Error; use lightning::ln::channelmanager::AChannelManager; use lightning::ln::msgs::OnionMessageHandler; use lightning::ln::peer_handler::APeerManager; @@ -51,6 +53,8 @@ use lightning::routing::utxo::UtxoLookup; use lightning::sign::{ ChangeDestinationSource, ChangeDestinationSourceSync, EntropySource, OutputSpender, }; +#[cfg(not(c_bindings))] +use lightning::util::async_poll::MaybeSend; use lightning::util::logger::Logger; use lightning::util::persist::{ KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_PERSISTENCE_KEY, @@ -83,7 +87,11 @@ use std::time::Instant; #[cfg(not(feature = "std"))] use alloc::boxed::Box; #[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::string::String; +#[cfg(all(not(c_bindings), not(feature = "std")))] use alloc::sync::Arc; +#[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::vec::Vec; /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its @@ -416,6 +424,37 @@ pub const NO_ONION_MESSENGER: Option< >, > = None; +#[cfg(not(c_bindings))] +/// A panicking implementation of [`KVStore`] that is used in [`NO_LIQUIDITY_MANAGER`]. +pub struct DummyKVStore; + +#[cfg(not(c_bindings))] +impl KVStore for DummyKVStore { + fn read( + &self, _: &str, _: &str, _: &str, + ) -> impl core::future::Future, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn write( + &self, _: &str, _: &str, _: &str, _: Vec, + ) -> impl core::future::Future> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn remove( + &self, _: &str, _: &str, _: &str, _: bool, + ) -> impl core::future::Future> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn list( + &self, _: &str, _: &str, + ) -> impl core::future::Future, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } +} + /// When initializing a background processor without a liquidity manager, this can be used to avoid /// specifying a concrete `LiquidityManager` type. 
#[cfg(not(c_bindings))] @@ -430,8 +469,8 @@ pub const NO_LIQUIDITY_MANAGER: Option< CM = &DynChannelManager, Filter = dyn chain::Filter + Send + Sync, C = &(dyn chain::Filter + Send + Sync), - KVStore = dyn lightning::util::persist::KVStore + Send + Sync, - K = &(dyn lightning::util::persist::KVStore + Send + Sync), + KVStore = DummyKVStore, + K = &DummyKVStore, TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, TP = &(dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync), BroadcasterInterface = dyn lightning::chain::chaininterface::BroadcasterInterface diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index 9b15398d4d1..b2d327f6bc1 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -14,8 +14,6 @@ use std::sync::{Arc, Mutex, RwLock}; #[cfg(feature = "tokio")] use core::future::Future; #[cfg(feature = "tokio")] -use core::pin::Pin; -#[cfg(feature = "tokio")] use lightning::util::persist::KVStore; #[cfg(target_os = "windows")] @@ -464,93 +462,85 @@ impl FilesystemStoreInner { impl KVStore for FilesystemStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin, lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( + let path = this.get_checked_dest_file_path( primary_namespace, secondary_namespace, Some(key), "read", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + ); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.read(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - "write", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "write") + .map(|path| (self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.write_version(inner_lock_ref, path, buf, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - "remove", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "remove") + .map(|path| 
(self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.remove_version(inner_lock_ref, path, lazy, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> Pin, lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - None, - "list", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + let path = + this.get_checked_dest_file_path(primary_namespace, secondary_namespace, None, "list"); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.list(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } } @@ -758,24 +748,24 @@ mod tests { let fs_store = Arc::new(FilesystemStore::new(temp_path)); assert_eq!(fs_store.state_size(), 0); - let async_fs_store: Arc = fs_store.clone(); + let async_fs_store = Arc::clone(&fs_store); let data1 = vec![42u8; 32]; let data2 = vec![43u8; 32]; - let primary_namespace = "testspace"; - let secondary_namespace = "testsubspace"; + let primary = "testspace"; + let secondary = "testsubspace"; let key = "testkey"; // Test writing the same key twice with different data. Execute the asynchronous part out of order to ensure // that eventual consistency works. - let fut1 = async_fs_store.write(primary_namespace, secondary_namespace, key, data1); + let fut1 = KVStore::write(&*async_fs_store, primary, secondary, key, data1); assert_eq!(fs_store.state_size(), 1); - let fut2 = async_fs_store.remove(primary_namespace, secondary_namespace, key, false); + let fut2 = KVStore::remove(&*async_fs_store, primary, secondary, key, false); assert_eq!(fs_store.state_size(), 1); - let fut3 = async_fs_store.write(primary_namespace, secondary_namespace, key, data2.clone()); + let fut3 = KVStore::write(&*async_fs_store, primary, secondary, key, data2.clone()); assert_eq!(fs_store.state_size(), 1); fut3.await.unwrap(); @@ -788,21 +778,18 @@ mod tests { assert_eq!(fs_store.state_size(), 0); // Test list. - let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 1); assert_eq!(listed_keys[0], key); // Test read. We expect to read data2, as the write call was initiated later. - let read_data = - async_fs_store.read(primary_namespace, secondary_namespace, key).await.unwrap(); + let read_data = KVStore::read(&*async_fs_store, primary, secondary, key).await.unwrap(); assert_eq!(data2, &*read_data); // Test remove. 
- async_fs_store.remove(primary_namespace, secondary_namespace, key, false).await.unwrap(); + KVStore::remove(&*async_fs_store, primary, secondary, key, false).await.unwrap(); - let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 0); } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 3ad9b4270c5..7feb781a57a 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -34,7 +34,7 @@ use crate::chain::transaction::OutPoint; use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; -use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; use crate::util::ser::{Readable, ReadableArgs, Writeable}; @@ -216,34 +216,34 @@ where { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.read(primary_namespace, secondary_namespace, key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.write(primary_namespace, secondary_namespace, key, buf); - Box::pin(async move { res }) + async move { res } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.remove(primary_namespace, secondary_namespace, key, lazy); - Box::pin(async move { res }) + async move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.list(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } } @@ -283,16 +283,18 @@ pub trait KVStore { /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; /// Persists the given data under the given `key`. /// - /// The order of multiple writes to the same key needs to be retained while persisting - /// asynchronously. In other words, if two writes to the same key occur, the state (as seen by - /// [`Self::read`]) must either see the first write then the second, or only ever the second, - /// no matter when the futures complete (and must always contain the second write once the - /// second future completes). The state should never contain the first write after the second - /// write's future completes, nor should it contain the second write, then contain the first - /// write at any point thereafter (even if the second write's future hasn't yet completed). + /// Note that this is *not* an `async fn`. Rather, the order of multiple writes to the same key + /// (as defined by the order of the synchronous function calls) needs to be retained while + /// persisting asynchronously. 
In other words, if two writes to the same key occur, the state + /// (as seen by [`Self::read`]) must either see the first write then the second, or only ever + /// the second, no matter when the futures complete (and must always contain the second write + /// once the second future completes). The state should never contain the first write after the + /// second write's future completes, nor should it contain the second write, then contain the + /// first write at any point thereafter (even if the second write's future hasn't yet + /// completed). /// /// One way to ensure this requirement is met is by assigning a version number to each write /// before returning the future, and then during asynchronous execution, ensuring that the @@ -303,7 +305,7 @@ pub trait KVStore { /// Will create the given `primary_namespace` and `secondary_namespace` if not already present in the store. fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future> + 'static + MaybeSend; /// Removes any data that had previously been persisted under the given `key`. /// /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily @@ -311,6 +313,10 @@ pub trait KVStore { /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to /// [`KVStoreSync::list`] might include the removed key until the changes are actually persisted. /// + /// Note that similar to [`Self::write`] this is *not* an `async fn`, but rather a sync fn + /// which defines the order of writes to a given key, but which may complete its operation + /// asynchronously. + /// /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could /// potentially get lost on crash after the method returns. Therefore, this flag should only be @@ -321,12 +327,13 @@ pub trait KVStore { /// to the same key which occur before a removal completes must cancel/overwrite the pending /// removal. /// + /// /// Returns successfully if no data will be stored for the given `primary_namespace`, /// `secondary_namespace`, and `key`, independently of whether it was present before its /// invokation or not. fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future> + 'static + MaybeSend; /// Returns a list of keys that are stored under the given `secondary_namespace` in /// `primary_namespace`. /// @@ -334,7 +341,7 @@ pub trait KVStore { /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown. 
fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; } /// Provides additional interface methods that are required for [`KVStore`]-to-[`KVStore`] @@ -1005,6 +1012,9 @@ where } } +trait MaybeSendableFuture: Future> + MaybeSend {} +impl> + MaybeSend> MaybeSendableFuture for F {} + impl MonitorUpdatingPersisterAsyncInner where @@ -1178,9 +1188,9 @@ where Ok(()) } - fn persist_new_channel( - &self, monitor_name: MonitorName, monitor: &ChannelMonitor, - ) -> impl Future> { + fn persist_new_channel<'a, ChannelSigner: EcdsaChannelSigner>( + &'a self, monitor_name: MonitorName, monitor: &'a ChannelMonitor, + ) -> Pin> + 'static>> { // Determine the proper key for this monitor let monitor_key = monitor_name.to_string(); // Serialize and write the new monitor @@ -1199,7 +1209,10 @@ where // completion of the write. This ensures monitor persistence ordering is preserved. let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes) + // There's no real reason why this needs to be boxed, but dropping it rams into the "hidden + // type for impl... captures lifetime that does not appear in bounds" issue. This can + // trivially be dropped once we upgrade to edition 2024/MSRV 1.85. + Box::pin(self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes)) } fn update_persisted_channel<'a, ChannelSigner: EcdsaChannelSigner + 'a>( @@ -1225,12 +1238,10 @@ where // write method, allowing it to do its queueing immediately, and then return a // future for the completion of the write. This ensures monitor persistence // ordering is preserved. - res_a = Some(self.kv_store.write( - primary, - &monitor_key, - update_name.as_str(), - update.encode(), - )); + let encoded = update.encode(); + res_a = Some(async move { + self.kv_store.write(primary, &monitor_key, update_name.as_str(), encoded).await + }); } else { // We could write this update, but it meets criteria of our design that calls for a full monitor write. // Note that this is NOT an async function, but rather calls the *sync* KVStore diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index a3ded6f32b8..bf048efdae1 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -35,11 +35,11 @@ use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; use core::ops::Deref; -use core::pin::pin; +use core::pin::{pin, Pin}; use core::sync::atomic::{AtomicBool, Ordering}; use core::task; -use super::async_poll::{dummy_waker, AsyncResult}; +use super::async_poll::dummy_waker; /// The number of blocks we wait before we prune the tracked spendable outputs. 
pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY; @@ -610,15 +610,32 @@ where sweeper_state.dirty = true; } - fn persist_state<'a>(&self, sweeper_state: &SweeperState) -> AsyncResult<'a, (), io::Error> { + #[cfg(feature = "std")] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + Send + 'static>> { let encoded = sweeper_state.encode(); - self.kv_store.write( + Box::pin(self.kv_store.write( OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, encoded, - ) + )) + } + + #[cfg(not(feature = "std"))] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + 'static>> { + let encoded = sweeper_state.encode(); + + Box::pin(self.kv_store.write( + OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_KEY, + encoded, + )) } /// Updates the sweeper state by executing the given callback. Persists the state afterwards if it is marked dirty, diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index ad8ea224205..b4db17bee20 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -50,7 +50,7 @@ use crate::sign::{self, ReceiveAuthKey}; use crate::sign::{ChannelSigner, PeerStorageKey}; use crate::sync::RwLock; use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures}; -use crate::util::async_poll::AsyncResult; +use crate::util::async_poll::MaybeSend; use crate::util::config::UserConfig; use crate::util::dyn_signer::{ DynKeysInterface, DynKeysInterfaceTrait, DynPhantomKeysInterface, DynSigner, @@ -1012,13 +1012,13 @@ impl TestStore { impl KVStore for TestStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.read_internal(&primary_namespace, &secondary_namespace, &key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let path = format!("{primary_namespace}/{secondary_namespace}/{key}"); let future = Arc::new(Mutex::new((None, None))); @@ -1027,19 +1027,19 @@ impl KVStore for TestStore { let new_id = pending_writes.last().map(|(id, _, _)| id + 1).unwrap_or(0); pending_writes.push((new_id, Arc::clone(&future), buf)); - Box::pin(OneShotChannel(future)) + OneShotChannel(future) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy); - Box::pin(async move { res }) + async move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.list_internal(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } } From a435228ce6a9a668a0cf065012d42e7abd6f83b8 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 18:11:18 +0000 Subject: [PATCH 04/42] Drop required `Box`ing of `lightning-block-sync` `Future`s Now that our MSRV is 1.75, we can return `impl Trait` from trait methods. 
Here we use this to clean up `lightning-block-sync` trait methods, dropping the `Pin>` we had to use to have trait methods return a concrete type. --- lightning-block-sync/src/gossip.rs | 49 +++++++++++++++----------- lightning-block-sync/src/lib.rs | 18 ++++------ lightning-block-sync/src/poll.rs | 30 +++++++--------- lightning-block-sync/src/rest.rs | 37 ++++++++++--------- lightning-block-sync/src/rpc.rs | 35 ++++++++++-------- lightning-block-sync/src/test_utils.rs | 24 +++++++------ 6 files changed, 101 insertions(+), 92 deletions(-) diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs index 0fe221b9231..596098350c7 100644 --- a/lightning-block-sync/src/gossip.rs +++ b/lightning-block-sync/src/gossip.rs @@ -2,7 +2,7 @@ //! current UTXO set. This module defines an implementation of the LDK API required to do so //! against a [`BlockSource`] which implements a few additional methods for accessing the UTXO set. -use crate::{AsyncBlockSourceResult, BlockData, BlockSource, BlockSourceError}; +use crate::{BlockData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::block::Block; use bitcoin::constants::ChainHash; @@ -18,7 +18,7 @@ use lightning::util::native_async::FutureSpawner; use std::collections::VecDeque; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::{Arc, Mutex}; use std::task::Poll; @@ -35,11 +35,13 @@ pub trait UtxoSource: BlockSource + 'static { /// for gossip validation. fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash>; + ) -> impl Future> + Send + 'a; /// Returns true if the given output has *not* been spent, i.e. is a member of the current UTXO /// set. - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool>; + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a; } #[cfg(feature = "tokio")] @@ -55,34 +57,37 @@ impl FutureSpawner for TokioSpawner { /// A trivial future which joins two other futures and polls them at the same time, returning only /// once both complete. 
pub(crate) struct Joiner< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, + 'a, + A: Future), BlockSourceError>>, + B: Future>, > { - pub a: A, - pub b: B, + pub a: Pin<&'a mut A>, + pub b: Pin<&'a mut B>, a_res: Option<(BlockHash, Option)>, b_res: Option, } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Joiner<'a, A, B> { - fn new(a: A, b: B) -> Self { + fn new(a: Pin<&'a mut A>, b: Pin<&'a mut B>) -> Self { Self { a, b, a_res: None, b_res: None } } } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Future for Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Future for Joiner<'a, A, B> { type Output = Result<((BlockHash, Option), BlockHash), BlockSourceError>; fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll { if self.a_res.is_none() { - match Pin::new(&mut self.a).poll(ctx) { + match self.a.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.a_res = Some(ok); @@ -94,7 +99,7 @@ impl< } } if self.b_res.is_none() { - match Pin::new(&mut self.b).poll(ctx) { + match self.b.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.b_res = Some(ok); @@ -200,10 +205,12 @@ where } } - let ((_, tip_height_opt), block_hash) = - Joiner::new(source.get_best_block(), source.get_block_hash_by_height(block_height)) - .await - .map_err(|_| UtxoLookupError::UnknownTx)?; + let ((_, tip_height_opt), block_hash) = Joiner::new( + pin!(source.get_best_block()), + pin!(source.get_block_hash_by_height(block_height)), + ) + .await + .map_err(|_| UtxoLookupError::UnknownTx)?; if let Some(tip_height) = tip_height_opt { // If the block doesn't yet have five confirmations, error out. // diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index 8656ba6ec6b..02593047658 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -53,7 +53,6 @@ use lightning::chain::{BestBlock, Listen}; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; /// Abstract type for retrieving block headers and data. pub trait BlockSource: Sync + Send { @@ -65,12 +64,13 @@ pub trait BlockSource: Sync + Send { /// when `height_hint` is `None`. fn get_header<'a>( &'a self, header_hash: &'a BlockHash, height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData>; + ) -> impl Future> + Send + 'a; /// Returns the block for a given hash. A headers-only block source should return a `Transient` /// error. - fn get_block<'a>(&'a self, header_hash: &'a BlockHash) - -> AsyncBlockSourceResult<'a, BlockData>; + fn get_block<'a>( + &'a self, header_hash: &'a BlockHash, + ) -> impl Future> + Send + 'a; /// Returns the hash of the best block and, optionally, its height. /// @@ -78,18 +78,14 @@ pub trait BlockSource: Sync + Send { /// to allow for a more efficient lookup. /// /// [`get_header`]: Self::get_header - fn get_best_block(&self) -> AsyncBlockSourceResult<'_, (BlockHash, Option)>; + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a; } /// Result type for `BlockSource` requests. pub type BlockSourceResult = Result; -// TODO: Replace with BlockSourceResult once `async` trait functions are supported. For details, -// see: https://areweasyncyet.rs. -/// Result type for asynchronous `BlockSource` requests. 
-pub type AsyncBlockSourceResult<'a, T> = - Pin> + 'a + Send>>; - /// Error type for `BlockSource` requests. /// /// Transient errors may be resolved when re-polling, but no attempt will be made to re-poll on diff --git a/lightning-block-sync/src/poll.rs b/lightning-block-sync/src/poll.rs index 843cc961899..13e0403c3b6 100644 --- a/lightning-block-sync/src/poll.rs +++ b/lightning-block-sync/src/poll.rs @@ -1,14 +1,12 @@ //! Adapters that make one or more [`BlockSource`]s simpler to poll for new chain tip transitions. -use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - BlockSourceResult, -}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use lightning::chain::BestBlock; +use std::future::Future; use std::ops::Deref; /// The `Poll` trait defines behavior for polling block sources for a chain tip and retrieving @@ -22,17 +20,17 @@ pub trait Poll { /// Returns a chain tip in terms of its relationship to the provided chain tip. fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip>; + ) -> impl Future> + Send + 'a; /// Returns the header that preceded the given header in the chain. fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader>; + ) -> impl Future> + Send + 'a; /// Returns the block associated with the given header. fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock>; + ) -> impl Future> + Send + 'a; } /// A chain tip relative to another chain tip in terms of block hash and chainwork. @@ -217,8 +215,8 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll { fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let (block_hash, height) = self.block_source.get_best_block().await?; if block_hash == best_known_chain_tip.header.block_hash() { return Ok(ChainTip::Common); @@ -231,13 +229,13 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll } else { Ok(ChainTip::Worse(chain_tip)) } - }) + } } fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if header.height == 0 { return Err(BlockSourceError::persistent("genesis block reached")); } @@ -252,15 +250,13 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll header.check_builds_on(&previous_header, self.network)?; Ok(previous_header) - }) + } } fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock> { - Box::pin(async move { - self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) - }) + ) -> impl Future> + Send + 'a { + async move { self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) } } } diff --git a/lightning-block-sync/src/rest.rs b/lightning-block-sync/src/rest.rs index 1f79ab4a0b0..619981bb4d0 100644 --- a/lightning-block-sync/src/rest.rs +++ b/lightning-block-sync/src/rest.rs @@ -4,13 +4,14 @@ use crate::convert::GetUtxosResponse; use crate::gossip::UtxoSource; use crate::http::{BinaryResponse, HttpClient, HttpEndpoint, JsonResponse}; -use 
crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; use std::convert::TryFrom; use std::convert::TryInto; +use std::future::Future; use std::sync::Mutex; /// A simple REST client for requesting resources using HTTP `GET`. @@ -49,49 +50,51 @@ impl RestClient { impl BlockSource for RestClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("headers/1/{}.json", header_hash.to_string()); Ok(self.request_resource::(&resource_path).await?) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("block/{}.bin", header_hash.to_string()); Ok(BlockData::FullBlock( self.request_resource::(&resource_path).await?, )) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin( - async move { Ok(self.request_resource::("chaininfo.json").await?) }, - ) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.request_resource::("chaininfo.json").await?) } } } impl UtxoSource for RestClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("blockhashbyheight/{}.bin", block_height); Ok(self.request_resource::(&resource_path).await?) - }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("getutxos/{}-{}.json", outpoint.txid.to_string(), outpoint.vout); let utxo_result = self.request_resource::(&resource_path).await?; Ok(utxo_result.hit_bitmap_nonempty) - }) + } } } diff --git a/lightning-block-sync/src/rpc.rs b/lightning-block-sync/src/rpc.rs index 3df50a2267b..d851ba2ccf0 100644 --- a/lightning-block-sync/src/rpc.rs +++ b/lightning-block-sync/src/rpc.rs @@ -3,7 +3,7 @@ use crate::gossip::UtxoSource; use crate::http::{HttpClient, HttpEndpoint, HttpError, JsonResponse}; -use crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; @@ -16,6 +16,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::error::Error; use std::fmt; +use std::future::Future; use std::sync::atomic::{AtomicUsize, Ordering}; /// An error returned by the RPC server. @@ -135,47 +136,51 @@ impl RpcClient { impl BlockSource for RpcClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); Ok(self.call_method("getblockheader", &[header_hash]).await?) 
- }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); let verbosity = serde_json::json!(0); Ok(BlockData::FullBlock(self.call_method("getblock", &[header_hash, verbosity]).await?)) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { Ok(self.call_method("getblockchaininfo", &[]).await?) }) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.call_method("getblockchaininfo", &[]).await?) } } } impl UtxoSource for RpcClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let height_param = serde_json::json!(block_height); Ok(self.call_method("getblockhash", &[height_param]).await?) - }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let txid_param = serde_json::json!(outpoint.txid.to_string()); let vout_param = serde_json::json!(outpoint.vout); let include_mempool = serde_json::json!(false); let utxo_opt: serde_json::Value = self.call_method("gettxout", &[txid_param, vout_param, include_mempool]).await?; Ok(!utxo_opt.is_null()) - }) + } } } diff --git a/lightning-block-sync/src/test_utils.rs b/lightning-block-sync/src/test_utils.rs index d307c4506eb..40788e4d08c 100644 --- a/lightning-block-sync/src/test_utils.rs +++ b/lightning-block-sync/src/test_utils.rs @@ -1,7 +1,6 @@ use crate::poll::{Validate, ValidatedBlockHeader}; use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - UnboundedCache, + BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult, UnboundedCache, }; use bitcoin::block::{Block, Header, Version}; @@ -17,6 +16,7 @@ use lightning::chain::BestBlock; use std::cell::RefCell; use std::collections::VecDeque; +use std::future::Future; #[derive(Default)] pub struct Blockchain { @@ -141,8 +141,8 @@ impl Blockchain { impl BlockSource for Blockchain { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if self.without_headers { return Err(BlockSourceError::persistent("header not found")); } @@ -158,13 +158,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("header not found")) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { for (height, block) in self.blocks.iter().enumerate() { if block.header.block_hash() == *header_hash { if let Some(without_blocks) = &self.without_blocks { @@ -181,11 +181,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("block not found")) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { match self.blocks.last() { None => Err(BlockSourceError::transient("empty chain")), Some(block) => { 
@@ -193,7 +195,7 @@ impl BlockSource for Blockchain { Ok((block.block_hash(), Some(height))) }, } - }) + } } } From 3da5f583e503d742a185ebf7b0207b2f6cd6c0d6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 18:24:51 +0000 Subject: [PATCH 05/42] Drop required `Box`ing of `lightning` trait `Future`s Now that our MSRV is 1.75, we can return `impl Trait` from trait methods. Here we use this to clean up `lightning` crate trait methods, dropping the `Pin>`/`AsyncResult` we had to use to have trait methods return a concrete type. --- lightning/src/events/bump_transaction/mod.rs | 31 ++++++++++----- lightning/src/events/bump_transaction/sync.rs | 38 +++++++++++-------- lightning/src/sign/mod.rs | 14 +++++-- lightning/src/util/async_poll.rs | 12 ------ 4 files changed, 54 insertions(+), 41 deletions(-) diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 3d9beb82c07..e141d9b8abc 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -14,6 +14,7 @@ pub mod sync; use alloc::collections::BTreeMap; +use core::future::Future; use core::ops::Deref; use crate::chain::chaininterface::{ @@ -36,7 +37,7 @@ use crate::sign::{ ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2WPKH_WITNESS_WEIGHT, }; use crate::sync::Mutex; -use crate::util::async_poll::{AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::amount::Amount; @@ -394,13 +395,15 @@ pub trait CoinSelectionSource { fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()>; + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full witness for all inputs within the transaction known to the /// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to @@ -412,17 +415,23 @@ pub trait CoinSelectionSource { // Note that updates to documentation on this trait should be copied to the synchronous version. pub trait WalletSource { /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()>; + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a; /// Returns a script to use for change above dust resulting from a successful coin selection /// attempt. - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within /// the transaction known to the wallet (i.e., any provided via /// [`WalletSource::list_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. 
- fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// A wrapper over [`WalletSource`] that implements [`CoinSelectionSource`] by preferring UTXOs @@ -617,8 +626,8 @@ where fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { - Box::pin(async move { + ) -> impl Future> + MaybeSend + 'a { + async move { let utxos = self.source.list_confirmed_utxos().await?; // TODO: Use fee estimation utils when we upgrade to bitcoin v0.30.0. let total_output_size: u64 = must_pay_to @@ -665,10 +674,12 @@ where } } Err(()) - }) + } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { self.source.sign_psbt(psbt) } } diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index cbc686ed8fe..1328c2c1b3a 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -18,7 +18,7 @@ use crate::chain::chaininterface::BroadcasterInterface; use crate::chain::ClaimId; use crate::prelude::*; use crate::sign::SignerProvider; -use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::{Psbt, ScriptBuf, Transaction, TxOut}; @@ -72,19 +72,25 @@ impl WalletSource for WalletSourceSyncWrapper where T::Target: WalletSourceSync, { - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()> { + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a { let utxos = self.0.list_confirmed_utxos(); - Box::pin(async move { utxos }) + async move { utxos } } - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_script(); - Box::pin(async move { script }) + async move { script } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let signed_psbt = self.0.sign_psbt(psbt); - Box::pin(async move { signed_psbt }) + async move { signed_psbt } } } @@ -123,7 +129,7 @@ where &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, ) -> Result { - let mut fut = self.wallet.select_confirmed_utxos( + let fut = self.wallet.select_confirmed_utxos( claim_id, must_spend, must_pay_to, @@ -132,7 +138,7 @@ where ); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!( @@ -143,10 +149,10 @@ where } fn sign_psbt(&self, psbt: Psbt) -> Result { - let mut fut = self.wallet.sign_psbt(psbt); + let fut = self.wallet.sign_psbt(psbt); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!("Wallet::sign_psbt should not be pending in a sync context"); @@ -234,7 +240,7 @@ where 
fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { + ) -> impl Future> + MaybeSend + 'a { let coins = self.0.select_confirmed_utxos( claim_id, must_spend, @@ -242,12 +248,14 @@ where target_feerate_sat_per_1000_weight, max_tx_weight, ); - Box::pin(async move { coins }) + async move { coins } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let psbt = self.0.sign_psbt(psbt); - Box::pin(async move { psbt }) + async move { psbt } } } diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 1d771d22783..6d0d5bf405a 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -58,7 +58,7 @@ use crate::ln::script::ShutdownScript; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::types::features::ChannelTypeFeatures; use crate::types::payment::PaymentPreimage; -use crate::util::async_poll::AsyncResult; +use crate::util::async_poll::MaybeSend; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::transaction_utils; @@ -68,7 +68,9 @@ use crate::sign::ecdsa::EcdsaChannelSigner; #[cfg(taproot)] use crate::sign::taproot::TaprootChannelSigner; use crate::util::atomic_counter::AtomicCounter; + use core::convert::TryInto; +use core::future::Future; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; #[cfg(taproot)] @@ -1066,7 +1068,9 @@ pub trait ChangeDestinationSource { /// /// This method should return a different value each time it is called, to avoid linking /// on-chain funds controlled to the same user. - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; } /// A synchronous helper trait that describes an on-chain wallet capable of returning a (change) destination script. @@ -1101,9 +1105,11 @@ impl ChangeDestinationSource for ChangeDestinationSourceSyncWrapper where T::Target: ChangeDestinationSourceSync, { - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_destination_script(); - Box::pin(async move { script }) + async move { script } } } diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index eefa40d1055..9c2ca4c247f 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -9,7 +9,6 @@ //! Some utilities to make working with the standard library's [`Future`]s easier -use alloc::boxed::Box; use alloc::vec::Vec; use core::future::Future; use core::marker::Unpin; @@ -92,17 +91,6 @@ pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } } -#[cfg(feature = "std")] -/// A type alias for a future that returns a result of type `T` or error `E`. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub type AsyncResult<'a, T, E> = Pin> + 'a + Send>>; -#[cfg(not(feature = "std"))] -/// A type alias for a future that returns a result of type `T` or error `E`. -/// -/// This is not exported to bindings users as async is only supported in Rust. 
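
The synchronous wrapper impls above work because the wrapped futures never actually await anything, so a single poll with a throwaway waker is guaranteed to return `Ready`. Below is a standalone sketch of that technique; the waker construction is my own stand-in for what LDK's internal `dummy_waker` presumably amounts to, and `poll_once_expect_ready` is an invented helper name.

```rust
use std::future::Future;
use std::pin::pin;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// A waker that does nothing when woken; acceptable here only because the
// future is never expected to return `Pending`.
fn noop_waker() -> Waker {
    fn vtable_clone(_: *const ()) -> RawWaker {
        RawWaker::new(std::ptr::null(), &VTABLE)
    }
    fn vtable_noop(_: *const ()) {}
    static VTABLE: RawWakerVTable =
        RawWakerVTable::new(vtable_clone, vtable_noop, vtable_noop, vtable_noop);
    unsafe { Waker::from_raw(RawWaker::new(std::ptr::null(), &VTABLE)) }
}

// Drive a future that must be immediately ready, without any executor: pin it
// on the stack with `pin!` and poll it exactly once.
fn poll_once_expect_ready<F: Future>(fut: F) -> F::Output {
    let waker = noop_waker();
    let mut cx = Context::from_waker(&waker);
    match pin!(fut).poll(&mut cx) {
        Poll::Ready(output) => output,
        Poll::Pending => unreachable!("future should not be pending in a sync context"),
    }
}

fn main() {
    // An `async` block with no `.await` completes on its first poll.
    let sum = poll_once_expect_ready(async { 40 + 2 });
    assert_eq!(sum, 42);
}
```
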
-pub type AsyncResult<'a, T, E> = Pin> + 'a>>; - /// Marker trait to optionally implement `Sync` under std. /// /// This is not exported to bindings users as async is only supported in Rust. From b1f1ee2a1d36f611c187b21b7a0dbbb2efb52036 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 14:41:09 +0000 Subject: [PATCH 06/42] Drop `Box`ing of iterators during BOLT 11 invoice serialization Now that we have an MSRV that supports returning `impl Trait` in trait methods, we can use it to avoid the `Box` we had spewed all over our BOLT 11 invoice serialization. --- lightning-invoice/src/lib.rs | 5 +- lightning-invoice/src/ser.rs | 136 ++++++++++++++++++----------------- 2 files changed, 71 insertions(+), 70 deletions(-) diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 47f929377de..60d413cf76a 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -40,7 +40,6 @@ use bitcoin::secp256k1::ecdsa::RecoverableSignature; use bitcoin::secp256k1::PublicKey; use bitcoin::secp256k1::{Message, Secp256k1}; -use alloc::boxed::Box; use alloc::string; use core::cmp::Ordering; use core::fmt::{self, Display, Formatter}; @@ -1081,8 +1080,8 @@ macro_rules! find_all_extract { #[allow(missing_docs)] impl RawBolt11Invoice { /// Hash the HRP (as bytes) and signatureless data part (as Fe32 iterator) - fn hash_from_parts<'s>( - hrp_bytes: &[u8], data_without_signature: Box + 's>, + fn hash_from_parts<'s, I: Iterator + 's>( + hrp_bytes: &[u8], data_without_signature: I, ) -> [u8; 32] { use crate::bech32::Fe32IterExt; use bitcoin::hashes::HashEngine; diff --git a/lightning-invoice/src/ser.rs b/lightning-invoice/src/ser.rs index 5c93fa84ae0..853accdd3ca 100644 --- a/lightning-invoice/src/ser.rs +++ b/lightning-invoice/src/ser.rs @@ -1,4 +1,3 @@ -use alloc::boxed::Box; use core::fmt; use core::fmt::{Display, Formatter}; use core::{array, iter}; @@ -13,14 +12,28 @@ use super::{ SignedRawBolt11Invoice, TaggedField, }; +macro_rules! define_iterator_enum { + ($name: ident, $($n: ident),*) => { + enum $name<$($n: Iterator,)*> { + $($n($n),)* + } + impl<$($n: Iterator,)*> Iterator for $name<$($n,)*> { + type Item = Fe32; + fn next(&mut self) -> Option { + match self { + $(Self::$n(iter) => iter.next(),)* + } + } + } + } +} + /// Objects that can be encoded to base32 (bech32). /// -/// Private to this crate to avoid polluting the API. +/// Private to this crate (except in fuzzing) to avoid polluting the API. pub trait Base32Iterable { - /// apoelstra: In future we want to replace this Box with an explicit - /// associated type, to avoid the allocation. But we cannot do this until - /// Rust 1.65 and GATs since the iterator may contain a reference to self. - fn fe_iter<'s>(&'s self) -> Box + 's>; + /// Serialize this object, returning an iterator over bech32 field elements. + fn fe_iter<'s>(&'s self) -> impl Iterator + 's; } /// Interface to calculate the length of the base32 representation before actually serializing @@ -32,7 +45,7 @@ pub(crate) trait Base32Len: Base32Iterable { // Base32Iterable & Base32Len implementations are here, because the traits are in this module. 
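
For readers who have not seen the trick before, the macro introduced above just generates an "either of N iterators" enum. A hand-written two-variant equivalent, with invented names and `u8` items rather than `Fe32`, shows why it lets each match arm return a different concrete iterator behind a single `impl Iterator` without boxing:

```rust
// Either of two iterators over the same item type; `Iterator` is implemented
// by delegating to whichever variant is present.
enum TwoIters<A: Iterator<Item = u8>, B: Iterator<Item = u8>> {
    A(A),
    B(B),
}

impl<A: Iterator<Item = u8>, B: Iterator<Item = u8>> Iterator for TwoIters<A, B> {
    type Item = u8;
    fn next(&mut self) -> Option<u8> {
        match self {
            TwoIters::A(iter) => iter.next(),
            TwoIters::B(iter) => iter.next(),
        }
    }
}

// Each branch builds a different iterator type, yet the function still returns
// one `impl Iterator`.
fn digits_or_copy(data: &[u8], as_range: bool) -> impl Iterator<Item = u8> + '_ {
    if as_range {
        TwoIters::A(0u8..10u8)
    } else {
        TwoIters::B(data.iter().copied())
    }
}

fn main() {
    let data = [7u8, 8, 9];
    assert_eq!(digits_or_copy(&data, true).count(), 10);
    assert_eq!(digits_or_copy(&data, false).collect::<Vec<_>>(), vec![7, 8, 9]);
}
```
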
impl Base32Iterable for [u8; N] { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { self[..].fe_iter() } } @@ -45,8 +58,8 @@ impl Base32Len for [u8; N] { } impl Base32Iterable for [u8] { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -58,8 +71,8 @@ impl Base32Len for [u8] { } impl Base32Iterable for Vec { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -71,8 +84,8 @@ impl Base32Len for Vec { } impl Base32Iterable for PaymentSecret { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -88,7 +101,7 @@ impl Base32Iterable for Bolt11InvoiceFeatures { /// starting from the rightmost bit, /// and taking the resulting 5-bit values in reverse (left-to-right), /// with the leading 0's skipped. - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { // Fe32 conversion cannot be used, because this packs from right, right-to-left let mut input_iter = self.le_flags().iter(); // Carry bits, 0..7 bits @@ -126,7 +139,7 @@ impl Base32Iterable for Bolt11InvoiceFeatures { output.push(Fe32::try_from(next_out8 & 31u8).expect("<32")) } // Take result in reverse order, and skip leading 0s - Box::new(output.into_iter().rev().skip_while(|e| *e == Fe32::Q)) + output.into_iter().rev().skip_while(|e| *e == Fe32::Q) } } @@ -241,36 +254,35 @@ fn encoded_int_be_base32_size(int: u64) -> usize { } impl Base32Iterable for RawDataPart { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let ts_iter = self.timestamp.fe_iter(); let fields_iter = self.tagged_fields.iter().map(RawTaggedField::fe_iter).flatten(); - Box::new(ts_iter.chain(fields_iter)) + ts_iter.chain(fields_iter) } } impl Base32Iterable for PositiveTimestamp { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let fes = encode_int_be_base32(self.as_unix_timestamp()); debug_assert!(fes.len() <= 7, "Invalid timestamp length"); let to_pad = 7 - fes.len(); - Box::new(core::iter::repeat(Fe32::Q).take(to_pad).chain(fes)) + core::iter::repeat(Fe32::Q).take(to_pad).chain(fes) } } impl Base32Iterable for RawTaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { - // Annoyingly, when we move to explicit types, we will need an - // explicit enum holding the two iterator variants. 
+ fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + define_iterator_enum!(TwoIters, A, B); match *self { - RawTaggedField::UnknownSemantics(ref content) => Box::new(content.iter().copied()), - RawTaggedField::KnownSemantics(ref tagged_field) => tagged_field.fe_iter(), + RawTaggedField::UnknownSemantics(ref content) => TwoIters::A(content.iter().copied()), + RawTaggedField::KnownSemantics(ref tagged_field) => TwoIters::B(tagged_field.fe_iter()), } } } impl Base32Iterable for Sha256 { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -281,8 +293,8 @@ impl Base32Len for Sha256 { } impl Base32Iterable for Description { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0 .0.as_bytes().fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0 .0.as_bytes().fe_iter() } } @@ -293,8 +305,8 @@ impl Base32Len for Description { } impl Base32Iterable for PayeePubKey { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.serialize().into_iter().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.serialize().into_iter().bytes_to_fes() } } @@ -305,8 +317,8 @@ impl Base32Len for PayeePubKey { } impl Base32Iterable for ExpiryTime { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.as_seconds())) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.as_seconds()) } } @@ -317,8 +329,8 @@ impl Base32Len for ExpiryTime { } impl Base32Iterable for MinFinalCltvExpiryDelta { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.0)) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.0) } } @@ -329,8 +341,8 @@ impl Base32Len for MinFinalCltvExpiryDelta { } impl Base32Iterable for Fallback { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(match *self { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + match *self { Fallback::SegWitProgram { version: v, program: ref p } => { let v = Fe32::try_from(v.to_num()).expect("valid version"); core::iter::once(v).chain(p[..].fe_iter()) @@ -343,7 +355,7 @@ impl Base32Iterable for Fallback { // 18 'J' core::iter::once(Fe32::J).chain(hash[..].fe_iter()) }, - }) + } } } @@ -371,7 +383,7 @@ type RouteHintHopIter = iter::Chain< >; impl Base32Iterable for PrivateRoute { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { fn serialize_to_iter(hop: &RouteHintHop) -> RouteHintHopIter { let i1 = hop.src_node_id.serialize().into_iter(); let i2 = u64::to_be_bytes(hop.short_channel_id).into_iter(); @@ -381,7 +393,7 @@ impl Base32Iterable for PrivateRoute { i1.chain(i2).chain(i3).chain(i4).chain(i5) } - Box::new(self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes()) + self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes() } } @@ -391,16 +403,11 @@ impl Base32Len for PrivateRoute { } } -// Shorthand type -type TaggedFieldIter = core::iter::Chain, I>; - impl Base32Iterable for TaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { /// Writes a tagged field: tag, length and data. `tag` should be in `0..32` otherwise the /// function will panic. 
- fn write_tagged_field<'s, P>( - tag: u8, payload: &'s P, - ) -> TaggedFieldIter + 's>> + fn write_tagged_field<'s, P>(tag: u8, payload: &'s P) -> impl Iterator + 's where P: Base32Iterable + Base32Len + ?Sized, { @@ -416,54 +423,49 @@ impl Base32Iterable for TaggedField { .chain(payload.fe_iter()) } - // we will also need a giant enum for this - Box::new(match *self { + define_iterator_enum!(ManyIters, A, B, C, D, E, F, G, H, I, J, K); + match *self { TaggedField::PaymentHash(ref hash) => { - write_tagged_field(constants::TAG_PAYMENT_HASH, hash) + ManyIters::A(write_tagged_field(constants::TAG_PAYMENT_HASH, hash)) }, TaggedField::Description(ref description) => { - write_tagged_field(constants::TAG_DESCRIPTION, description) + ManyIters::B(write_tagged_field(constants::TAG_DESCRIPTION, description)) }, TaggedField::PayeePubKey(ref pub_key) => { - write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key) + ManyIters::C(write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key)) }, TaggedField::DescriptionHash(ref hash) => { - write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash) + ManyIters::D(write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash)) }, TaggedField::ExpiryTime(ref duration) => { - write_tagged_field(constants::TAG_EXPIRY_TIME, duration) + ManyIters::E(write_tagged_field(constants::TAG_EXPIRY_TIME, duration)) }, TaggedField::MinFinalCltvExpiryDelta(ref expiry) => { - write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry) + ManyIters::F(write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry)) }, TaggedField::Fallback(ref fallback_address) => { - write_tagged_field(constants::TAG_FALLBACK, fallback_address) + ManyIters::G(write_tagged_field(constants::TAG_FALLBACK, fallback_address)) }, TaggedField::PrivateRoute(ref route_hops) => { - write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops) + ManyIters::H(write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops)) }, TaggedField::PaymentSecret(ref payment_secret) => { - write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret) + ManyIters::I(write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret)) }, TaggedField::PaymentMetadata(ref payment_metadata) => { - write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata) + ManyIters::J(write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata)) }, TaggedField::Features(ref features) => { - write_tagged_field(constants::TAG_FEATURES, features) + ManyIters::K(write_tagged_field(constants::TAG_FEATURES, features)) }, - }) + } } } impl Base32Iterable for Bolt11InvoiceSignature { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let (recovery_id, signature) = self.0.serialize_compact(); - Box::new( - signature - .into_iter() - .chain(core::iter::once(recovery_id.to_i32() as u8)) - .bytes_to_fes(), - ) + signature.into_iter().chain(core::iter::once(recovery_id.to_i32() as u8)).bytes_to_fes() } } From 4561bc5bf3887897f077bddb96330cccf3ccff0d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 1 Dec 2025 16:12:17 -0800 Subject: [PATCH 07/42] Git-ignore lightning-tests/target Similar to the other /target directories we ignore where a bunch of files are generated during testing. 
--- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index ed10eb14387..56e94616eeb 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ lightning-dns-resolver/target ext-functional-test-demo/target no-std-check/target msrv-no-dev-deps-check/target +lightning-tests/target From 4de6b5c8bf43a746320e3abf4a3d83ef779bf62c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 14 Nov 2025 11:36:18 +0100 Subject: [PATCH 08/42] Channel logging improvements Additional trace logs to help with debugging. --- lightning/src/ln/channel.rs | 4 ++-- lightning/src/ln/channelmanager.rs | 14 ++++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2068a254f45..b1c2458014c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8905,8 +8905,8 @@ where ); return_with_htlcs_to_fail!(htlcs_to_fail); } else { - log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update.", - release_state_str); + log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update {}.", + release_state_str, monitor_update.update_id); self.monitor_updating_paused( false, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 399c51b9d9a..f938939f279 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9451,6 +9451,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ for action in actions.into_iter() { match action { MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { + let (peer_id, chan_id) = pending_mpp_claim.as_ref().map(|c| (Some(c.0), Some(c.1))).unwrap_or_default(); + let logger = WithContext::from(&self.logger, peer_id, chan_id, Some(payment_hash)); + log_trace!(logger, "Handling PaymentClaimed monitor update completion action"); + if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { @@ -9526,6 +9530,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // `payment_id` should suffice to ensure we never spuriously drop a second // event for a duplicate payment. 
if !pending_events.contains(&event_action) { + log_trace!(logger, "Queuing PaymentClaimed event with event completion action {:?}", event_action.1); pending_events.push_back(event_action); } } @@ -17109,10 +17114,6 @@ where let logger = WithChannelMonitor::from(&args.logger, monitor, None); let channel_id = monitor.channel_id(); - log_info!( - logger, - "Queueing monitor update to ensure missing channel is force closed", - ); let monitor_update = ChannelMonitorUpdate { update_id: monitor.get_latest_update_id().saturating_add(1), updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { @@ -17120,6 +17121,11 @@ where }], channel_id: Some(monitor.channel_id()), }; + log_info!( + logger, + "Queueing monitor update {} to ensure missing channel is force closed", + monitor_update.update_id + ); let funding_txo = monitor.get_funding_txo(); let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, From f312c24f5375b47be6488f7af03ec1448adf73c8 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 3 Dec 2025 09:05:34 +0100 Subject: [PATCH 09/42] Rustfmt handle_monitor_update_completion_actions --- lightning/src/ln/channelmanager.rs | 109 +++++++++++++++++++++-------- 1 file changed, 78 insertions(+), 31 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f938939f279..ea6409d0e1e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9440,8 +9440,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } - #[rustfmt::skip] - fn handle_monitor_update_completion_actions>(&self, actions: I) { + fn handle_monitor_update_completion_actions< + I: IntoIterator, + >( + &self, actions: I, + ) { debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread); debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); @@ -9450,40 +9453,71 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ for action in actions.into_iter() { match action { - MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { - let (peer_id, chan_id) = pending_mpp_claim.as_ref().map(|c| (Some(c.0), Some(c.1))).unwrap_or_default(); - let logger = WithContext::from(&self.logger, peer_id, chan_id, Some(payment_hash)); + MonitorUpdateCompletionAction::PaymentClaimed { + payment_hash, + pending_mpp_claim, + } => { + let (peer_id, chan_id) = pending_mpp_claim + .as_ref() + .map(|c| (Some(c.0), Some(c.1))) + .unwrap_or_default(); + let logger = + WithContext::from(&self.logger, peer_id, chan_id, Some(payment_hash)); log_trace!(logger, "Handling PaymentClaimed monitor update completion action"); if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { let mut peer_state = peer_state_mutex.lock().unwrap(); - let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id); + let blockers_entry = + peer_state.actions_blocking_raa_monitor_updates.entry(chan_id); if let btree_map::Entry::Occupied(mut blockers) = blockers_entry { - blockers.get_mut().retain(|blocker| - if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker { + blockers.get_mut().retain(|blocker| { + if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { + pending_claim, + } = &blocker + { if *pending_claim == claim_ptr { - let mut pending_claim_state_lock = pending_claim.0.lock().unwrap(); - let pending_claim_state = &mut *pending_claim_state_lock; - pending_claim_state.channels_without_preimage.retain(|(cp, cid)| { - let this_claim = - *cp == counterparty_node_id && *cid == chan_id; - if this_claim { - pending_claim_state.channels_with_preimage.push((*cp, *cid)); - false - } else { true } - }); - if pending_claim_state.channels_without_preimage.is_empty() { - for (cp, cid) in pending_claim_state.channels_with_preimage.iter() { + let mut pending_claim_state_lock = + pending_claim.0.lock().unwrap(); + let pending_claim_state = + &mut *pending_claim_state_lock; + pending_claim_state.channels_without_preimage.retain( + |(cp, cid)| { + let this_claim = *cp == counterparty_node_id + && *cid == chan_id; + if this_claim { + pending_claim_state + .channels_with_preimage + .push((*cp, *cid)); + false + } else { + true + } + }, + ); + if pending_claim_state + .channels_without_preimage + .is_empty() + { + for (cp, cid) in pending_claim_state + .channels_with_preimage + .iter() + { let freed_chan = (*cp, *cid, blocker.clone()); freed_channels.push(freed_chan); } } - !pending_claim_state.channels_without_preimage.is_empty() - } else { true } - } else { true } - ); + !pending_claim_state + .channels_without_preimage + .is_empty() + } else { + true + } + } else { + true + } + }); if blockers.get().is_empty() { blockers.remove(); } @@ -9491,7 +9525,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); } - let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); + let payment = self + .claimable_payments + .lock() + .unwrap() + .pending_claiming_payments + .remove(&payment_hash); if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, @@ -9501,7 +9540,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ onion_fields, payment_id, durable_preimage_channel, - }) = payment { + }) = payment + { let event = events::Event::PaymentClaimed { payment_hash, purpose, @@ -9512,8 +9552,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ onion_fields, payment_id, }; - let action = if let Some((outpoint, counterparty_node_id, channel_id)) - = durable_preimage_channel + let action = if let Some((outpoint, counterparty_node_id, channel_id)) = + durable_preimage_channel { Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { channel_funding_outpoint: Some(outpoint), @@ -9530,13 +9570,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // `payment_id` should suffice to ensure we never spuriously drop a second // event for a duplicate payment. if !pending_events.contains(&event_action) { - log_trace!(logger, "Queuing PaymentClaimed event with event completion action {:?}", event_action.1); + log_trace!( + logger, + "Queuing PaymentClaimed event with event completion action {:?}", + event_action.1 + ); pending_events.push_back(event_action); } } }, MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { - event, downstream_counterparty_and_funding_outpoint + event, + downstream_counterparty_and_funding_outpoint, } => { self.pending_events.lock().unwrap().push_back((event, None)); if let Some(unblocked) = downstream_counterparty_and_funding_outpoint { @@ -9548,7 +9593,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id, downstream_channel_id, blocking_action, + downstream_counterparty_node_id, + downstream_channel_id, + blocking_action, } => { self.handle_monitor_update_release( downstream_counterparty_node_id, From cdba25fa8797ee39e69e0276b28fe7473dac5f19 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Fri, 5 Dec 2025 22:55:26 +0000 Subject: [PATCH 10/42] Assert peer supports splicing before splicing channel --- lightning/src/ln/channelmanager.rs | 13 +++++- lightning/src/ln/splicing_tests.rs | 67 +++++++++++++++++++++++++++++- 2 files changed, 77 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 72585d69f80..8595b23bee7 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4774,8 +4774,17 @@ where Err(e) => return Err(e), }; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; + let mut peer_state = peer_state_mutex.lock().unwrap(); + if !peer_state.latest_features.supports_splicing() { + return Err(APIError::ChannelUnavailable { + err: "Peer does not support splicing".to_owned(), + }); + } + if !peer_state.latest_features.supports_quiescence() { + return Err(APIError::ChannelUnavailable { + err: "Peer does not support quiescence, a splicing prerequisite".to_owned(), + }); + } // Look for the channel match peer_state.channel_by_id.entry(*channel_id) { diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index a96af7bbc5d..a05c0bd92d8 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -17,7 +17,9 @@ use 
crate::events::bump_transaction::sync::WalletSourceSync; use crate::events::{ClosureReason, Event, FundingInfo, HTLCHandlingFailureType}; use crate::ln::chan_utils; use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, BREAKDOWN_TIMEOUT}; +use crate::ln::channelmanager::{ + provided_init_features, PaymentId, RecipientOnionFields, BREAKDOWN_TIMEOUT, +}; use crate::ln::functional_test_utils::*; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; @@ -30,6 +32,69 @@ use crate::util::test_channel_signer::SignerOp; use bitcoin::secp256k1::PublicKey; use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; +#[test] +fn test_splicing_not_supported_api_error() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut features = provided_init_features(&test_default_channel_config()); + features.clear_splicing(); + *node_cfgs[0].override_init_features.borrow_mut() = Some(features); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let bs_contribution = SpliceContribution::SpliceIn { + value: Amount::ZERO, + inputs: Vec::new(), + change_script: None, + }; + + let res = nodes[1].node.splice_channel( + &channel_id, + &node_id_0, + bs_contribution.clone(), + 0, // funding_feerate_per_kw, + None, // locktime + ); + match res { + Err(APIError::ChannelUnavailable { err }) => { + assert!(err.contains("Peer does not support splicing")) + }, + _ => panic!("Wrong error {:?}", res.err().unwrap()), + } + + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + + let mut features = nodes[0].node.init_features(); + features.set_splicing_optional(); + features.clear_quiescence(); + *nodes[0].override_init_features.borrow_mut() = Some(features); + + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, true); + reconnect_args.send_announcement_sigs = (true, true); + reconnect_nodes(reconnect_args); + + let res = nodes[1].node.splice_channel( + &channel_id, + &node_id_0, + bs_contribution, + 0, // funding_feerate_per_kw, + None, // locktime + ); + match res { + Err(APIError::ChannelUnavailable { err }) => { + assert!(err.contains("Peer does not support quiescence, a splicing prerequisite")) + }, + _ => panic!("Wrong error {:?}", res.err().unwrap()), + } +} + #[test] fn test_v1_splice_in_negative_insufficient_inputs() { let chanmon_cfgs = create_chanmon_cfgs(2); From 173481f6e77cd1c91e9bc6fa3ff2771d31413a4d Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 14 Nov 2025 23:20:41 +0000 Subject: [PATCH 11/42] Avoid force-closing 0-conf channels when funding is reorg'd When we see a funding transaction for one of our chanels reorg'd out, we worry that its possible we've been double-spent and immediately force-close the channel to avoid accepting any more HTLCs on it. This isn't ideal, but is mostly fine as most nodes require 6 confirmations and 6 block reorgs are exceedingly rare. 
However, this isn't so okay for 0-conf channels - in that case we elected to trust the funder anyway, so reorgs shouldn't worry us. Still, to handle this correctly we needed to track the old SCID and ensure our logic is safe across an SCID change. Luckily, we did that work for splices, and can now take advantage of it here. Fixes #3836. --- lightning/src/ln/channel.rs | 41 +++- lightning/src/ln/functional_test_utils.rs | 43 ++-- lightning/src/ln/priv_short_conf_tests.rs | 236 ++++++++++++++++++---- lightning/src/ln/reorg_tests.rs | 4 +- 4 files changed, 261 insertions(+), 63 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e55e4144ef2..ddaa729f50c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -11344,9 +11344,13 @@ where } // Check if the funding transaction was unconfirmed + let original_scid = self.funding.short_channel_id; + let was_confirmed = self.funding.funding_tx_confirmed_in.is_some(); let funding_tx_confirmations = self.funding.get_funding_tx_confirmations(height); if funding_tx_confirmations == 0 { self.funding.funding_tx_confirmation_height = 0; + self.funding.short_channel_id = None; + self.funding.funding_tx_confirmed_in = None; } if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { @@ -11361,18 +11365,33 @@ where self.context.channel_state.is_our_channel_ready() { // If we've sent channel_ready (or have both sent and received channel_ready), and - // the funding transaction has become unconfirmed, - // close the channel and hope we can get the latest state on chain (because presumably - // the funding transaction is at least still in the mempool of most nodes). + // the funding transaction has become unconfirmed, we'll probably get a new SCID when + // it re-confirms. // - // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or - // 0-conf channel, but not doing so may lead to the - // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have - // to. - if funding_tx_confirmations == 0 && self.funding.funding_tx_confirmed_in.is_some() { - let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", - self.context.minimum_depth.unwrap(), funding_tx_confirmations); - return Err(ClosureReason::ProcessingError { err: err_reason }); + // Worse, if the funding has un-confirmed we could have accepted some HTLC(s) over it + // and are now at risk of double-spend. While its possible, even likely, that this is + // just a trivial reorg and we should wait to see the new block connected in the next + // call, its also possible we've been double-spent. To avoid further loss of funds, we + // need some kind of method to freeze the channel and avoid accepting further HTLCs, + // but absent such a method, we just force-close. + // + // The one exception we make is for 0-conf channels, which we decided to trust anyway, + // in which case we simply track the previous SCID as a `historical_scids` the same as + // after a channel is spliced. + if funding_tx_confirmations == 0 && was_confirmed { + if let Some(scid) = original_scid { + self.context.historical_scids.push(scid); + } else { + debug_assert!(false); + } + if self.context.minimum_depth(&self.funding).expect("set for a ready channel") > 0 { + // Reset the original short_channel_id so that we'll generate a closure + // `channel_update` broadcast event. 
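
The bookkeeping change described above boils down to remembering superseded SCIDs instead of dropping them when the funding un-confirms. A much-simplified sketch of that idea follows; `ScidIndex` and `ChannelEntry` are invented types and do not correspond to LDK's real `short_to_chan_info` machinery.

```rust
use std::collections::HashMap;

type Scid = u64;

// Invented stand-in for per-channel state: the live SCID (None while the
// funding is unconfirmed) plus any SCIDs the channel was previously known by.
struct ChannelEntry {
    channel_id: [u8; 32],
    current_scid: Option<Scid>,
    historical_scids: Vec<Scid>,
}

// Invented stand-in for an SCID -> channel lookup table.
struct ScidIndex {
    by_scid: HashMap<Scid, [u8; 32]>,
}

impl ScidIndex {
    // On a reorg the old SCID moves to the historical list, but its lookup
    // entry is deliberately kept so in-flight forwards still find the channel.
    fn record_reorg(&mut self, chan: &mut ChannelEntry, new_scid: Option<Scid>) {
        if let Some(old) = chan.current_scid.take() {
            chan.historical_scids.push(old);
        }
        chan.current_scid = new_scid;
        if let Some(new) = new_scid {
            self.by_scid.insert(new, chan.channel_id);
        }
    }

    fn lookup(&self, scid: Scid) -> Option<[u8; 32]> {
        self.by_scid.get(&scid).copied()
    }

    // Once the new alias has had time to propagate (e.g. after a fresh
    // channel_announcement), the old aliases can be forgotten.
    fn prune_historical(&mut self, chan: &mut ChannelEntry) {
        for old in chan.historical_scids.drain(..) {
            self.by_scid.remove(&old);
        }
    }
}

fn main() {
    let mut idx = ScidIndex { by_scid: HashMap::new() };
    let mut chan = ChannelEntry {
        channel_id: [1; 32],
        current_scid: Some(100),
        historical_scids: Vec::new(),
    };
    idx.by_scid.insert(100, chan.channel_id);

    // Funding reorged out: no confirmed SCID for now, but 100 still resolves.
    idx.record_reorg(&mut chan, None);
    assert_eq!(idx.lookup(100), Some([1; 32]));

    // Funding re-confirms under a new SCID; both aliases resolve until pruned.
    idx.record_reorg(&mut chan, Some(200));
    assert_eq!(idx.lookup(100), Some([1; 32]));
    assert_eq!(idx.lookup(200), Some([1; 32]));

    idx.prune_historical(&mut chan);
    assert_eq!(idx.lookup(100), None);
    assert_eq!(idx.lookup(200), Some([1; 32]));
}
```
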
+ self.funding.short_channel_id = original_scid; + let err_reason = format!("Funding transaction was un-confirmed, originally locked at {} confs.", + self.context.minimum_depth.unwrap()); + return Err(ClosureReason::ProcessingError { err: err_reason }); + } } } else if !self.funding.is_outbound() && self.funding.funding_tx_confirmed_in.is_none() && height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e31630a4926..be32e1fd23a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -3224,12 +3224,13 @@ pub fn expect_probe_successful_events( } pub struct PaymentFailedConditions<'a> { - pub(crate) expected_htlc_error_data: Option<(LocalHTLCFailureReason, &'a [u8])>, - pub(crate) expected_blamed_scid: Option, - pub(crate) expected_blamed_chan_closed: Option, - pub(crate) expected_mpp_parts_remain: bool, - pub(crate) retry_expected: bool, - pub(crate) from_mon_update: bool, + pub expected_htlc_error_data: Option<(LocalHTLCFailureReason, &'a [u8])>, + pub expected_blamed_scid: Option, + pub expected_blamed_chan_closed: Option, + pub expected_mpp_parts_remain: bool, + pub retry_expected: bool, + pub from_mon_update: bool, + pub reason: Option, } impl<'a> PaymentFailedConditions<'a> { @@ -3241,6 +3242,7 @@ impl<'a> PaymentFailedConditions<'a> { expected_mpp_parts_remain: false, retry_expected: false, from_mon_update: false, + reason: None, } } pub fn mpp_parts_remain(mut self) -> Self { @@ -3321,14 +3323,21 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( *payment_failed_permanently, expected_payment_failed_permanently, "unexpected payment_failed_permanently value" ); - { - assert!(error_code.is_some(), "expected error_code.is_some() = true"); - assert!(error_data.is_some(), "expected error_data.is_some() = true"); - let reason: LocalHTLCFailureReason = error_code.unwrap().into(); - if let Some((code, data)) = conditions.expected_htlc_error_data { - assert_eq!(reason, code, "unexpected error code"); - assert_eq!(&error_data.as_ref().unwrap()[..], data, "unexpected error data"); - } + match failure { + PathFailure::OnPath { .. } => { + assert!(error_code.is_some(), "expected error_code.is_some() = true"); + assert!(error_data.is_some(), "expected error_data.is_some() = true"); + let reason: LocalHTLCFailureReason = error_code.unwrap().into(); + if let Some((code, data)) = conditions.expected_htlc_error_data { + assert_eq!(reason, code, "unexpected error code"); + assert_eq!(&error_data.as_ref().unwrap()[..], data); + } + }, + PathFailure::InitialSend { .. 
} => { + assert!(error_code.is_none()); + assert!(error_data.is_none()); + assert!(conditions.expected_htlc_error_data.is_none()); + }, } if let Some(chan_closed) = conditions.expected_blamed_chan_closed { @@ -3362,7 +3371,9 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( assert_eq!(*payment_id, expected_payment_id); assert_eq!( reason.unwrap(), - if expected_payment_failed_permanently { + if let Some(expected_reason) = conditions.reason { + expected_reason + } else if expected_payment_failed_permanently { PaymentFailureReason::RecipientRejected } else { PaymentFailureReason::RetriesExhausted @@ -3414,7 +3425,7 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>( payment_id } -fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { +pub fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { let origin_node_id = expected_path[0].node.get_our_node_id(); // iterate from the receiving node to the origin node and handle update fail htlc. diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index ea34e88f619..ab7cad9be44 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -12,7 +12,8 @@ //! LSP). use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentFailureReason}; +use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::msgs; use crate::ln::msgs::{ @@ -1078,66 +1079,233 @@ fn test_public_0conf_channel() { #[test] fn test_0conf_channel_reorg() { // If we accept a 0conf channel, which is then confirmed, but then changes SCID in a reorg, we - // have to make sure we handle this correctly (or, currently, just force-close the channel). + // have to ensure we still accept relays to the previous SCID, at least for some time, as well + // as send a fresh channel announcement. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); chan_config.manually_accept_inbound_channels = true; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let node_a_id = nodes[0].node.get_our_node_id(); + let node_chanmgrs = + create_node_chanmgrs(3, &node_cfgs, &[None, None, Some(chan_config.clone())]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_chan_between_nodes(&nodes[0], &nodes[1]); + + // Make sure all nodes are at the same starting height + connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); // This is the default but we force it on anyway chan_config.channel_handshake_config.announce_for_forwarding = true; - let (tx, ..) = open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config)); + let (tx, ..) 
= open_zero_conf_channel(&nodes[1], &nodes[2], Some(chan_config)); // We can use the channel immediately, but we can't announce it until we get 6+ confirmations - send_payment(&nodes[0], &[&nodes[1]], 100_000); + send_payment(&nodes[1], &[&nodes[2]], 100_000); - mine_transaction(&nodes[0], &tx); mine_transaction(&nodes[1], &tx); + mine_transaction(&nodes[2], &tx); // Send a payment using the channel's real SCID, which will be public in a few blocks once we // can generate a channel_announcement. - let real_scid = nodes[0].node.list_usable_channels()[0].short_channel_id.unwrap(); - assert_eq!(nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(), real_scid); + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + let original_scid = bs_chan.short_channel_id.unwrap(); + assert_eq!(nodes[2].node.list_usable_channels()[0].short_channel_id.unwrap(), original_scid); let (mut route, payment_hash, payment_preimage, payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); - assert_eq!(route.paths[0].hops[0].short_channel_id, real_scid); + get_route_and_payment_hash!(nodes[1], nodes[2], 10_000); + assert_eq!(route.paths[0].hops[0].short_channel_id, original_scid); + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + // Check that we can forward a payment over the channel's SCID as well (i.e. as if node C + // generated an invoice with a route hint through the 0-conf channel). + let mut forwarded_route = route.clone(); + let (ab_route, ..) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); + forwarded_route.paths[0].hops.insert(0, ab_route.paths[0].hops[0].clone()); + forwarded_route.paths[0].hops[0].fee_msat = 1000; + forwarded_route.paths[0].hops[0].cltv_expiry_delta = MIN_CLTV_EXPIRY_DELTA.into(); send_along_route_with_secret( &nodes[0], - route, - &[&[&nodes[1]]], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], 10_000, payment_hash, payment_secret, ); - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); - disconnect_blocks(&nodes[0], 1); + // Now disconnect blocks, checking that the SCID was wiped but that it still works both for a + // forwarded HTLC and a directly-sent one. disconnect_blocks(&nodes[1], 1); + disconnect_blocks(&nodes[2], 1); - // At this point the channel no longer has an SCID again. In the future we should likely - // support simply un-setting the SCID and waiting until the channel gets re-confirmed, but for - // now we force-close the channel here. - let reason = ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." 
- .to_owned(), - }; - check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); - check_closed_broadcast!(nodes[0], true); + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert!(bs_chan.short_channel_id.is_none()); + assert!(nodes[2].node.list_usable_channels()[0].short_channel_id.is_none()); + + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + // Finally, connect an extra block then re-mine the funding tx, giving the channel a new SCID. + connect_blocks(&nodes[1], 1); + connect_blocks(&nodes[2], 1); + + mine_transaction(&nodes[1], &tx); + mine_transaction(&nodes[2], &tx); + + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + let new_scid = bs_chan.short_channel_id.unwrap(); + assert_ne!(original_scid, new_scid); + assert_eq!(nodes[2].node.list_usable_channels()[0].short_channel_id.unwrap(), new_scid); + + // At this point, the channel should happily forward or send payments with either the old SCID + // or the new SCID... + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let mut new_scid_route = route.clone(); + new_scid_route.paths[0].hops[0].short_channel_id = new_scid; + send_along_route_with_secret( + &nodes[1], + new_scid_route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + let mut new_scid_forwarded_route = forwarded_route.clone(); + new_scid_forwarded_route.paths[0].hops[1].short_channel_id = new_scid; + send_along_route_with_secret( + &nodes[0], + new_scid_forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + // However after CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY blocks, the old SCID should be removed + // and will no longer work for sent or forwarded payments (but the new one still will). + connect_blocks(&nodes[1], 5); + let bs_announcement_sigs = + get_event_msg!(nodes[1], MessageSendEvent::SendAnnouncementSignatures, node_c_id); + + connect_blocks(&nodes[2], 5); + let cs_announcement_sigs = + get_event_msg!(nodes[2], MessageSendEvent::SendAnnouncementSignatures, node_b_id); + + nodes[2].node.handle_announcement_signatures(node_b_id, &bs_announcement_sigs); + let cs_broadcast = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(cs_broadcast.len(), 1); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. 
} = cs_broadcast[0] { + } else { + panic!("Expected broadcast"); + } + + nodes[1].node.handle_announcement_signatures(node_c_id, &cs_announcement_sigs); + let bs_broadcast = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_broadcast.len(), 1); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = bs_broadcast[0] { + } else { + panic!("Expected broadcast"); + } + + connect_blocks(&nodes[0], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY); + connect_blocks(&nodes[1], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 5); + connect_blocks(&nodes[2], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 5); + + send_along_route_with_secret( + &nodes[1], + new_scid_route, + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + new_scid_forwarded_route, + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId([0; 32]); + nodes[1].node.send_payment_with_route(route, payment_hash, onion.clone(), id).unwrap(); + let mut conditions = PaymentFailedConditions::new(); + conditions.reason = Some(PaymentFailureReason::RouteNotFound); + expect_payment_failed_conditions(&nodes[1], payment_hash, false, conditions); + + nodes[0].node.send_payment_with_route(forwarded_route, payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let reason = ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." - .to_owned(), - }; - check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); - check_closed_broadcast!(nodes[1], true); - check_added_monitors(&nodes[1], 1); + let mut ev = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(ev.len(), 1); + let ev = ev.pop().unwrap(); + let path = &[&nodes[1]]; + let failure = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: original_scid }; + let args = + PassAlongPathArgs::new(&nodes[0], path, 10_000, payment_hash, ev).expect_failure(failure); + do_pass_along_path(args); + fail_payment_along_path(&[&nodes[0], &nodes[1]]); + expect_payment_failed!(nodes[0], payment_hash, false); } #[test] diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 97e4429fbd6..043862fea90 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -386,9 +386,9 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ assert_eq!(txn.len(), 1); } - let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs."; + let expected_err = "Funding transaction was un-confirmed, originally locked at 6 confs."; if reorg_after_reload || !reload_node { - handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. 
Locked at 6 confs, now have 0 confs."); + handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed, originally locked at 6 confs."); check_added_monitors!(nodes[1], 1); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) }; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); From 4289db5ccee70a658371f99181b2b269f84dd479 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 1 Dec 2025 13:39:45 +0100 Subject: [PATCH 12/42] Consistently use `wire::Message` for encoding network messages Previously, `enqueue_message` took an `M: Type + Writeable` reference, which didn't make use of our `wire::Message` type, which turned out to be rather confusing. Here, we use `Message` consistently in `PeerManager`'s `enqueue_message`, but also in `encrypt_message`, etc. While at it we also switch to move semantics, which is a nice cleanup. --- lightning/src/ln/channelmanager.rs | 2 + lightning/src/ln/functional_test_utils.rs | 2 + lightning/src/ln/msgs.rs | 2 + lightning/src/ln/peer_channel_encryptor.rs | 4 +- lightning/src/ln/peer_handler.rs | 371 ++++++++++++--------- 5 files changed, 226 insertions(+), 155 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 399c51b9d9a..d52d8535114 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -13872,7 +13872,9 @@ where &MessageSendEvent::UpdateHTLCs { .. } => false, &MessageSendEvent::SendRevokeAndACK { .. } => false, &MessageSendEvent::SendClosingSigned { .. } => false, + #[cfg(simple_close)] &MessageSendEvent::SendClosingComplete { .. } => false, + #[cfg(simple_close)] &MessageSendEvent::SendClosingSig { .. } => false, &MessageSendEvent::SendShutdown { .. } => false, &MessageSendEvent::SendChannelReestablish { .. } => false, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e31630a4926..e4e9c583f32 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1124,7 +1124,9 @@ pub fn remove_first_msg_event_to_node( MessageSendEvent::UpdateHTLCs { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendRevokeAndACK { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendClosingSigned { node_id, .. } => node_id == msg_node_id, + #[cfg(simple_close)] MessageSendEvent::SendClosingComplete { node_id, .. } => node_id == msg_node_id, + #[cfg(simple_close)] MessageSendEvent::SendClosingSig { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendShutdown { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendChannelReestablish { node_id, .. } => node_id == msg_node_id, diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 8e230fab1d9..0484ebe7530 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -1857,6 +1857,7 @@ pub enum MessageSendEvent { msg: ClosingSigned, }, /// Used to indicate that a `closing_complete` message should be sent to the peer with the given `node_id`. + #[cfg(simple_close)] SendClosingComplete { /// The node_id of the node which should receive this message node_id: PublicKey, @@ -1864,6 +1865,7 @@ pub enum MessageSendEvent { msg: ClosingComplete, }, /// Used to indicate that a `closing_sig` message should be sent to the peer with the given `node_id`. 
+ #[cfg(simple_close)] SendClosingSig { /// The node_id of the node which should receive this message node_id: PublicKey, diff --git a/lightning/src/ln/peer_channel_encryptor.rs b/lightning/src/ln/peer_channel_encryptor.rs index 09b970a9ab2..1d34d9a8674 100644 --- a/lightning/src/ln/peer_channel_encryptor.rs +++ b/lightning/src/ln/peer_channel_encryptor.rs @@ -565,12 +565,12 @@ impl PeerChannelEncryptor { /// Encrypts the given message, returning the encrypted version. /// panics if the length of `message`, once encoded, is greater than 65535 or if the Noise /// handshake has not finished. - pub fn encrypt_message(&mut self, message: &M) -> Vec { + pub fn encrypt_message(&mut self, message: wire::Message) -> Vec { // Allocate a buffer with 2KB, fitting most common messages. Reserve the first 16+2 bytes // for the 2-byte message type prefix and its MAC. let mut res = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); res.0.resize(16 + 2, 0); - wire::write(message, &mut res).expect("In-memory messages must never fail to serialize"); + wire::write(&message, &mut res).expect("In-memory messages must never fail to serialize"); self.encrypt_message_with_header_0s(&mut res.0); res.0 diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index c3b490ef31a..8a6c6a786b1 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -29,7 +29,7 @@ use crate::ln::peer_channel_encryptor::{ }; use crate::ln::types::ChannelId; use crate::ln::wire; -use crate::ln::wire::{Encode, Type}; +use crate::ln::wire::{Encode, Message, Type}; use crate::onion_message::async_payments::{ AsyncPaymentsMessageHandler, HeldHtlcAvailable, OfferPaths, OfferPathsRequest, ReleaseHeldHtlc, ServeStaticInvoice, StaticInvoicePersisted, @@ -53,12 +53,14 @@ use crate::util::ser::{VecWriter, Writeable, Writer}; #[allow(unused_imports)] use crate::prelude::*; +use super::wire::CustomMessageReader; use crate::io; use crate::sync::{FairRwLock, Mutex, MutexGuard}; use core::convert::Infallible; use core::ops::Deref; use core::sync::atomic::{AtomicBool, AtomicI32, AtomicU32, Ordering}; use core::{cmp, fmt, hash, mem}; + #[cfg(not(c_bindings))] use { crate::chain::chainmonitor::ChainMonitor, @@ -1121,7 +1123,7 @@ pub struct PeerManager< } enum LogicalMessage { - FromWire(wire::Message), + FromWire(Message), CommitmentSignedBatch(ChannelId, Vec), } @@ -1572,7 +1574,8 @@ where if let Some(next_onion_message) = handler.next_onion_message_for_peer(peer_node_id) { - self.enqueue_message(peer, &next_onion_message); + let msg = Message::OnionMessage(next_onion_message); + self.enqueue_message(peer, msg); } } } @@ -1590,16 +1593,20 @@ where if let Some((announce, update_a_option, update_b_option)) = self.message_handler.route_handler.get_next_channel_announcement(c) { - self.enqueue_message(peer, &announce); + peer.sync_status = InitSyncTracker::ChannelsSyncing( + announce.contents.short_channel_id + 1, + ); + let msg = Message::ChannelAnnouncement(announce); + self.enqueue_message(peer, msg); + if let Some(update_a) = update_a_option { - self.enqueue_message(peer, &update_a); + let msg = Message::ChannelUpdate(update_a); + self.enqueue_message(peer, msg); } if let Some(update_b) = update_b_option { - self.enqueue_message(peer, &update_b); + let msg = Message::ChannelUpdate(update_b); + self.enqueue_message(peer, msg); } - peer.sync_status = InitSyncTracker::ChannelsSyncing( - announce.contents.short_channel_id + 1, - ); } else { peer.sync_status = 
InitSyncTracker::ChannelsSyncing(0xffff_ffff_ffff_ffff); @@ -1608,8 +1615,9 @@ where InitSyncTracker::ChannelsSyncing(c) if c == 0xffff_ffff_ffff_ffff => { let handler = &self.message_handler.route_handler; if let Some(msg) = handler.get_next_node_announcement(None) { - self.enqueue_message(peer, &msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); + let msg = Message::NodeAnnouncement(msg); + self.enqueue_message(peer, msg); } else { peer.sync_status = InitSyncTracker::NoSyncRequested; } @@ -1618,8 +1626,9 @@ where InitSyncTracker::NodesSyncing(sync_node_id) => { let handler = &self.message_handler.route_handler; if let Some(msg) = handler.get_next_node_announcement(Some(&sync_node_id)) { - self.enqueue_message(peer, &msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); + let msg = Message::NodeAnnouncement(msg); + self.enqueue_message(peer, msg); } else { peer.sync_status = InitSyncTracker::NoSyncRequested; } @@ -1727,7 +1736,10 @@ where } /// Append a message to a peer's pending outbound/write buffer - fn enqueue_message(&self, peer: &mut Peer, message: &M) { + fn enqueue_message( + &self, peer: &mut Peer, + message: Message<::CustomMessage>, + ) { let their_node_id = peer.their_node_id.map(|p| p.0); if their_node_id.is_some() { let logger = WithContext::from(&self.logger, their_node_id, None, None); @@ -1792,12 +1804,14 @@ where }, msgs::ErrorAction::SendErrorMessage { msg } => { log_debug!(logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err); - self.enqueue_message($peer, &msg); + let msg = Message::Error(msg); + self.enqueue_message($peer, msg); continue; }, msgs::ErrorAction::SendWarningMessage { msg, log_level } => { log_given_level!(logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err); - self.enqueue_message($peer, &msg); + let msg = Message::Warning(msg); + self.enqueue_message($peer, msg); continue; }, } @@ -1892,7 +1906,8 @@ where peer.their_socket_address.clone(), ), }; - self.enqueue_message(peer, &resp); + let msg = Message::Init(resp); + self.enqueue_message(peer, msg); }, NextNoiseStep::ActThree => { let res = peer @@ -1912,7 +1927,8 @@ where peer.their_socket_address.clone(), ), }; - self.enqueue_message(peer, &resp); + let msg = Message::Init(resp); + self.enqueue_message(peer, msg); }, NextNoiseStep::NoiseComplete => { if peer.pending_read_is_header { @@ -1972,8 +1988,11 @@ where let channel_id = ChannelId::new_zero(); let data = "Unsupported message compression: zlib" .to_owned(); - let msg = msgs::WarningMessage { channel_id, data }; - self.enqueue_message(peer, &msg); + let msg = Message::Warning(msgs::WarningMessage { + channel_id, + data, + }); + self.enqueue_message(peer, msg); continue; }, (_, Some(ty)) if is_gossip_msg(ty) => { @@ -1983,8 +2002,11 @@ where "Unreadable/bogus gossip message of type {}", ty ); - let msg = msgs::WarningMessage { channel_id, data }; - self.enqueue_message(peer, &msg); + let msg = Message::Warning(msgs::WarningMessage { + channel_id, + data, + }); + self.enqueue_message(peer, msg); continue; }, (msgs::DecodeError::UnknownRequiredFeature, _) => { @@ -2060,9 +2082,7 @@ where /// Returns the message back if it needs to be broadcasted to all other peers. 
fn handle_message( &self, peer_mutex: &Mutex, peer_lock: MutexGuard, - message: wire::Message< - <::Target as wire::CustomMessageReader>::CustomMessage, - >, + message: Message<<::Target as wire::CustomMessageReader>::CustomMessage>, ) -> Result, MessageHandlingError> { let their_node_id = peer_lock .their_node_id @@ -2103,9 +2123,7 @@ where // allow it to be subsequently processed by `do_handle_message_without_peer_lock`. fn do_handle_message_holding_peer_lock<'a>( &self, mut peer_lock: MutexGuard, - message: wire::Message< - <::Target as wire::CustomMessageReader>::CustomMessage, - >, + message: Message<<::Target as wire::CustomMessageReader>::CustomMessage>, their_node_id: PublicKey, logger: &WithContext<'a, L>, ) -> Result< Option< @@ -2116,7 +2134,7 @@ where peer_lock.received_message_since_timer_tick = true; // Need an Init as first message - if let wire::Message::Init(msg) = message { + if let Message::Init(msg) = message { // Check if we have any compatible chains if the `networks` field is specified. if let Some(networks) = &msg.networks { let chan_handler = &self.message_handler.chan_handler; @@ -2225,7 +2243,7 @@ where // During splicing, commitment_signed messages need to be collected into a single batch // before they are handled. - if let wire::Message::StartBatch(msg) = message { + if let Message::StartBatch(msg) = message { if peer_lock.message_batch.is_some() { let error = format!( "Peer {} sent start_batch for channel {} before previous batch completed", @@ -2296,7 +2314,7 @@ where return Ok(None); } - if let wire::Message::CommitmentSigned(msg) = message { + if let Message::CommitmentSigned(msg) = message { if let Some(message_batch) = &mut peer_lock.message_batch { let MessageBatchImpl::CommitmentSigned(ref mut messages) = &mut message_batch.messages; @@ -2325,7 +2343,7 @@ where return Ok(None); } } else { - return Ok(Some(LogicalMessage::FromWire(wire::Message::CommitmentSigned(msg)))); + return Ok(Some(LogicalMessage::FromWire(Message::CommitmentSigned(msg)))); } } else if let Some(message_batch) = &peer_lock.message_batch { match message_batch.messages { @@ -2341,7 +2359,7 @@ where return Err(PeerHandleError {}.into()); } - if let wire::Message::GossipTimestampFilter(_msg) = message { + if let Message::GossipTimestampFilter(_msg) = message { // When supporting gossip messages, start initial gossip sync only after we receive // a GossipTimestampFilter if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() @@ -2373,7 +2391,7 @@ where return Ok(None); } - if let wire::Message::ChannelAnnouncement(ref _msg) = message { + if let Message::ChannelAnnouncement(ref _msg) = message { peer_lock.received_channel_announce_since_backlogged = true; } @@ -2385,9 +2403,7 @@ where // Returns the message back if it needs to be broadcasted to all other peers. 
fn do_handle_message_without_peer_lock<'a>( &self, peer_mutex: &Mutex, - message: wire::Message< - <::Target as wire::CustomMessageReader>::CustomMessage, - >, + message: Message<<::Target as wire::CustomMessageReader>::CustomMessage>, their_node_id: PublicKey, logger: &WithContext<'a, L>, ) -> Result, MessageHandlingError> { if is_gossip_msg(message.type_id()) { @@ -2400,13 +2416,13 @@ where match message { // Setup and Control messages: - wire::Message::Init(_) => { + Message::Init(_) => { // Handled above }, - wire::Message::GossipTimestampFilter(_) => { + Message::GossipTimestampFilter(_) => { // Handled above }, - wire::Message::Error(msg) => { + Message::Error(msg) => { log_debug!( logger, "Got Err message from {}: {}", @@ -2418,149 +2434,150 @@ where return Err(PeerHandleError {}.into()); } }, - wire::Message::Warning(msg) => { + Message::Warning(msg) => { log_debug!(logger, "Got warning message: {}", PrintableString(&msg.data)); }, - wire::Message::Ping(msg) => { + Message::Ping(msg) => { if msg.ponglen < 65532 { let resp = msgs::Pong { byteslen: msg.ponglen }; - self.enqueue_message(&mut *peer_mutex.lock().unwrap(), &resp); + let msg = Message::Pong(resp); + self.enqueue_message(&mut *peer_mutex.lock().unwrap(), msg); } }, - wire::Message::Pong(_msg) => { + Message::Pong(_msg) => { let mut peer_lock = peer_mutex.lock().unwrap(); peer_lock.awaiting_pong_timer_tick_intervals = 0; peer_lock.msgs_sent_since_pong = 0; }, // Channel messages: - wire::Message::StartBatch(_msg) => { + Message::StartBatch(_msg) => { debug_assert!(false); }, - wire::Message::OpenChannel(msg) => { + Message::OpenChannel(msg) => { self.message_handler.chan_handler.handle_open_channel(their_node_id, &msg); }, - wire::Message::OpenChannelV2(_msg) => { + Message::OpenChannelV2(_msg) => { self.message_handler.chan_handler.handle_open_channel_v2(their_node_id, &_msg); }, - wire::Message::AcceptChannel(msg) => { + Message::AcceptChannel(msg) => { self.message_handler.chan_handler.handle_accept_channel(their_node_id, &msg); }, - wire::Message::AcceptChannelV2(msg) => { + Message::AcceptChannelV2(msg) => { self.message_handler.chan_handler.handle_accept_channel_v2(their_node_id, &msg); }, - wire::Message::FundingCreated(msg) => { + Message::FundingCreated(msg) => { self.message_handler.chan_handler.handle_funding_created(their_node_id, &msg); }, - wire::Message::FundingSigned(msg) => { + Message::FundingSigned(msg) => { self.message_handler.chan_handler.handle_funding_signed(their_node_id, &msg); }, - wire::Message::ChannelReady(msg) => { + Message::ChannelReady(msg) => { self.message_handler.chan_handler.handle_channel_ready(their_node_id, &msg); }, - wire::Message::PeerStorage(msg) => { + Message::PeerStorage(msg) => { self.message_handler.chan_handler.handle_peer_storage(their_node_id, msg); }, - wire::Message::PeerStorageRetrieval(msg) => { + Message::PeerStorageRetrieval(msg) => { self.message_handler.chan_handler.handle_peer_storage_retrieval(their_node_id, msg); }, // Quiescence messages: - wire::Message::Stfu(msg) => { + Message::Stfu(msg) => { self.message_handler.chan_handler.handle_stfu(their_node_id, &msg); }, // Splicing messages: - wire::Message::SpliceInit(msg) => { + Message::SpliceInit(msg) => { self.message_handler.chan_handler.handle_splice_init(their_node_id, &msg); }, - wire::Message::SpliceAck(msg) => { + Message::SpliceAck(msg) => { self.message_handler.chan_handler.handle_splice_ack(their_node_id, &msg); }, - wire::Message::SpliceLocked(msg) => { + Message::SpliceLocked(msg) => { 
self.message_handler.chan_handler.handle_splice_locked(their_node_id, &msg); }, // Interactive transaction construction messages: - wire::Message::TxAddInput(msg) => { + Message::TxAddInput(msg) => { self.message_handler.chan_handler.handle_tx_add_input(their_node_id, &msg); }, - wire::Message::TxAddOutput(msg) => { + Message::TxAddOutput(msg) => { self.message_handler.chan_handler.handle_tx_add_output(their_node_id, &msg); }, - wire::Message::TxRemoveInput(msg) => { + Message::TxRemoveInput(msg) => { self.message_handler.chan_handler.handle_tx_remove_input(their_node_id, &msg); }, - wire::Message::TxRemoveOutput(msg) => { + Message::TxRemoveOutput(msg) => { self.message_handler.chan_handler.handle_tx_remove_output(their_node_id, &msg); }, - wire::Message::TxComplete(msg) => { + Message::TxComplete(msg) => { self.message_handler.chan_handler.handle_tx_complete(their_node_id, &msg); }, - wire::Message::TxSignatures(msg) => { + Message::TxSignatures(msg) => { self.message_handler.chan_handler.handle_tx_signatures(their_node_id, &msg); }, - wire::Message::TxInitRbf(msg) => { + Message::TxInitRbf(msg) => { self.message_handler.chan_handler.handle_tx_init_rbf(their_node_id, &msg); }, - wire::Message::TxAckRbf(msg) => { + Message::TxAckRbf(msg) => { self.message_handler.chan_handler.handle_tx_ack_rbf(their_node_id, &msg); }, - wire::Message::TxAbort(msg) => { + Message::TxAbort(msg) => { self.message_handler.chan_handler.handle_tx_abort(their_node_id, &msg); }, - wire::Message::Shutdown(msg) => { + Message::Shutdown(msg) => { self.message_handler.chan_handler.handle_shutdown(their_node_id, &msg); }, - wire::Message::ClosingSigned(msg) => { + Message::ClosingSigned(msg) => { self.message_handler.chan_handler.handle_closing_signed(their_node_id, &msg); }, #[cfg(simple_close)] - wire::Message::ClosingComplete(msg) => { + Message::ClosingComplete(msg) => { self.message_handler.chan_handler.handle_closing_complete(their_node_id, msg); }, #[cfg(simple_close)] - wire::Message::ClosingSig(msg) => { + Message::ClosingSig(msg) => { self.message_handler.chan_handler.handle_closing_sig(their_node_id, msg); }, // Commitment messages: - wire::Message::UpdateAddHTLC(msg) => { + Message::UpdateAddHTLC(msg) => { self.message_handler.chan_handler.handle_update_add_htlc(their_node_id, &msg); }, - wire::Message::UpdateFulfillHTLC(msg) => { + Message::UpdateFulfillHTLC(msg) => { self.message_handler.chan_handler.handle_update_fulfill_htlc(their_node_id, msg); }, - wire::Message::UpdateFailHTLC(msg) => { + Message::UpdateFailHTLC(msg) => { self.message_handler.chan_handler.handle_update_fail_htlc(their_node_id, &msg); }, - wire::Message::UpdateFailMalformedHTLC(msg) => { + Message::UpdateFailMalformedHTLC(msg) => { let chan_handler = &self.message_handler.chan_handler; chan_handler.handle_update_fail_malformed_htlc(their_node_id, &msg); }, - wire::Message::CommitmentSigned(msg) => { + Message::CommitmentSigned(msg) => { self.message_handler.chan_handler.handle_commitment_signed(their_node_id, &msg); }, - wire::Message::RevokeAndACK(msg) => { + Message::RevokeAndACK(msg) => { self.message_handler.chan_handler.handle_revoke_and_ack(their_node_id, &msg); }, - wire::Message::UpdateFee(msg) => { + Message::UpdateFee(msg) => { self.message_handler.chan_handler.handle_update_fee(their_node_id, &msg); }, - wire::Message::ChannelReestablish(msg) => { + Message::ChannelReestablish(msg) => { self.message_handler.chan_handler.handle_channel_reestablish(their_node_id, &msg); }, // Routing messages: - 
wire::Message::AnnouncementSignatures(msg) => { + Message::AnnouncementSignatures(msg) => { let chan_handler = &self.message_handler.chan_handler; chan_handler.handle_announcement_signatures(their_node_id, &msg); }, - wire::Message::ChannelAnnouncement(msg) => { + Message::ChannelAnnouncement(msg) => { let route_handler = &self.message_handler.route_handler; if route_handler .handle_channel_announcement(Some(their_node_id), &msg) @@ -2570,7 +2587,7 @@ where } self.update_gossip_backlogged(); }, - wire::Message::NodeAnnouncement(msg) => { + Message::NodeAnnouncement(msg) => { let route_handler = &self.message_handler.route_handler; if route_handler .handle_node_announcement(Some(their_node_id), &msg) @@ -2580,7 +2597,7 @@ where } self.update_gossip_backlogged(); }, - wire::Message::ChannelUpdate(msg) => { + Message::ChannelUpdate(msg) => { let chan_handler = &self.message_handler.chan_handler; chan_handler.handle_channel_update(their_node_id, &msg); @@ -2594,31 +2611,31 @@ where } self.update_gossip_backlogged(); }, - wire::Message::QueryShortChannelIds(msg) => { + Message::QueryShortChannelIds(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_query_short_channel_ids(their_node_id, msg)?; }, - wire::Message::ReplyShortChannelIdsEnd(msg) => { + Message::ReplyShortChannelIdsEnd(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_reply_short_channel_ids_end(their_node_id, msg)?; }, - wire::Message::QueryChannelRange(msg) => { + Message::QueryChannelRange(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_query_channel_range(their_node_id, msg)?; }, - wire::Message::ReplyChannelRange(msg) => { + Message::ReplyChannelRange(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_reply_channel_range(their_node_id, msg)?; }, // Onion message: - wire::Message::OnionMessage(msg) => { + Message::OnionMessage(msg) => { let onion_message_handler = &self.message_handler.onion_message_handler; onion_message_handler.handle_onion_message(their_node_id, &msg); }, // Unknown messages: - wire::Message::Unknown(type_id) if message.is_even() => { + Message::Unknown(type_id) if message.is_even() => { log_debug!( logger, "Received unknown even message of type {}, disconnecting peer!", @@ -2626,10 +2643,10 @@ where ); return Err(PeerHandleError {}.into()); }, - wire::Message::Unknown(type_id) => { + Message::Unknown(type_id) => { log_trace!(logger, "Received unknown odd message of type {}, ignoring", type_id); }, - wire::Message::Custom(custom) => { + Message::Custom(custom) => { let custom_message_handler = &self.message_handler.custom_message_handler; custom_message_handler.handle_custom_message(custom, their_node_id)?; }, @@ -2858,68 +2875,77 @@ where // robustly gossip broadcast events even if a peer's message buffer is full. 
let mut handle_event = |event, from_chan_handler| { match event { - MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => { + MessageSendEvent::SendPeerStorage { ref node_id, msg } => { log_debug!( WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendPeerStorage event in peer_handler for {}", node_id, ); + let msg = Message::PeerStorage(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendPeerStorageRetrieval { ref node_id, ref msg } => { + MessageSendEvent::SendPeerStorageRetrieval { ref node_id, msg } => { log_debug!( WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendPeerStorageRetrieval event in peer_handler for {}", node_id, ); + let msg = Message::PeerStorageRetrieval(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => { + MessageSendEvent::SendAcceptChannel { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::AcceptChannel(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => { + MessageSendEvent::SendAcceptChannelV2 { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::AcceptChannelV2(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => { + MessageSendEvent::SendOpenChannel { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendOpenChannel event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::OpenChannel(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => { + MessageSendEvent::SendOpenChannelV2 { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::OpenChannelV2(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => { + MessageSendEvent::SendFundingCreated { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id), None), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})", node_id, &msg.temporary_channel_id, ChannelId::v1_from_funding_txid(msg.funding_txid.as_byte_array(), msg.funding_output_index)); // TODO: If the peer is gone we should generate a DiscardFunding event // indicating to the wallet that they should just throw away this funding transaction + let msg = Message::FundingCreated(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - 
MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => { + MessageSendEvent::SendFundingSigned { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendFundingSigned event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::FundingSigned(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendChannelReady { ref node_id, ref msg } => { + MessageSendEvent::SendChannelReady { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendChannelReady event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ChannelReady(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendStfu { ref node_id, ref msg } => { + MessageSendEvent::SendStfu { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2929,9 +2955,10 @@ where log_debug!(logger, "Handling SendStfu event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::Stfu(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { + MessageSendEvent::SendSpliceInit { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2941,9 +2968,10 @@ where log_debug!(logger, "Handling SendSpliceInit event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::SpliceInit(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { + MessageSendEvent::SendSpliceAck { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2953,9 +2981,10 @@ where log_debug!(logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::SpliceAck(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { + MessageSendEvent::SendSpliceLocked { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2965,66 +2994,77 @@ where log_debug!(logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::SpliceLocked(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { + MessageSendEvent::SendTxAddInput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAddInput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAddInput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { + MessageSendEvent::SendTxAddOutput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAddOutput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAddOutput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxRemoveInput { ref 
node_id, ref msg } => { + MessageSendEvent::SendTxRemoveInput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxRemoveInput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { + MessageSendEvent::SendTxRemoveOutput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxRemoveOutput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { + MessageSendEvent::SendTxComplete { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxComplete event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxComplete(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { + MessageSendEvent::SendTxSignatures { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxSignatures event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxSignatures(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { + MessageSendEvent::SendTxInitRbf { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxInitRbf event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxInitRbf(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { + MessageSendEvent::SendTxAckRbf { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAckRbf event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAckRbf(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { + MessageSendEvent::SendTxAbort { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAbort event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAbort(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { + MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})", node_id, &msg.channel_id); + let msg = Message::AnnouncementSignatures(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::UpdateHTLCs { @@ -3032,12 +3072,12 @@ where ref channel_id, updates: msgs::CommitmentUpdate { - ref 
update_add_htlcs, - ref update_fulfill_htlcs, - ref update_fail_htlcs, - ref update_fail_malformed_htlcs, - ref update_fee, - ref commitment_signed, + update_add_htlcs, + update_fulfill_htlcs, + update_fail_htlcs, + update_fail_malformed_htlcs, + update_fee, + commitment_signed, }, } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(*channel_id), None), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails, {} commits for channel {}", @@ -3049,18 +3089,23 @@ where channel_id); let mut peer = get_peer_for_forwarding!(node_id)?; for msg in update_fulfill_htlcs { + let msg = Message::UpdateFulfillHTLC(msg); self.enqueue_message(&mut *peer, msg); } for msg in update_fail_htlcs { + let msg = Message::UpdateFailHTLC(msg); self.enqueue_message(&mut *peer, msg); } for msg in update_fail_malformed_htlcs { + let msg = Message::UpdateFailMalformedHTLC(msg); self.enqueue_message(&mut *peer, msg); } for msg in update_add_htlcs { + let msg = Message::UpdateAddHTLC(msg); self.enqueue_message(&mut *peer, msg); } - if let &Some(ref msg) = update_fee { + if let Some(msg) = update_fee { + let msg = Message::UpdateFee(msg); self.enqueue_message(&mut *peer, msg); } if commitment_signed.len() > 1 { @@ -3069,37 +3114,45 @@ where batch_size: commitment_signed.len() as u16, message_type: Some(msgs::CommitmentSigned::TYPE), }; - self.enqueue_message(&mut *peer, &msg); + let msg = Message::StartBatch(msg); + self.enqueue_message(&mut *peer, msg); } for msg in commitment_signed { + let msg = Message::CommitmentSigned(msg); self.enqueue_message(&mut *peer, msg); } }, - MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + MessageSendEvent::SendRevokeAndACK { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::RevokeAndACK(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { + MessageSendEvent::SendClosingSigned { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingSigned event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ClosingSigned(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendClosingComplete { ref node_id, ref msg } => { + #[cfg(simple_close)] + MessageSendEvent::SendClosingComplete { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingComplete event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ClosingComplete(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendClosingSig { ref node_id, ref msg } => { + #[cfg(simple_close)] + MessageSendEvent::SendClosingSig { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingSig event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ClosingSig(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendShutdown { ref node_id, ref msg } => { + MessageSendEvent::SendShutdown { ref node_id, msg } => { log_debug!( 
WithContext::from( &self.logger, @@ -3109,23 +3162,27 @@ where ), "Handling Shutdown event in peer_handler", ); + let msg = Message::Shutdown(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + MessageSendEvent::SendChannelReestablish { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendChannelReestablish event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ChannelReestablish(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::SendChannelAnnouncement { ref node_id, - ref msg, - ref update_msg, + msg, + update_msg, } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}", node_id, msg.contents.short_channel_id); + let msg = Message::ChannelAnnouncement(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); + let update_msg = Message::ChannelUpdate(update_msg); self.enqueue_message( &mut *get_peer_for_forwarding!(node_id)?, update_msg, @@ -3216,12 +3273,13 @@ where _ => {}, } }, - MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + MessageSendEvent::SendChannelUpdate { ref node_id, msg } => { log_trace!( WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelUpdate event in peer_handler for channel {}", msg.contents.short_channel_id ); + let msg = Message::ChannelUpdate(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::HandleError { node_id, action } => { @@ -3239,7 +3297,7 @@ where // about to disconnect the peer and do it after we finish // processing most messages. let msg = msg.map(|msg| { - wire::Message::<<::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg) + Message::<<::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg) }); peers_to_disconnect.insert(node_id, msg); }, @@ -3250,7 +3308,7 @@ where // about to disconnect the peer and do it after we finish // processing most messages. 
peers_to_disconnect - .insert(node_id, Some(wire::Message::Warning(msg))); + .insert(node_id, Some(Message::Warning(msg))); }, msgs::ErrorAction::IgnoreAndLog(level) => { log_given_level!( @@ -3266,22 +3324,21 @@ where "Received a HandleError event to be ignored", ); }, - msgs::ErrorAction::SendErrorMessage { ref msg } => { + msgs::ErrorAction::SendErrorMessage { msg } => { log_trace!(logger, "Handling SendErrorMessage HandleError event in peer_handler with message {}", msg.data); + let msg = Message::Error(msg); self.enqueue_message( &mut *get_peer_for_forwarding!(&node_id)?, msg, ); }, - msgs::ErrorAction::SendWarningMessage { - ref msg, - ref log_level, - } => { + msgs::ErrorAction::SendWarningMessage { msg, ref log_level } => { log_given_level!(logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler with message {}", msg.data); + let msg = Message::Warning(msg); self.enqueue_message( &mut *get_peer_for_forwarding!(&node_id)?, msg, @@ -3289,33 +3346,37 @@ where }, } }, - MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => { + MessageSendEvent::SendChannelRangeQuery { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelRangeQuery event in peer_handler with first_blocknum={}, number_of_blocks={}", msg.first_blocknum, msg.number_of_blocks); + let msg = Message::QueryChannelRange(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => { + MessageSendEvent::SendShortIdsQuery { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendShortIdsQuery event in peer_handler with num_scids={}", msg.short_channel_ids.len()); + let msg = Message::QueryShortChannelIds(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => { + MessageSendEvent::SendReplyChannelRange { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendReplyChannelRange event in peer_handler with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", msg.short_channel_ids.len(), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete); + let msg = Message::ReplyChannelRange(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => { + MessageSendEvent::SendGossipTimestampFilter { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendGossipTimestampFilter event in peer_handler with first_timestamp={}, timestamp_range={}", msg.first_timestamp, msg.timestamp_range); + let msg = Message::GossipTimestampFilter(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, } @@ -3351,7 +3412,8 @@ where } else { continue; }; - self.enqueue_message(&mut peer, &msg); + let msg = Message::Custom(msg); + self.enqueue_message(&mut peer, msg); } for (descriptor, peer_mutex) in peers.iter() { @@ -3381,7 +3443,7 @@ where if let Some(peer_mutex) = peers.remove(&descriptor) { let mut peer = peer_mutex.lock().unwrap(); if let Some(msg) = msg { - self.enqueue_message(&mut *peer, &msg); + self.enqueue_message(&mut *peer, msg); // This isn't guaranteed to work, but if there is enough free // room in the send buffer, put the error message there... 
self.do_attempt_write_data(&mut descriptor, &mut *peer, false); @@ -3506,7 +3568,9 @@ where if peer.awaiting_pong_timer_tick_intervals == 0 { peer.awaiting_pong_timer_tick_intervals = -1; let ping = msgs::Ping { ponglen: 0, byteslen: 64 }; - self.enqueue_message(peer, &ping); + let msg: Message<::CustomMessage> = + Message::Ping(ping); + self.enqueue_message(peer, msg); } } @@ -3577,7 +3641,8 @@ where peer.awaiting_pong_timer_tick_intervals = 1; let ping = msgs::Ping { ponglen: 0, byteslen: 64 }; - self.enqueue_message(&mut *peer, &ping); + let msg = Message::Ping(ping); + self.enqueue_message(&mut *peer, msg); break; } self.do_attempt_write_data( @@ -4226,7 +4291,7 @@ mod tests { .push(MessageSendEvent::SendShutdown { node_id: their_id, msg: msg.clone() }); peers[0].message_handler.chan_handler = &a_chan_handler; - b_chan_handler.expect_receive_msg(wire::Message::Shutdown(msg)); + b_chan_handler.expect_receive_msg(Message::Shutdown(msg)); peers[1].message_handler.chan_handler = &b_chan_handler; peers[0].process_events(); @@ -4261,7 +4326,8 @@ mod tests { peers[0].read_event(&mut fd_dup, &act_three).unwrap(); let not_init_msg = msgs::Ping { ponglen: 4, byteslen: 0 }; - let msg_bytes = dup_encryptor.encrypt_message(¬_init_msg); + let msg: Message<()> = Message::Ping(not_init_msg); + let msg_bytes = dup_encryptor.encrypt_message(msg); assert!(peers[0].read_event(&mut fd_dup, &msg_bytes).is_err()); } @@ -4639,13 +4705,12 @@ mod tests { { let peers = peer_a.peers.read().unwrap(); let mut peer_b = peers.get(&fd_a).unwrap().lock().unwrap(); - peer_a.enqueue_message( - &mut peer_b, - &msgs::WarningMessage { - channel_id: ChannelId([0; 32]), - data: "no disconnect plz".to_string(), - }, - ); + let warning = msgs::WarningMessage { + channel_id: ChannelId([0; 32]), + data: "no disconnect plz".to_string(), + }; + let msg = Message::Warning(warning); + peer_a.enqueue_message(&mut peer_b, msg); } peer_a.process_events(); let msg = fd_a.outbound_data.lock().unwrap().split_off(0); From dfa12d8807fd109be504d249853e6b2a3dad7fda Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 1 Dec 2025 14:08:17 +0100 Subject: [PATCH 13/42] Drop `wire::write` and replace `encode_msg!` macro Now that we consistently use `wire::Message` everywhere, it's easier to simply use `Message::write`/`Type::write` instead of heaving yet another `wire::write` around. Here we drop `wire::write`, replace the `encode_msg` macro with a method that takes `wire::Message`, and convert a bunch of additional places to move semantics. --- lightning/src/ln/peer_channel_encryptor.rs | 6 +- lightning/src/ln/peer_handler.rs | 64 ++++++++++++---------- lightning/src/ln/wire.rs | 41 -------------- 3 files changed, 39 insertions(+), 72 deletions(-) diff --git a/lightning/src/ln/peer_channel_encryptor.rs b/lightning/src/ln/peer_channel_encryptor.rs index 1d34d9a8674..894de045b14 100644 --- a/lightning/src/ln/peer_channel_encryptor.rs +++ b/lightning/src/ln/peer_channel_encryptor.rs @@ -12,7 +12,9 @@ use crate::prelude::*; use crate::ln::msgs; use crate::ln::msgs::LightningError; use crate::ln::wire; +use crate::ln::wire::Type; use crate::sign::{NodeSigner, Recipient}; +use crate::util::ser::Writeable; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::{Hash, HashEngine}; @@ -570,7 +572,9 @@ impl PeerChannelEncryptor { // for the 2-byte message type prefix and its MAC. 
let mut res = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); res.0.resize(16 + 2, 0); - wire::write(&message, &mut res).expect("In-memory messages must never fail to serialize"); + + message.type_id().write(&mut res).expect("In-memory messages must never fail to serialize"); + message.write(&mut res).expect("In-memory messages must never fail to serialize"); self.encrypt_message_with_header_0s(&mut res.0); res.0 diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 8a6c6a786b1..4d1dff9cd52 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -1144,12 +1144,11 @@ impl From for MessageHandlingError { } } -macro_rules! encode_msg { - ($msg: expr) => {{ - let mut buffer = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); - wire::write($msg, &mut buffer).unwrap(); - buffer.0 - }}; +fn encode_message(message: wire::Message) -> Vec { + let mut buffer = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); + message.type_id().write(&mut buffer).expect("In-memory messages must never fail to serialize"); + message.write(&mut buffer).expect("In-memory messages must never fail to serialize"); + buffer.0 } impl @@ -2068,7 +2067,7 @@ where for msg in msgs_to_forward.drain(..) { self.forward_broadcast_msg( &*peers, - &msg, + msg, peer_node_id.as_ref().map(|(pk, _)| pk), false, ); @@ -2661,22 +2660,25 @@ where /// unless `allow_large_buffer` is set, in which case the message will be treated as critical /// and delivered no matter the available buffer space. fn forward_broadcast_msg( - &self, peers: &HashMap>, msg: &BroadcastGossipMessage, + &self, peers: &HashMap>, msg: BroadcastGossipMessage, except_node: Option<&PublicKey>, allow_large_buffer: bool, ) { match msg { - BroadcastGossipMessage::ChannelAnnouncement(ref msg) => { + BroadcastGossipMessage::ChannelAnnouncement(msg) => { log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg); - let encoded_msg = encode_msg!(msg); let our_channel = self.our_node_id == msg.contents.node_id_1 || self.our_node_id == msg.contents.node_id_2; - + let scid = msg.contents.short_channel_id; + let node_id_1 = msg.contents.node_id_1; + let node_id_2 = msg.contents.node_id_2; + let msg: Message<::CustomMessage> = + Message::ChannelAnnouncement(msg); + let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.handshake_complete() { continue; } - let scid = msg.contents.short_channel_id; if !our_channel && !peer.should_forward_channel_announcement(scid) { continue; } @@ -2693,9 +2695,7 @@ where continue; } if let Some((_, their_node_id)) = peer.their_node_id { - if their_node_id == msg.contents.node_id_1 - || their_node_id == msg.contents.node_id_2 - { + if their_node_id == node_id_1 || their_node_id == node_id_2 { continue; } } @@ -2708,23 +2708,25 @@ where peer.gossip_broadcast_buffer.push_back(encoded_message); } }, - BroadcastGossipMessage::NodeAnnouncement(ref msg) => { + BroadcastGossipMessage::NodeAnnouncement(msg) => { log_gossip!( self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg ); - let encoded_msg = encode_msg!(msg); let our_announcement = self.our_node_id == msg.contents.node_id; + let msg_node_id = msg.contents.node_id; + let msg: Message<::CustomMessage> = + Message::NodeAnnouncement(msg); + let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = 
peer_mutex.lock().unwrap(); if !peer.handshake_complete() { continue; } - let node_id = msg.contents.node_id; - if !our_announcement && !peer.should_forward_node_announcement(node_id) { + if !our_announcement && !peer.should_forward_node_announcement(msg_node_id) { continue; } debug_assert!(peer.their_node_id.is_some()); @@ -2740,7 +2742,7 @@ where continue; } if let Some((_, their_node_id)) = peer.their_node_id { - if their_node_id == msg.contents.node_id { + if their_node_id == msg_node_id { continue; } } @@ -2760,15 +2762,16 @@ where except_node, msg ); - let encoded_msg = encode_msg!(msg); - let our_channel = self.our_node_id == *node_id_1 || self.our_node_id == *node_id_2; - + let our_channel = self.our_node_id == node_id_1 || self.our_node_id == node_id_2; + let scid = msg.contents.short_channel_id; + let msg: Message<::CustomMessage> = + Message::ChannelUpdate(msg); + let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.handshake_complete() { continue; } - let scid = msg.contents.short_channel_id; if !our_channel && !peer.should_forward_channel_announcement(scid) { continue; } @@ -3201,7 +3204,7 @@ where let forward = BroadcastGossipMessage::ChannelAnnouncement(msg); self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3222,7 +3225,7 @@ where }; self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3246,7 +3249,7 @@ where }; self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3265,7 +3268,7 @@ where let forward = BroadcastGossipMessage::NodeAnnouncement(msg); self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3742,7 +3745,7 @@ where let _ = self.message_handler.route_handler.handle_node_announcement(None, &msg); self.forward_broadcast_msg( &*self.peers.read().unwrap(), - &BroadcastGossipMessage::NodeAnnouncement(msg), + BroadcastGossipMessage::NodeAnnouncement(msg), None, true, ); @@ -4557,7 +4560,8 @@ mod tests { assert_eq!(peer.gossip_broadcast_buffer.len(), 1); let pending_msg = &peer.gossip_broadcast_buffer[0]; - let expected = encode_msg!(&msg_100); + let msg: Message<()> = Message::ChannelUpdate(msg_100); + let expected = encode_message(msg); assert_eq!(expected, pending_msg.fetch_encoded_msg_with_type_pfx()); } } diff --git a/lightning/src/ln/wire.rs b/lightning/src/ln/wire.rs index bc1d83adb68..9065c49c676 100644 --- a/lightning/src/ln/wire.rs +++ b/lightning/src/ln/wire.rs @@ -425,19 +425,6 @@ where } } -/// Writes a message to the data buffer encoded as a 2-byte big-endian type and a variable-length -/// payload. -/// -/// # Errors -/// -/// Returns an I/O error if the write could not be completed. -pub(crate) fn write( - message: &M, buffer: &mut W, -) -> Result<(), io::Error> { - message.type_id().write(buffer)?; - message.write(buffer) -} - mod encode { /// Defines a constant type identifier for reading messages from the wire. 
pub trait Encode { @@ -737,34 +724,6 @@ mod tests { } } - #[test] - fn write_message_with_type() { - let message = msgs::Pong { byteslen: 2u16 }; - let mut buffer = Vec::new(); - assert!(write(&message, &mut buffer).is_ok()); - - let type_length = ::core::mem::size_of::<u16>(); - let (type_bytes, payload_bytes) = buffer.split_at(type_length); - assert_eq!(u16::from_be_bytes(type_bytes.try_into().unwrap()), msgs::Pong::TYPE); - assert_eq!(payload_bytes, &ENCODED_PONG[type_length..]); - } - - #[test] - fn read_message_encoded_with_write() { - let message = msgs::Pong { byteslen: 2u16 }; - let mut buffer = Vec::new(); - assert!(write(&message, &mut buffer).is_ok()); - - let decoded_message = read(&mut &buffer[..], &IgnoringMessageHandler {}).unwrap(); - match decoded_message { - Message::Pong(msgs::Pong { byteslen: 2u16 }) => (), - Message::Pong(msgs::Pong { byteslen }) => { - panic!("Expected byteslen {}; found: {}", message.byteslen, byteslen); - }, - _ => panic!("Expected pong message; found message type: {}", decoded_message.type_id()), - } - } - #[test] fn is_even_message_type() { let message = Message::<()>::Unknown(42);
From bd578235fbe5ed8ec18eb0ccd5e2e8fe10732ce4 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 9 Dec 2025 01:15:56 +0000 Subject: [PATCH 14/42] Make `AttributionData` actually pub since it's used in the public API `AttributionData` is a part of the public `UpdateFulfillHTLC` and `UpdateFailHTLC` messages, but it's not actually `pub`. Yet again re-exports bite us and leave us with a broken public API - we ended up accidentally sealing `AttributionData`. Instead, here, we just make `onion_utils` `pub` so that we avoid making the same mistake in the future. Note that this still leaves us with a rather useless public `AttributionData` API - it can't be created, updated, or decoded, it can only be serialized and deserialized, but at least it exists. 
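For illustration only, not part of the change itself: a minimal sketch of the serialize/deserialize-only surface this exposes, assuming the `lightning::ln::onion_utils` path made public here and the 80-byte hold-time plus 840-byte HMAC layout exercised by the updated fuzz target below.

    use lightning::ln::onion_utils::AttributionData;
    use lightning::util::ser::{Readable, Writeable};

    fn main() {
        // 80 hold-time bytes followed by 840 HMAC bytes, mirroring what the fuzz target reads.
        let raw = [0u8; 80 + 840];
        let data: AttributionData = Readable::read(&mut &raw[..]).unwrap();
        // The struct can't be constructed or inspected directly, but it can be re-serialized.
        let reencoded = data.encode();
        assert_eq!(reencoded.len(), raw.len());
    }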
--- fuzz/src/process_onion_failure.rs | 13 +-- lightning/src/events/mod.rs | 3 +- lightning/src/ln/mod.rs | 11 +-- lightning/src/ln/msgs.rs | 9 +- lightning/src/ln/onion_utils.rs | 157 ++++++++++++++++-------------- 5 files changed, 97 insertions(+), 96 deletions(-) diff --git a/fuzz/src/process_onion_failure.rs b/fuzz/src/process_onion_failure.rs index 1bc9900718a..ac70562c006 100644 --- a/fuzz/src/process_onion_failure.rs +++ b/fuzz/src/process_onion_failure.rs @@ -9,10 +9,12 @@ use lightning::{ ln::{ channelmanager::{HTLCSource, PaymentId}, msgs::OnionErrorPacket, + onion_utils, }, routing::router::{BlindedTail, Path, RouteHop, TrampolineHop}, types::features::{ChannelFeatures, NodeFeatures}, util::logger::Logger, + util::ser::Readable, }; // Imports that need to be added manually @@ -126,19 +128,18 @@ fn do_test(data: &[u8], out: Out) { let failure_data = get_slice!(failure_len); let attribution_data = if get_bool!() { - Some(lightning::ln::AttributionData { - hold_times: get_slice!(80).try_into().unwrap(), - hmacs: get_slice!(840).try_into().unwrap(), - }) + let mut bytes = get_slice!(80 + 840); + let data: onion_utils::AttributionData = Readable::read(&mut bytes).unwrap(); + Some(data) } else { None }; let encrypted_packet = OnionErrorPacket { data: failure_data.into(), attribution_data: attribution_data.clone() }; - lightning::ln::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet); + onion_utils::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet); if let Some(attribution_data) = attribution_data { - lightning::ln::decode_fulfill_attribution_data( + onion_utils::decode_fulfill_attribution_data( &secp_ctx, &logger, &path, diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index b9c4b1ca1ef..d97ae6097b6 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -25,8 +25,9 @@ use crate::blinded_path::payment::{ use crate::chain::transaction; use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields}; +use crate::ln::msgs; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::types::ChannelId; -use crate::ln::{msgs, LocalHTLCFailureReason}; use crate::offers::invoice::Bolt12Invoice; use crate::offers::invoice_request::InvoiceRequest; use crate::offers::static_invoice::StaticInvoice; diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 9473142cfed..04aa8181b92 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -41,26 +41,17 @@ pub mod channel; #[cfg(not(fuzzing))] pub(crate) mod channel; -pub(crate) mod onion_utils; +pub mod onion_utils; mod outbound_payment; pub mod wire; #[allow(dead_code)] // TODO(dual_funding): Remove once contribution to V2 channels is enabled. pub(crate) mod interactivetxs; -pub use onion_utils::{create_payment_onion, LocalHTLCFailureReason}; // Older rustc (which we support) refuses to let us call the get_payment_preimage_hash!() macro // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain // about an unnecessary mut. Thus, we silence the unused_mut warning in two test modules below. 
-#[cfg(fuzzing)] -pub use onion_utils::decode_fulfill_attribution_data; -#[cfg(fuzzing)] -pub use onion_utils::process_onion_failure; - -#[cfg(fuzzing)] -pub use onion_utils::AttributionData; - #[cfg(test)] #[allow(unused_mut)] mod async_payments_tests; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 0484ebe7530..f237d73e533 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -4366,7 +4366,7 @@ mod tests { InboundOnionForwardPayload, InboundOnionReceivePayload, OutboundTrampolinePayload, TrampolineOnionPacket, }; - use crate::ln::onion_utils::{AttributionData, HMAC_COUNT, HMAC_LEN, HOLD_TIME_LEN, MAX_HOPS}; + use crate::ln::onion_utils::AttributionData; use crate::ln::types::ChannelId; use crate::routing::gossip::{NodeAlias, NodeId}; use crate::types::features::{ @@ -5899,13 +5899,10 @@ mod tests { channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, reason: [1; 32].to_vec(), - attribution_data: Some(AttributionData { - hold_times: [3; MAX_HOPS * HOLD_TIME_LEN], - hmacs: [3; HMAC_LEN * HMAC_COUNT], - }), + attribution_data: Some(AttributionData::new()), }; let encoded_value = update_fail_htlc.encode(); - let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0020010101010101010101010101010101010101010101010101010101010101010101fd03980303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303").unwrap(); + let target_value = 
>::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0020010101010101010101010101010101010101010101010101010101010101010101fd03980000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); assert_eq!(encoded_value, target_value); } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 18aa43e27c6..dbc2ebc9d48 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -7,6 +7,8 @@ // You may not use this file except in accordance with one or both of these // licenses. +//! Low-level onion manipulation logic and fields + use super::msgs::OnionErrorPacket; use crate::blinded_path::BlindedHop; use crate::crypto::chacha20::ChaCha20; @@ -979,27 +981,79 @@ mod fuzzy_onion_utils { #[cfg(test)] pub(crate) attribution_failed_channel: Option, } + + pub fn process_onion_failure( + secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, + encrypted_packet: OnionErrorPacket, + ) -> DecodedOnionFailure + where + L::Target: Logger, + { + let (path, session_priv) = match htlc_source { + HTLCSource::OutboundRoute { ref path, ref session_priv, .. } => (path, session_priv), + _ => unreachable!(), + }; + + process_onion_failure_inner(secp_ctx, logger, path, &session_priv, None, encrypted_packet) + } + + /// Decodes the attribution data that we got back from upstream on a payment we sent. + pub fn decode_fulfill_attribution_data( + secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, + mut attribution_data: AttributionData, + ) -> Vec + where + L::Target: Logger, + { + let mut hold_times = Vec::new(); + + // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. 
+ let shared_secrets = + construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) + .map(|(shared_secret, _, _, _, _)| shared_secret); + + // Path length can reach 27 hops, but attribution data can only be conveyed back to the sender from the first 20 + // hops. Determine the number of hops to be used for attribution data. + let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); + + for (route_hop_idx, shared_secret) in + shared_secrets.enumerate().take(attributable_hop_count) + { + attribution_data.crypt(shared_secret.as_ref()); + + // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need + // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond + // the last attributable hop will not be available. + let position = attributable_hop_count - route_hop_idx - 1; + let res = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); + match res { + Ok(hold_time) => { + hold_times.push(hold_time); + + // Shift attribution data to prepare for processing the next hop. + attribution_data.shift_left(); + }, + Err(()) => { + // We will hit this if there is a node on the path that does not support fulfill attribution data. + log_debug!( + logger, + "Invalid fulfill HMAC in attribution data for node at pos {}", + route_hop_idx + ); + + break; + }, + } + } + + hold_times + } } #[cfg(fuzzing)] pub use self::fuzzy_onion_utils::*; #[cfg(not(fuzzing))] pub(crate) use self::fuzzy_onion_utils::*; -pub fn process_onion_failure( - secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, - encrypted_packet: OnionErrorPacket, -) -> DecodedOnionFailure -where - L::Target: Logger, -{ - let (path, session_priv) = match htlc_source { - HTLCSource::OutboundRoute { ref path, ref session_priv, .. } => (path, session_priv), - _ => unreachable!(), - }; - - process_onion_failure_inner(secp_ctx, logger, path, &session_priv, None, encrypted_packet) -} - /// Process failure we got back from upstream on a payment we sent (implying htlc_source is an /// OutboundRoute). fn process_onion_failure_inner( @@ -1449,56 +1503,6 @@ where } } -/// Decodes the attribution data that we got back from upstream on a payment we sent. -pub fn decode_fulfill_attribution_data( - secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, - mut attribution_data: AttributionData, -) -> Vec -where - L::Target: Logger, -{ - let mut hold_times = Vec::new(); - - // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. - let shared_secrets = - construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) - .map(|(shared_secret, _, _, _, _)| shared_secret); - - // Path length can reach 27 hops, but attribution data can only be conveyed back to the sender from the first 20 - // hops. Determine the number of hops to be used for attribution data. - let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); - - for (route_hop_idx, shared_secret) in shared_secrets.enumerate().take(attributable_hop_count) { - attribution_data.crypt(shared_secret.as_ref()); - - // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need - // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond - // the last attributable hop will not be available. 
- let position = attributable_hop_count - route_hop_idx - 1; - let res = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); - match res { - Ok(hold_time) => { - hold_times.push(hold_time); - - // Shift attribution data to prepare for processing the next hop. - attribution_data.shift_left(); - }, - Err(()) => { - // We will hit this if there is a node on the path that does not support fulfill attribution data. - log_debug!( - logger, - "Invalid fulfill HMAC in attribution data for node at pos {}", - route_hop_idx - ); - - break; - }, - } - } - - hold_times -} - const BADONION: u16 = 0x8000; const PERM: u16 = 0x4000; const NODE: u16 = 0x2000; @@ -2522,6 +2526,7 @@ where } /// Build a payment onion, returning the first hop msat and cltv values as well. +/// /// `cur_block_height` should be set to the best known block height + 1. pub fn create_payment_onion( secp_ctx: &Secp256k1, path: &Path, session_priv: &SecretKey, total_msat: u64, @@ -2711,22 +2716,28 @@ fn decode_next_hop, N: NextPacketBytes>( } } -pub const HOLD_TIME_LEN: usize = 4; -pub const MAX_HOPS: usize = 20; -pub const HMAC_LEN: usize = 4; +pub(crate) const HOLD_TIME_LEN: usize = 4; +pub(crate) const MAX_HOPS: usize = 20; +pub(crate) const HMAC_LEN: usize = 4; // Define the number of HMACs in the attributable data block. For the first node, there are 20 HMACs, and then for every // subsequent node, the number of HMACs decreases by 1. 20 + 19 + 18 + ... + 1 = 20 * 21 / 2 = 210. -pub const HMAC_COUNT: usize = MAX_HOPS * (MAX_HOPS + 1) / 2; +pub(crate) const HMAC_COUNT: usize = MAX_HOPS * (MAX_HOPS + 1) / 2; #[derive(Clone, Debug, Hash, PartialEq, Eq)] +/// Attribution data allows the sender of an HTLC to identify which hop failed an HTLC robustly, +/// preventing earlier hops from corrupting the HTLC failure information (or at least allowing the +/// sender to identify the earliest hop which corrupted HTLC failure information). +/// +/// Additionally, it allows a sender to identify how long each hop along a path held an HTLC, with +/// 100ms granularity. pub struct AttributionData { - pub hold_times: [u8; MAX_HOPS * HOLD_TIME_LEN], - pub hmacs: [u8; HMAC_LEN * HMAC_COUNT], + hold_times: [u8; MAX_HOPS * HOLD_TIME_LEN], + hmacs: [u8; HMAC_LEN * HMAC_COUNT], } impl AttributionData { - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { hold_times: [0; MAX_HOPS * HOLD_TIME_LEN], hmacs: [0; HMAC_LEN * HMAC_COUNT] } } } @@ -2775,7 +2786,7 @@ impl AttributionData { /// Writes the HMACs corresponding to the given position that have been added already by downstream hops. Position is /// relative to the final node. The final node is at position 0. - pub fn write_downstream_hmacs(&self, position: usize, w: &mut HmacEngine) { + pub(crate) fn write_downstream_hmacs(&self, position: usize, w: &mut HmacEngine) { // Set the index to the first downstream HMAC that we need to include. Note that we skip the first MAX_HOPS HMACs // because this is space reserved for the HMACs that we are producing for the current node. let mut hmac_idx = MAX_HOPS + MAX_HOPS - position - 1; From 926523d878305bd7700c8277a5b003bde2481621 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 9 Dec 2025 13:02:48 +0100 Subject: [PATCH 15/42] Bump `lightning-block-sync` version number to fix SemVer CI PR #4175 made the first breaking API change in `lightning-block-sync` since v0.2 without bumping the version number. Here we bump the version number, allowing our SemVer CI check to be happy again. 
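As an aside on the `AttributionData` changes earlier in this series: with the `hold_times` and `hmacs` arrays no longer public, code outside the `lightning` crate (such as the fuzz target above) now builds an `AttributionData` by deserializing 80 bytes of hold times (MAX_HOPS * HOLD_TIME_LEN = 20 * 4) plus 840 bytes of HMACs (HMAC_COUNT * HMAC_LEN = 210 * 4) through the `Readable` trait. A minimal sketch, not part of this patch and assuming the crate layout as patched here; the helper name `attribution_data_from_bytes` is purely illustrative:

    use lightning::ln::onion_utils::AttributionData;
    use lightning::util::ser::Readable;

    // Expects 80 bytes of hold times (20 hops * 4 bytes each) followed by 840 bytes
    // of HMACs (210 HMACs * 4 bytes each), the same 920-byte layout the updated fuzz
    // target feeds to `Readable::read`.
    fn attribution_data_from_bytes(buf: &[u8]) -> Option<AttributionData> {
        if buf.len() < 80 + 840 {
            return None;
        }
        let mut cursor = &buf[..80 + 840];
        let data: AttributionData = Readable::read(&mut cursor).ok()?;
        Some(data)
    }

This mirrors the `Readable::read(&mut bytes)` call that replaced the struct-literal construction in the fuzz target, and keeps the 20-hop / 210-HMAC arithmetic (20 + 19 + ... + 1 = 20 * 21 / 2 = 210) in one place.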
--- lightning-block-sync/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml index 51b19e3901e..97f199963ac 100644 --- a/lightning-block-sync/Cargo.toml +++ b/lightning-block-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-block-sync" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Jeffrey Czyz", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" From 9b2d57cafd2e46a59a9c276e0b41d3c33f1871f2 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 9 Dec 2025 13:31:49 +0100 Subject: [PATCH 16/42] Clean up handle_monitor_update_completion_actions after rustfmt --- lightning/src/ln/channelmanager.rs | 67 +++++++++++------------------- 1 file changed, 25 insertions(+), 42 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ea6409d0e1e..cd0adaaef28 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9465,58 +9465,41 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ WithContext::from(&self.logger, peer_id, chan_id, Some(payment_hash)); log_trace!(logger, "Handling PaymentClaimed monitor update completion action"); - if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { + if let Some((cp_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); - per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { + per_peer_state.get(&cp_node_id).map(|peer_state_mutex| { let mut peer_state = peer_state_mutex.lock().unwrap(); let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id); if let btree_map::Entry::Occupied(mut blockers) = blockers_entry { blockers.get_mut().retain(|blocker| { - if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { - pending_claim, - } = &blocker - { - if *pending_claim == claim_ptr { - let mut pending_claim_state_lock = - pending_claim.0.lock().unwrap(); - let pending_claim_state = - &mut *pending_claim_state_lock; - pending_claim_state.channels_without_preimage.retain( - |(cp, cid)| { - let this_claim = *cp == counterparty_node_id - && *cid == chan_id; - if this_claim { - pending_claim_state - .channels_with_preimage - .push((*cp, *cid)); - false - } else { - true - } - }, - ); - if pending_claim_state - .channels_without_preimage - .is_empty() - { - for (cp, cid) in pending_claim_state - .channels_with_preimage - .iter() - { - let freed_chan = (*cp, *cid, blocker.clone()); - freed_channels.push(freed_chan); - } - } - !pending_claim_state - .channels_without_preimage - .is_empty() + let pending_claim = match &blocker { + RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { + pending_claim, + } => pending_claim, + _ => return true, + }; + if *pending_claim != claim_ptr { + return true; + } + let mut claim_state_lock = pending_claim.0.lock().unwrap(); + let claim_state = &mut *claim_state_lock; + claim_state.channels_without_preimage.retain(|(cp, cid)| { + let this_claim = *cp == cp_node_id && *cid == chan_id; + if this_claim { + claim_state.channels_with_preimage.push((*cp, *cid)); + false } else { true } - } else { - true + }); + if claim_state.channels_without_preimage.is_empty() { + for (cp, cid) in claim_state.channels_with_preimage.iter() { + let freed_chan = 
(*cp, *cid, blocker.clone()); + freed_channels.push(freed_chan); + } } + !claim_state.channels_without_preimage.is_empty() }); if blockers.get().is_empty() { blockers.remove(); From e5528eadbf4d98ad7c8c7b435dbd3e1c5c7962c3 Mon Sep 17 00:00:00 2001 From: elnosh Date: Wed, 12 Nov 2025 10:21:48 -0500 Subject: [PATCH 17/42] Add docs to `commitment_signed_dance_return_raa` --- lightning/src/ln/functional_test_utils.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 3460d300b3a..7fbf72a357c 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2670,6 +2670,10 @@ pub fn do_main_commitment_signed_dance( (extra_msg_option, bs_revoke_and_ack) } +/// Runs the commitment_signed dance by delivering the commitment_signed and handling the +/// responding `revoke_and_ack` and `commitment_signed`. +/// +/// Returns the recipient's `revoke_and_ack`. pub fn commitment_signed_dance_return_raa( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, commitment_signed: &Vec, fail_backwards: bool, From 8423ffadbd260909941dc0061208394bdc69a7fc Mon Sep 17 00:00:00 2001 From: elnosh Date: Fri, 14 Nov 2025 09:01:36 -0500 Subject: [PATCH 18/42] Remove `check_added_monitors` macro Replace calls to `check_added_monitors` macro to the identically-named function. --- .../tests/lsps2_integration_tests.rs | 22 +- lightning-persister/src/test_utils.rs | 11 +- lightning/src/chain/chainmonitor.rs | 21 +- lightning/src/chain/channelmonitor.rs | 7 +- lightning/src/ln/async_payments_tests.rs | 30 +- lightning/src/ln/async_signer_tests.rs | 30 +- lightning/src/ln/blinded_payment_tests.rs | 66 +-- lightning/src/ln/chanmon_update_fail_tests.rs | 408 +++++++++--------- lightning/src/ln/channelmanager.rs | 60 +-- lightning/src/ln/functional_test_utils.rs | 94 ++-- .../src/ln/max_payment_path_len_tests.rs | 4 +- lightning/src/ln/monitor_tests.rs | 66 +-- lightning/src/ln/offers_tests.rs | 6 +- lightning/src/ln/onion_route_tests.rs | 48 +-- lightning/src/ln/payment_tests.rs | 228 +++++----- lightning/src/ln/priv_short_conf_tests.rs | 40 +- lightning/src/ln/quiescence_tests.rs | 6 +- lightning/src/ln/reload_tests.rs | 34 +- lightning/src/ln/reorg_tests.rs | 30 +- lightning/src/ln/shutdown_tests.rs | 42 +- lightning/src/ln/update_fee_tests.rs | 2 +- lightning/src/ln/zero_fee_commitment_tests.rs | 6 +- lightning/src/util/persist.rs | 6 +- 23 files changed, 621 insertions(+), 646 deletions(-) diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 82f93b5990c..e4ace27b715 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -7,19 +7,11 @@ use common::{ get_lsps_message, LSPSNodes, LSPSNodesWithPayer, LiquidityNode, }; -use lightning::check_added_monitors; use lightning::events::{ClosureReason, Event}; use lightning::get_event_msg; use lightning::ln::channelmanager::PaymentId; use lightning::ln::channelmanager::Retry; -use lightning::ln::functional_test_utils::create_funding_transaction; -use lightning::ln::functional_test_utils::do_commitment_signed_dance; -use lightning::ln::functional_test_utils::expect_channel_pending_event; -use lightning::ln::functional_test_utils::expect_channel_ready_event; -use lightning::ln::functional_test_utils::expect_payment_sent; -use lightning::ln::functional_test_utils::test_default_channel_config; -use 
lightning::ln::functional_test_utils::SendEvent; -use lightning::ln::functional_test_utils::{connect_blocks, create_chan_between_nodes_with_value}; +use lightning::ln::functional_test_utils::*; use lightning::ln::msgs::BaseMessageHandler; use lightning::ln::msgs::ChannelMessageHandler; use lightning::ln::msgs::MessageSendEvent; @@ -1226,7 +1218,7 @@ fn client_trusts_lsp_end_to_end_test() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -1566,7 +1558,7 @@ fn create_channel_with_manual_broadcast( let funding_created = get_event_msg!(service_node, MessageSendEvent::SendFundingCreated, *client_node_id); client_node.node.handle_funding_created(*service_node_id, &funding_created); - check_added_monitors!(client_node.inner, 1); + check_added_monitors(&client_node.inner, 1); let bs_signed_locked = client_node.node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), 2); @@ -1602,7 +1594,7 @@ fn create_channel_with_manual_broadcast( _ => panic!("Unexpected event"), } expect_channel_pending_event(&client_node, &service_node_id); - check_added_monitors!(service_node.inner, 1); + check_added_monitors(&service_node.inner, 1); as_channel_ready = get_event_msg!(service_node, MessageSendEvent::SendChannelReady, *client_node_id); @@ -1699,7 +1691,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -1890,7 +1882,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -2227,7 +2219,7 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index 1de51f44cb2..55208c61491 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -1,14 +1,11 @@ +use lightning::check_closed_broadcast; use lightning::events::ClosureReason; -use lightning::ln::functional_test_utils::{ - check_closed_event, connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, - create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, -}; +use lightning::ln::functional_test_utils::*; use lightning::util::persist::{ migrate_kv_store_data, read_channel_monitors, KVStoreSync, MigratableKVStore, KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; -use lightning::{check_added_monitors, check_closed_broadcast}; use std::panic::RefUnwindSafe; @@ -190,7 +187,7 @@ pub(crate) fn 
do_test_store(store_0: &K, store_1: &K) { let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); @@ -206,7 +203,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. check_persisted_data!(11); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index f4a1edff038..9fd6383cf7e 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -1568,7 +1568,6 @@ where mod tests { use crate::chain::channelmonitor::ANTI_REORG_DELAY; use crate::chain::{ChannelMonitorUpdateStatus, Watch}; - use crate::check_added_monitors; use crate::events::{ClosureReason, Event}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; @@ -1601,9 +1600,9 @@ mod tests { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.claim_funds(payment_preimage_2); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone(); @@ -1666,14 +1665,14 @@ mod tests { nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_first_raa, as_first_update) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_first_update); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0] @@ -1683,21 +1682,21 @@ mod tests { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_2nd_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); expect_payment_path_successful!(nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_second_raa, as_second_update) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_update); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let 
bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); expect_payment_path_successful!(nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } #[test] diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 10e5049682e..515a3dc5f1d 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -6911,10 +6911,7 @@ mod tests { use crate::util::logger::Logger; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_utils::{TestBroadcaster, TestFeeEstimator, TestLogger}; - use crate::{ - check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, - get_route_and_payment_hash, - }; + use crate::{check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash}; #[allow(unused_imports)] use crate::prelude::*; @@ -6973,7 +6970,7 @@ mod tests { nodes[1].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update // and provides the claim preimages for the two pending HTLCs. The first update generates diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 8e7fbdf94fd..1f1bb70714d 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -981,7 +981,7 @@ fn ignore_duplicate_invoice() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&always_online_node_id, &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[always_online_node, async_recipient]]; let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); @@ -1060,7 +1060,7 @@ fn ignore_duplicate_invoice() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&always_online_node_id, &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) .without_clearing_recipient_events(); @@ -1129,7 +1129,7 @@ fn async_receive_flow_success() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Receiving a duplicate release_htlc message doesn't result in duplicate payment. nodes[0] @@ -1519,7 +1519,7 @@ fn amount_doesnt_match_invreq() { let mut ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); assert!(matches!( ev, MessageSendEvent::UpdateHTLCs { ref updates, .. 
} if updates.update_add_htlcs.len() == 1)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[2], &nodes[3]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); let claimable_ev = do_pass_along_path(args).unwrap(); @@ -1723,7 +1723,7 @@ fn invalid_async_receive_with_retry( &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[2].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], true); // Trigger a retry and make sure it fails after calling the closure that induces recipient @@ -1735,7 +1735,7 @@ fn invalid_async_receive_with_retry( let mut ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); assert!(matches!( ev, MessageSendEvent::UpdateHTLCs { ref updates, .. } if updates.update_add_htlcs.len() == 1)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() @@ -1749,7 +1749,7 @@ fn invalid_async_receive_with_retry( let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); let claimable_ev = do_pass_along_path(args).unwrap(); @@ -1915,7 +1915,7 @@ fn expired_static_invoice_payment_path() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) @@ -2360,7 +2360,7 @@ fn refresh_static_invoices_for_used_offers() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&server.node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[server, recipient]]; let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); @@ -2694,7 +2694,7 @@ fn invoice_server_is_not_channel_peer() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&forwarding_node.node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[forwarding_node, recipient]]; let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); @@ -2933,7 +2933,7 @@ fn async_payment_e2e() { let mut events = sender_lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); - check_added_monitors!(sender_lsp, 1); + check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); @@ -3170,7 +3170,7 @@ fn 
intercepted_hold_htlc() { let mut events = lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); - check_added_monitors!(lsp, 1); + check_added_monitors(&lsp, 1); let path: &[&Node] = &[recipient]; let args = PassAlongPathArgs::new(lsp, path, amt_msat, payment_hash, ev); @@ -3271,7 +3271,7 @@ fn async_payment_mpp() { let expected_path: &[&Node] = &[recipient]; lsp_a.node.process_pending_htlc_forwards(); - check_added_monitors!(lsp_a, 1); + check_added_monitors(&lsp_a, 1); let mut events = lsp_a.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); @@ -3280,7 +3280,7 @@ fn async_payment_mpp() { do_pass_along_path(args); lsp_b.node.process_pending_htlc_forwards(); - check_added_monitors!(lsp_b, 1); + check_added_monitors(&lsp_b, 1); let mut events = lsp_b.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); @@ -3417,7 +3417,7 @@ fn release_htlc_races_htlc_onion_decode() { let mut events = sender_lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); - check_added_monitors!(sender_lsp, 1); + check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 0c7a467fde7..f38afc41fcc 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -301,7 +301,7 @@ fn do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. let payment_event = { @@ -528,7 +528,7 @@ fn do_test_async_raa_peer_disconnect( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. let payment_event = { @@ -593,7 +593,7 @@ fn do_test_async_raa_peer_disconnect( (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(dst, 0); + check_added_monitors(&dst, 0); } // Expect the RAA @@ -677,7 +677,7 @@ fn do_test_async_commitment_signature_peer_disconnect( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. 
let payment_event = { @@ -743,7 +743,7 @@ fn do_test_async_commitment_signature_peer_disconnect( (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(dst, 0); + check_added_monitors(&dst, 0); } // Expect the RAA @@ -813,14 +813,14 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { .node .send_payment_with_route(route, payment_hash_2, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); get_htlc_update_msgs(&nodes[0], &node_b_id); // Send back update_fulfill_htlc + commitment_signed for the first payment. nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Handle the update_fulfill_htlc, but fail to persist the monitor update when handling the // commitment_signed. @@ -844,7 +844,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); } // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -893,7 +893,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); } // Make sure that on signer_unblocked we have the same behavior (even though RAA is ready, @@ -946,18 +946,18 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { nodes[1].node.handle_revoke_and_ack(node_a_id, as_resp.1.as_ref().unwrap()); let (bs_revoke_and_ack, bs_second_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); // The rest of this is boilerplate for resolving the previous state. 
nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); let as_commitment_signed = get_htlc_update_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1] .node @@ -965,15 +965,15 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_and_process_pending_htlcs(&nodes[1], false); diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index a902cfebd12..7941a81f61e 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -438,11 +438,11 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { } nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if intro_fails { let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -476,7 +476,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { cause_error!(2, 3, update_add); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); expect_and_process_pending_htlcs(&nodes[2], false); @@ -488,7 +488,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), core::slice::from_ref(&failed_destination) ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -535,10 +535,10 @@ fn failed_backwards_to_intro_node() { let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - 
check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -548,7 +548,7 @@ fn failed_backwards_to_intro_node() { // Ensure the final node fails to handle the HTLC. payment_event.msgs[0].onion_routing_packet.hop_data[0] ^= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); @@ -621,7 +621,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); macro_rules! cause_error { @@ -645,7 +645,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, _ => panic!("Unexpected event {:?}", events), } check_closed_broadcast(&$curr_node, 1, true); - check_added_monitors!($curr_node, 1); + check_added_monitors(&$curr_node, 1); $curr_node.node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), @@ -657,22 +657,22 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, if intro_fails { cause_error!(nodes[0], nodes[1], nodes[2], chan_id_1_2, chan_upd_1_2.short_channel_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); return } expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_1_2 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add = &mut updates_1_2.update_add_htlcs[0]; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); cause_error!(nodes[1], nodes[2], nodes[3], chan_id_2_3, chan_upd_2_3.short_channel_id); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -751,7 +751,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); return } @@ -860,7 +860,7 @@ fn three_hop_blinded_path_fail() { nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); 
fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2], &nodes[3]], false); } @@ -962,10 +962,10 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { SendEvent::from_event(ev) }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut payment_event_1_2 = { let mut events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -977,7 +977,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { match check { ReceiveCheckFail::RecipientFail => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); check_payment_claimable( @@ -989,7 +989,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[2].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::OnionDecodeFail => { let session_priv = SecretKey::from_slice(&session_priv).unwrap(); @@ -1013,7 +1013,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { &payment_hash ).unwrap(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); @@ -1023,7 +1023,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { let update_add = &mut payment_event_1_2.msgs[0]; update_add.amount_msat -= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); @@ -1037,7 +1037,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event_1_2.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); assert!(commitment_signed_dance_through_cp_raa(&nodes[2], &nodes[1], false, false).is_none()); @@ -1048,15 +1048,15 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ReceiveCheckFail::ProcessPendingHTLCsCheck => { assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as 
u32 + TEST_FINAL_CLTV); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], true); expect_htlc_failure_conditions(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::PaymentConstraints => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); @@ -1152,7 +1152,7 @@ fn blinded_path_retries() { nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let updates = get_htlc_update_msgs(&nodes[3], &$intro_node.node.get_our_node_id()); assert_eq!(updates.update_fail_malformed_htlcs.len(), 1); @@ -1183,7 +1183,7 @@ fn blinded_path_retries() { fail_payment_back!(nodes[1]); // Pass the retry along. - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None); @@ -1263,7 +1263,7 @@ fn min_htlc() { SendEvent::from_event(ev) }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[1], false); expect_htlc_handling_failed_destinations!( @@ -1461,7 +1461,7 @@ fn fails_receive_tlvs_authentication() { do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[1], false); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); let mut update_fail = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -2098,7 +2098,7 @@ fn test_trampoline_forward_payload_encoded_as_receive() { }; nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let replacement_onion = { // create a substitute onion where the last Trampoline hop is a forward @@ -2263,7 +2263,7 @@ fn do_test_trampoline_single_hop_receive(success: bool) { }; nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - 
check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt_msat, payment_hash, payment_secret); if success { @@ -2586,7 +2586,7 @@ fn do_test_trampoline_relay(blinded: bool, test_case: TrampolineTestCase) { ) .unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2765,7 +2765,7 @@ fn test_trampoline_forward_rejection() { nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index e79e8becc66..57f0ca87d45 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -123,7 +123,7 @@ fn test_monitor_and_persister_update_fail() { // Try to update ChannelMonitor nodes[1].node.claim_funds(preimage); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -169,7 +169,7 @@ fn test_monitor_and_persister_update_fail() { } } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_sent(&nodes[0], preimage, None, false, false); } @@ -195,7 +195,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -213,7 +213,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); @@ -262,7 +262,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -281,7 +281,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { message: message.clone(), }; nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, message).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); // TODO: Once we hit the chain with the 
failure transaction we should check that we get a @@ -338,7 +338,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -347,7 +347,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1] // but nodes[0] won't respond since it is frozen. nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -387,7 +387,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -405,7 +405,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); macro_rules! disconnect_reconnect_peers { () => {{ @@ -454,10 +454,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert_eq!(reestablish_2.len(), 1); nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); assert!(as_resp.0.is_none()); @@ -501,7 +501,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); as_resp.1 = Some(as_resp_raa); bs_resp.2 = None; @@ -544,7 +544,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if disconnect_count & !disconnect_flags > 2 { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); @@ -568,7 +568,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_commitment_update.update_fail_htlcs.is_empty()); assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); 
assert!(as_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }; } @@ -581,7 +581,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }; } @@ -645,7 +645,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { ); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1] .node @@ -653,15 +653,15 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); expect_and_process_pending_htlcs(&nodes[1], false); @@ -743,7 +743,7 @@ fn test_monitor_update_fail_cs() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -752,13 +752,13 @@ fn test_monitor_update_fail_cs() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -766,7 +766,7 @@ fn test_monitor_update_fail_cs() { MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -784,7 +784,7 @@ fn test_monitor_update_fail_cs() { .node .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); }, _ => panic!("Unexpected event"), @@ -793,11 +793,11 @@ fn test_monitor_update_fail_cs() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &final_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); @@ -851,7 +851,7 @@ fn test_monitor_update_fail_no_rebroadcast() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -864,13 +864,13 @@ fn test_monitor_update_fail_no_rebroadcast() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); expect_and_process_pending_htlcs(&nodes[1], false); let events = nodes[1].node.get_and_clear_pending_events(); @@ -906,7 +906,7 @@ fn test_monitor_update_raa_while_paused() { let id = PaymentId(our_payment_hash_1.0); nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -916,13 +916,13 @@ fn test_monitor_update_raa_while_paused() { let id_2 = PaymentId(our_payment_hash_2.0); nodes[1].node.send_payment_with_route(route, our_payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event_1.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event_1.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); @@ -930,37 +930,37 @@ fn test_monitor_update_raa_while_paused() { nodes[0].node.handle_update_add_htlc(node_b_id, &send_event_2.msgs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_event_2.commitment_msg); 
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let as_update_raa = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_update_raa.0); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update_raa.1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_and_process_pending_htlcs(&nodes[0], false); expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000); @@ -994,7 +994,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); @@ -1007,7 +1007,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let commitment = updates.commitment_signed; let bs_revoke_and_ack = commitment_signed_dance_return_raa(&nodes[1], &nodes[2], &commitment, false); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); // While the second channel is AwaitingRAA, forward a second payment to get it into the // holding cell. 
@@ -1016,7 +1016,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -1024,7 +1024,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Now fail monitor updating. @@ -1033,7 +1033,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Forward a third payment which will also be added to the holding cell, despite the channel // being paused waiting a monitor update. @@ -1042,18 +1042,18 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); let id_3 = PaymentId(payment_hash_3.0); nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, true); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell // and not forwarded. 
expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { @@ -1063,13 +1063,13 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_4 = RecipientOnionFields::secret_only(payment_secret_4); let id_4 = PaymentId(payment_hash_4.0); nodes[2].node.send_payment_with_route(route, payment_hash_4, onion_4, id_4).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_c_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); (Some(payment_preimage_4), Some(payment_hash_4)) } else { @@ -1081,12 +1081,12 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_2.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); expect_and_process_pending_htlcs_and_htlc_handling_failed( &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); if test_ignore_second_cs { @@ -1138,11 +1138,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let as_cs; if test_ignore_second_cs { nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &raa.unwrap()); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(bs_cs.update_add_htlcs.is_empty()); assert!(bs_cs.update_fail_htlcs.is_empty()); @@ -1151,14 +1151,14 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(bs_cs.update_fee.is_none()); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); as_cs = get_htlc_update_msgs(&nodes[1], &node_c_id); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } else { nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); // As both messages are for nodes[1], they're in order. 
@@ -1167,7 +1167,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1185,7 +1185,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[1] .node .handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1200,23 +1200,23 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.handle_update_add_htlc(node_b_id, &as_cs.update_add_htlcs[0]); nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_cs.commitment_signed); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_second_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_second_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_second_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); expect_and_process_pending_htlcs(&nodes[2], false); @@ -1238,7 +1238,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { if test_ignore_second_cs { expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); send_event = SendEvent::from_node(&nodes[1]); assert_eq!(send_event.node_id, node_a_id); @@ -1292,7 +1292,7 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.peer_disconnected(node_b_id); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -1303,7 +1303,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -1328,7 +1328,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.peer_disconnected(node_a_id); 
nodes[0].node.peer_disconnected(node_b_id); @@ -1346,7 +1346,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); // The "disabled" bit should be unset as we just reconnected let bs_channel_upd = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); @@ -1355,7 +1355,7 @@ fn test_monitor_update_fail_reestablish() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(updates.update_add_htlcs.is_empty()); @@ -1399,28 +1399,28 @@ fn raa_no_response_awaiting_raa_state() { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from @@ -1431,17 +1431,17 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! 
- check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); @@ -1452,39 +1452,39 @@ fn raa_no_response_awaiting_raa_state() { let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); let id_3 = PaymentId(payment_hash_3.0); nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // Finally deliver the RAA to nodes[1] which results in a CS response to the last update nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); let bs_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000); @@ -1519,7 +1519,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].node.peer_disconnected(node_a_id); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let init_msg = msgs::Init { @@ -1544,7 +1544,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Send a second payment from A to B, resulting in a commitment update that gets swallowed with @@ -1554,12 +1554,12 @@ fn claim_while_disconnected_monitor_update_fail() { let onion_2 = 
RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC // until we've channel_monitor_update'd and updated for the new commitment transaction. @@ -1569,7 +1569,7 @@ fn claim_while_disconnected_monitor_update_fail() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_msgs.len(), 2); @@ -1583,11 +1583,11 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1596,7 +1596,7 @@ fn claim_while_disconnected_monitor_update_fail() { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -1605,20 +1605,20 @@ fn claim_while_disconnected_monitor_update_fail() { let bs_commitment = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); @@ -1661,7 +1661,7 @@ fn monitor_failed_no_reestablish_response() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, 
id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1670,7 +1670,7 @@ fn monitor_failed_no_reestablish_response() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1] // is still failing to update monitors. @@ -1698,17 +1698,17 @@ fn monitor_failed_no_reestablish_response() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); @@ -1745,7 +1745,7 @@ fn first_message_on_recv_ordering() { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1753,13 +1753,13 @@ fn first_message_on_recv_ordering() { assert_eq!(payment_event.node_id, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); @@ -1770,7 +1770,7 @@ fn first_message_on_recv_ordering() { let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -1783,20 +1783,20 @@ fn 
first_message_on_recv_ordering() { // to the next message also tests resetting the delivery order. nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an // RAA/CS response, which should be generated when we call channel_monitor_update (with the // appropriate HTLC acceptance). nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); expect_and_process_pending_htlcs(&nodes[1], false); @@ -1804,13 +1804,13 @@ fn first_message_on_recv_ordering() { let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); @@ -1850,7 +1850,7 @@ fn test_monitor_update_fail_claim() { nodes[1].node.claim_funds(payment_preimage_1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Note that at this point there is a pending commitment transaction update for A being held by // B. Even when we go to send the payment from C through B to A, B will not update this @@ -1862,7 +1862,7 @@ fn test_monitor_update_fail_claim() { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[2].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be // paused, so forward shouldn't succeed until we call channel_monitor_updated(). 
@@ -1881,7 +1881,7 @@ fn test_monitor_update_fail_claim() { let id_3 = PaymentId(payment_hash_3.0); let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); nodes[2].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1896,7 +1896,7 @@ fn test_monitor_update_fail_claim() { let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_fulfill.update_fulfill_htlcs.remove(0)); @@ -1905,7 +1905,7 @@ fn test_monitor_update_fail_claim() { // Get the payment forwards, note that they were batched into one commitment update. nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_forward_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[0]); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[1]); @@ -1994,7 +1994,7 @@ fn test_monitor_update_on_pending_forwards() { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let cs_fail_update = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &cs_fail_update.update_fail_htlcs[0]); @@ -2006,7 +2006,7 @@ fn test_monitor_update_on_pending_forwards() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[2].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2019,12 +2019,12 @@ fn test_monitor_update_on_pending_forwards() { &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); @@ -2077,7 +2077,7 @@ fn monitor_update_claim_fail_no_response() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2088,7 +2088,7 @@ fn monitor_update_claim_fail_no_response() { 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2096,11 +2096,11 @@ fn monitor_update_claim_fail_no_response() { let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); @@ -2144,7 +2144,7 @@ fn do_during_funding_monitor_fail( .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let funding_created_msg = @@ -2154,20 +2154,20 @@ fn do_during_funding_monitor_fail( funding_created_msg.funding_output_index, ); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), ); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); expect_channel_pending_event(&nodes[0], &node_b_id); let events = nodes[0].node.get_and_clear_pending_events(); @@ -2222,7 +2222,7 @@ fn do_during_funding_monitor_fail( chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { if !restore_b_before_lock { @@ -2326,7 +2326,7 @@ fn test_path_paused_mpp() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // Pass the first HTLC of the payment along to nodes[3]. 
@@ -2382,7 +2382,7 @@ fn test_pending_update_fee_ack_on_reconnect() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_initial_send_msgs = get_htlc_update_msgs(&nodes[1], &node_a_id); // bs_initial_send_msgs are not delivered until they are re-generated after reconnect @@ -2391,7 +2391,7 @@ fn test_pending_update_fee_ack_on_reconnect() { *feerate_lock *= 2; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_update_fee_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(as_update_fee_msgs.update_fee.is_some()); @@ -2399,7 +2399,7 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &as_update_fee_msgs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // bs_first_raa is not delivered until it is re-generated after reconnect @@ -2441,33 +2441,33 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_initial_send_msgs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id).commitment_signed; nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_commitment_signed_batch_test( node_a_id, &get_htlc_update_msgs(&nodes[0], &node_b_id).commitment_signed, ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[0], false); expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000); @@ -2504,13 +2504,13 @@ fn test_fail_htlc_on_broadcast_after_claim() { assert_eq!(bs_txn.len(), 1); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 2000); let mut cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); let mut bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); mine_transaction(&nodes[1], &bs_txn[0]); @@ -2518,7 +2518,7 @@ fn 
test_fail_htlc_on_broadcast_after_claim() { check_closed_event(&nodes[1], 1, reason, &[node_c_id], 100000); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs_and_htlc_handling_failed( &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }], @@ -2550,7 +2550,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { *feerate_lock += 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(update_msgs.update_fee.is_some()); if deliver_update { @@ -2602,38 +2602,38 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &update_msgs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_update = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_update_fee(node_a_id, as_second_update.update_fee.as_ref().unwrap()); nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_second_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } else { let commitment = &update_msgs.commitment_signed; do_commitment_signed_dance(&nodes[1], &nodes[0], commitment, false, false); @@ -2697,29 +2697,29 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send = SendEvent::from_node(&nodes[0]); assert_eq!(send.msgs.len(), 1); let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 0); + 
check_added_monitors(&nodes[0], 0); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &send.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); if disconnect { // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just @@ -2751,7 +2751,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); @@ -2792,14 +2792,14 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // New outbound messages should be generated immediately upon a call to // get_and_clear_pending_msg_events (but not before). - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert_eq!(events.len(), 1); // Deliver the pending in-flight CS nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let commitment_msg = match events.pop().unwrap() { MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, mut updates } => { @@ -2819,13 +2819,13 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { }; nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(commitment_signed_dance_through_cp_raa(&nodes[1], &nodes[0], false, false).is_none()); let events = nodes[1].node.get_and_clear_pending_events(); @@ -2885,19 +2885,19 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let onion_2 = RecipientOnionFields::secret_only(second_payment_secret); let id_2 = PaymentId(second_payment_hash.0); nodes[0].node.send_payment_with_route(route, second_payment_hash, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, 
&send_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id)); } @@ -2914,13 +2914,13 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); get_htlc_update_msgs(&nodes[2], &node_b_id); // Note that we don't populate fulfill_msg.attribution_data here, which will lead to hold times being // unavailable. } else { nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -2937,7 +2937,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f } nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_updates = None; if htlc_status != HTLCStatusAtDupClaim::HoldingCell { @@ -2976,7 +2976,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f if htlc_status == HTLCStatusAtDupClaim::HoldingCell { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa.unwrap()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); // We finally receive the second payment, but don't claim it bs_updates = Some(get_htlc_update_msgs(&nodes[1], &node_a_id)); @@ -3029,13 +3029,13 @@ fn test_temporary_error_during_shutdown() { node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_shutdown( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id), ); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -3097,20 +3097,20 @@ fn double_temp_error() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // `claim_funds` results in a ChannelMonitorUpdate. nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (latest_update_1, _) = get_latest_mon_update_id(&nodes[1], channel_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, // which had some asserts that prevented it from being called twice. 
nodes[1].node.claim_funds(payment_preimage_2); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update_2, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_2); // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions @@ -3160,18 +3160,18 @@ fn double_temp_error() { }; assert_eq!(node_id, node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_1); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_b1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_a1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed_a1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Complete the second HTLC. 
let ((update_fulfill_2, commitment_signed_b2), raa_b2) = { @@ -3200,11 +3200,11 @@ fn double_temp_error() { ) }; nodes[0].node.handle_revoke_and_ack(node_b_id, &raa_b2); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_2); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); do_commitment_signed_dance(&nodes[0], &nodes[1], &commitment_signed_b2, false, false); expect_payment_sent!(nodes[0], payment_preimage_2); @@ -3267,12 +3267,12 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events(); @@ -3282,7 +3282,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -3377,13 +3377,13 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // nodes[1] happily sends its funding_signed even though its awaiting the persistence of the // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding @@ -3392,7 +3392,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_channel_pending_event(&nodes[0], &node_b_id); let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4045,7 +4045,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { message: msg.clone(), }; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, msg).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4492,7 +4492,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { .node .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); @@ -4502,20 +4502,20 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); // Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim // the payment on C and give B the preimage for it. nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); // At this point nodes[1] has the preimage and is waiting for the `ChannelMonitorUpdate` for @@ -4530,13 +4530,13 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { // background events (via `get_and_clear_pending_msg_events`), the final `ChannelMonitorUpdate` // will fly and we'll drop the preimage from channel B's `ChannelMonitor`. We'll also release // the `Event::PaymentForwarded`. - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(!get_monitor!(nodes[1], chan_b.2) .get_all_current_outbound_htlcs() .iter() @@ -4569,7 +4569,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { .node .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); @@ -4579,7 +4579,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); @@ -4588,7 +4588,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { // `Event::PaymentClaimed` from being generated. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // Once we complete the `ChannelMonitorUpdate` the `Event::PaymentClaimed` will become diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index c8f209236ef..0411d519a9d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18673,7 +18673,7 @@ mod tests { RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap(); nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); @@ -18683,19 +18683,19 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18709,7 +18709,7 @@ mod tests { // Send the second half of the original MPP payment. nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None); @@ -18720,34 +18720,34 @@ mod tests { // lightning messages manually. 
nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], our_payment_hash, 200_000); - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); let mut bs_1st_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_1st_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_1st_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_first_raa, as_first_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_cs); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_2nd_updates.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_2nd_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); let as_second_updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Note that successful MPP payments will generate a single PaymentSent event upon the first // path's success and a PaymentPathSuccessful event for each path's success. 
@@ -18801,13 +18801,13 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward @@ -18815,7 +18815,7 @@ mod tests { let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18839,7 +18839,7 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -18850,19 +18850,19 @@ mod tests { let payment_secret = PaymentSecret([43; 32]); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18882,7 +18882,7 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1, route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -18899,19 +18899,19 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params, Retry::Attempts(0) 
).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18957,7 +18957,7 @@ mod tests { RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap(); nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(updates.update_add_htlcs.len(), 1); @@ -19025,7 +19025,7 @@ mod tests { let message = "Channel force-closed".to_owned(); nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 100000); @@ -19089,7 +19089,7 @@ mod tests { .node .force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); @@ -19291,13 +19291,13 @@ mod tests { let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); } open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 7fbf72a357c..563c60ecdcf 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1267,16 
+1267,6 @@ pub fn check_added_monitors>(node: & } } -/// Check whether N channel monitor(s) have been added. -/// -/// Don't use this, use the identically-named function instead. -#[macro_export] -macro_rules! check_added_monitors { - ($node: expr, $count: expr) => { - $crate::ln::functional_test_utils::check_added_monitors(&$node, $count); - }; -} - fn claimed_htlc_matches_path<'a, 'b, 'c>( origin_node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], htlc: &ClaimedHTLC, ) -> bool { @@ -1355,7 +1345,7 @@ pub fn _reload_node<'a, 'b, 'c>( node.chain_monitor.load_existing_monitor(channel_id, monitor), Ok(ChannelMonitorUpdateStatus::Completed), ); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); } node_deserialized @@ -1511,7 +1501,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( .node .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_ok()); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created_msg = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); @@ -1554,7 +1544,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_err()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); tx } @@ -1636,7 +1626,7 @@ pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( get_event_msg!(initiator, MessageSendEvent::SendFundingCreated, receiver_node_id); receiver.node.handle_funding_created(initiator_node_id, &funding_created); - check_added_monitors!(receiver, 1); + check_added_monitors(&receiver, 1); let bs_signed_locked = receiver.node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), 2); let as_channel_ready; @@ -1646,7 +1636,7 @@ pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( initiator.node.handle_funding_signed(receiver_node_id, &msg); expect_channel_pending_event(&initiator, &receiver_node_id); expect_channel_pending_event(&receiver, &initiator_node_id); - check_added_monitors!(initiator, 1); + check_added_monitors(&initiator, 1); assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!( @@ -1840,11 +1830,11 @@ pub fn create_channel_manual_funding<'a, 'b, 'c: 'd, 'd>( funding_tx.clone(), ) .unwrap(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); node_b.node.handle_funding_created(node_a_id, &funding_created); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); let channel_id_b = expect_channel_pending_event(node_b, &node_a_id); if zero_conf { @@ -2010,7 +2000,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( let as_funding_created = get_event_msg!(nodes[a], MessageSendEvent::SendFundingCreated, node_b_id); nodes[b].node.handle_funding_created(node_a_id, &as_funding_created); - check_added_monitors!(nodes[b], 1); + check_added_monitors(&nodes[b], 1); let cs_funding_signed = get_event_msg!(nodes[b], MessageSendEvent::SendFundingSigned, node_a_id); @@ -2018,7 +2008,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( nodes[a].node.handle_funding_signed(node_b_id, &cs_funding_signed); expect_channel_pending_event(&nodes[a], &node_b_id); - check_added_monitors!(nodes[a], 1); + check_added_monitors(&nodes[a], 1); 
assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); @@ -2641,11 +2631,11 @@ pub fn do_main_commitment_signed_dance( let node_b_id = node_b.node.get_our_node_id(); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs(node_a, &node_b_id); - check_added_monitors!(node_b, 0); + check_added_monitors(&node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { let mut events = node_b.node.get_and_clear_pending_msg_events(); @@ -2662,7 +2652,7 @@ pub fn do_main_commitment_signed_dance( events.get(0).map(|e| e.clone()), ) }; - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); if fail_backwards { assert!(node_a.node.get_and_clear_pending_events().is_empty()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); @@ -2701,10 +2691,10 @@ pub fn do_commitment_signed_dance( ) { let node_b_id = node_b.node.get_our_node_id(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); node_a.node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); // If this commitment signed dance was due to a claim, don't check for an RAA monitor update. let channel_id = commitment_signed[0].channel_id; @@ -2728,7 +2718,7 @@ pub fn do_commitment_signed_dance( channel_id, }], ); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap(); let mut number_of_msg_events = 0; @@ -3426,7 +3416,7 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>( Retry::Attempts(0), ) .unwrap(); - check_added_monitors!(origin_node, expected_paths.len()); + check_added_monitors(&origin_node, expected_paths.len()); pass_along_route(origin_node, expected_paths, recv_value, our_payment_hash, our_payment_secret); payment_id } @@ -3440,7 +3430,7 @@ pub fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) prev_node .node .handle_update_fail_htlc(node.node.get_our_node_id(), &updates.update_fail_htlcs[0]); - check_added_monitors!(prev_node, 0); + check_added_monitors(&prev_node, 0); let is_first_hop = origin_node_id == prev_node.node.get_our_node_id(); // We do not want to fail backwards on the first hop. All other hops should fail backwards. 
@@ -3548,7 +3538,7 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option assert_eq!(node.node.get_our_node_id(), payment_event.node_id); node.node.handle_update_add_htlc(prev_node.node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(node, 0); + check_added_monitors(&node, 0); if is_last_hop && is_probe { do_commitment_signed_dance(node, prev_node, &payment_event.commitment_msg, true, true); @@ -3650,14 +3640,14 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option assert!(events_2.len() == 1); expect_htlc_handling_failed_destinations!(events_2, &[failure]); node.node.process_pending_htlc_forwards(); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); } else { assert!(events_2.is_empty()); } } else if !is_last_hop { let mut events_2 = node.node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); } @@ -3692,7 +3682,7 @@ pub fn send_probe_along_route<'a, 'b, 'c>( let mut events = origin_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_route.len()); - check_added_monitors!(origin_node, expected_route.len()); + check_added_monitors(&origin_node, expected_route.len()); for (path, payment_hash) in expected_route.iter() { let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events); @@ -3959,7 +3949,7 @@ pub fn pass_claimed_payment_along_route_from_ev( $prev_node.node.get_our_node_id(), next_msgs.as_ref().unwrap().0.clone(), ); - check_added_monitors!($node, 0); + check_added_monitors(&$node, 0); assert!($node.node.get_and_clear_pending_msg_events().is_empty()); let commitment = &next_msgs.as_ref().unwrap().1; do_commitment_signed_dance($node, $prev_node, commitment, false, false); @@ -4024,7 +4014,7 @@ pub fn pass_claimed_payment_along_route_from_ev( ); expected_total_fee_msat += actual_fee.unwrap(); fwd_amt_msat += actual_fee.unwrap(); - check_added_monitors!($node, 1); + check_added_monitors(&$node, 1); let new_next_msgs = if $new_msgs { let events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4073,7 +4063,7 @@ pub fn pass_claimed_payment_along_route_from_ev( // Ensure that claim_funds is idempotent. 
expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(expected_paths[0].last().unwrap(), 0); + check_added_monitors(&expected_paths[0].last().unwrap(), 0); expected_total_fee_msat } @@ -4168,7 +4158,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( our_payment_hash: PaymentHash, expected_fail_reason: PaymentFailureReason, ) { let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect(); - check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len()); + check_added_monitors(&expected_paths[0].last().unwrap(), expected_paths.len()); let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, Vec), PublicKey)> = Vec::with_capacity(expected_paths.len()); @@ -4280,7 +4270,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0, ); - check_added_monitors!(origin_node, 0); + check_added_monitors(&origin_node, 0); assert!(origin_node.node.get_and_clear_pending_msg_events().is_empty()); let commitment = &next_msgs.as_ref().unwrap().1; do_commitment_signed_dance(origin_node, prev_node, commitment, false, false); @@ -4343,7 +4333,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( pending_events ); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(expected_paths[0].last().unwrap(), 0); + check_added_monitors(&expected_paths[0].last().unwrap(), 0); } pub fn fail_payment<'a, 'b, 'c>( @@ -5199,9 +5189,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { || pending_cell_htlc_fails.0 != 0 || expect_renegotiated_funding_locked_monitor_update.1 { - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); } else { - check_added_monitors!(node_b, 0); + check_added_monitors(&node_b, 0); } let mut resp_2 = Vec::new(); @@ -5213,9 +5203,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { || pending_cell_htlc_fails.1 != 0 || expect_renegotiated_funding_locked_monitor_update.0 { - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); } else { - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); } // We don't yet support both needing updates, as that would require a different commitment dance: @@ -5290,7 +5280,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_a.node.handle_revoke_and_ack(node_b_id, &chan_msgs.1.unwrap()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); } else { assert!(chan_msgs.1.is_none()); } @@ -5330,15 +5320,15 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_b_id, &commitment_update.commitment_signed, ); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!( - node_b, - if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 } + check_added_monitors( + &node_b, + if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 }, ); } } 
else { @@ -5404,7 +5394,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_b.node.handle_revoke_and_ack(node_a_id, &chan_msgs.1.unwrap()); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); } else { assert!(chan_msgs.1.is_none()); } @@ -5444,15 +5434,15 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_a_id, &commitment_update.commitment_signed, ); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_a.node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!( - node_a, - if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 } + check_added_monitors( + &node_a, + if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 }, ); } } else { @@ -5535,7 +5525,7 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( tx.clone(), ) .is_ok()); - check_added_monitors!(funding_node, 0); + check_added_monitors(&funding_node, 0); let events = funding_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), params.len()); for (other_node, ..) in params { diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs index f67ad442c29..fa7e8d8f132 100644 --- a/lightning/src/ln/max_payment_path_len_tests.rs +++ b/lightning/src/ln/max_payment_path_len_tests.rs @@ -92,7 +92,7 @@ fn large_payment_metadata() { .node .send_payment(payment_hash, max_sized_onion.clone(), id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1]]; @@ -174,7 +174,7 @@ fn large_payment_metadata() { .node .send_payment(payment_hash_2, onion_allowing_2_hops, id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 34064ebb484..04915affa20 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -68,7 +68,7 @@ fn chanmon_fail_from_stale_commitment() { let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let bs_txn = get_local_commitment_txn!(nodes[1], chan_id_2); @@ -78,19 +78,19 @@ fn chanmon_fail_from_stale_commitment() { expect_and_process_pending_htlcs(&nodes[1], false); get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Don't bother delivering the new HTLC add/commits, instead confirming the pre-HTLC commitment // transaction for nodes[1]. 
mine_transaction(&nodes[1], &bs_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let fail_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]); @@ -140,7 +140,7 @@ fn revoked_output_htlc_resolution_timing() { // Confirm the revoked commitment transaction, closing the channel. mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); // Two justice transactions will be broadcast, one on the unpinnable, revoked to_self output, @@ -185,7 +185,7 @@ fn archive_fully_resolved_monitors() { let message = "Channel force-closed".to_owned(); nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); @@ -565,18 +565,18 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 3_000_100); let mut b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); // We claim the dust payment here as well, but it won't impact our claimable balances as its // dust and thus doesn't appear on chain at all. 
nodes[1].node.claim_funds(dust_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], dust_payment_hash, 3_000); nodes[1].node.claim_funds(timeout_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], timeout_payment_hash, 4_000_200); if prev_commitment_tx { @@ -585,14 +585,14 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_fulfill); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_raa, as_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); let _htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs); let _bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } // Once B has received the payment preimage, it includes the value of the HTLC in its @@ -681,11 +681,11 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c assert_eq!(remote_txn[0].output[b_broadcast_txn[1].input[0].previous_output.vout as usize].value.to_sat(), 4_000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); @@ -885,7 +885,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); @@ -897,7 +897,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 20_000_000); nodes[0].node.send_payment_with_route(route_2, payment_hash_2, RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = 
get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); @@ -907,7 +907,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 20_000_000); nodes[1].node.claim_funds(payment_preimage_2); get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_2, 20_000_000); let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64; @@ -918,7 +918,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let message = "Channel force-closed".to_owned(); let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000); @@ -980,7 +980,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b // Get nodes[1]'s HTLC claim tx for the second HTLC mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_claim_txn.len(), 1); @@ -1210,7 +1210,7 @@ fn test_no_preimage_inbound_htlc_balances() { mine_transaction(&nodes[0], &as_txn[0]); nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); assert_eq!(as_pre_spend_claims, @@ -1218,7 +1218,7 @@ fn test_no_preimage_inbound_htlc_balances() { mine_transaction(&nodes[1], &as_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); let node_b_commitment_claimable = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -1427,12 +1427,12 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc nodes[1].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[1], claimed_payment_hash, 3_000_100); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let _b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); connect_blocks(&nodes[0], htlc_cltv_timeout + 1 - 10); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 5); @@ -1461,7 +1461,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc connect_blocks(&nodes[1], 
htlc_cltv_timeout + 1 - 10); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_events(&nodes[1], &[ExpectedCloseEvent { channel_capacity_sats: Some(1_000_000), channel_id: Some(chan_id), @@ -1723,7 +1723,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor // B will generate an HTLC-Success from its revoked commitment tx mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); if keyed_anchors || p2a_anchor { handle_bump_htlc_event(&nodes[1], 1); @@ -1767,7 +1767,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); let to_remote_conf_height = nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -2020,7 +2020,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho nodes[0].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[0], claimed_payment_hash, 3_000_100); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let _a_htlc_msgs = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { @@ -2049,7 +2049,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho mine_transaction(&nodes[1], &as_revoked_txn[0]); check_closed_broadcast!(nodes[1], true); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut claim_txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(claim_txn.len(), 2); @@ -2635,9 +2635,9 @@ fn do_test_yield_anchors_events(have_htlcs: bool, p2a_anchor: bool) { } mine_transactions(&nodes[0], &[&commitment_tx, &anchor_tx]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); mine_transactions(&nodes[1], &[&commitment_tx, &anchor_tx]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if !have_htlcs { // If we don't have any HTLCs, we're done, the rest of the test is about HTLC transactions @@ -2828,7 +2828,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { } } check_closed_broadcast(&nodes[0], 2, true); - check_added_monitors!(&nodes[0], 2); + check_added_monitors(&nodes[0], 2); check_closed_event(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id(); 2], 1000000); // Alice should detect the confirmed revoked commitments, and attempt to claim all of the @@ -3167,13 +3167,13 @@ fn do_test_monitor_claims_with_random_signatures(keyed_anchors: bool, p2a_anchor mine_transaction(closing_node, anchor_tx.as_ref().unwrap()); } check_closed_broadcast!(closing_node, true); - check_added_monitors!(closing_node, 1); + check_added_monitors(&closing_node, 1); let message = "ChannelMonitor-initiated commitment transaction broadcast".to_string(); 
check_closed_event(&closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, &[other_node.node.get_our_node_id()], 1_000_000); mine_transaction(other_node, &commitment_tx); check_closed_broadcast!(other_node, true); - check_added_monitors!(other_node, 1); + check_added_monitors(&other_node, 1); check_closed_event(&other_node, 1, ClosureReason::CommitmentTxConfirmed, &[closing_node.node.get_our_node_id()], 1_000_000); // If we update the best block to the new height before providing the confirmed transactions, diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 4c53aefe58d..906d9e247ce 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -2414,7 +2414,7 @@ fn rejects_keysend_to_non_static_invoice_path() { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), keysend_payment_id, route_params, Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); @@ -2482,7 +2482,7 @@ fn no_double_pay_with_stale_channelmanager() { let expected_route: &[&[&Node]] = &[&[&nodes[1]], &[&nodes[1]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let ev = remove_first_msg_event_to_node(&bob_id, &mut events); let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) @@ -2507,7 +2507,7 @@ fn no_double_pay_with_stale_channelmanager() { reload_node!(nodes[0], &alice_chan_manager_serialized, &[&monitor_0, &monitor_1], persister, chain_monitor, alice_deserialized); // The stale manager results in closing the channels. check_closed_event(&nodes[0], 2, ClosureReason::OutdatedChannelManager, &[bob_id, bob_id], 10_000_000); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); // Alice receives a duplicate invoice, but the payment should be transitioned to Retryable by now. 
nodes[0].onion_messenger.handle_onion_message(bob_id, &invoice_om); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index f9b4ab28e88..03557469537 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -133,7 +133,7 @@ fn run_onion_failure_test_with_fail_intercept( .node .send_payment_with_route(route.clone(), *payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); // temper update_add (0 => 1) let mut update_add_0 = update_0.update_add_htlcs[0].clone(); @@ -170,7 +170,7 @@ fn run_onion_failure_test_with_fail_intercept( expect_htlc_forward!(&nodes[1]); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert_eq!(update_1.update_add_htlcs.len(), 1); // tamper update_add (1 => 2) let mut update_add_1 = update_1.update_add_htlcs[0].clone(); @@ -202,7 +202,7 @@ fn run_onion_failure_test_with_fail_intercept( }, _ => {}, } - check_added_monitors!(&nodes[2], 1); + check_added_monitors(&nodes[2], 1); let update_2_1 = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); assert!(update_2_1.update_fail_htlcs.len() == 1); @@ -405,7 +405,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route.clone(), payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -456,7 +456,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route, payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -1548,7 +1548,7 @@ fn test_overshoot_final_cltv() { .send_payment_with_route(route, payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add_0 = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_0); @@ -1567,7 +1567,7 @@ fn test_overshoot_final_cltv() { } expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); @@ -2285,7 +2285,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -2300,7 +2300,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { &nodes[1], &[HTLCHandlingFailureType::Receive { payment_hash }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); 
assert_eq!(events.len(), 1); @@ -2435,7 +2435,7 @@ fn test_phantom_onion_hmac_failure() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2470,7 +2470,7 @@ fn test_phantom_onion_hmac_failure() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2508,7 +2508,7 @@ fn test_phantom_invalid_onion_payload() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2571,7 +2571,7 @@ fn test_phantom_invalid_onion_payload() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2607,7 +2607,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2637,7 +2637,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2676,7 +2676,7 @@ fn test_phantom_failure_too_low_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2691,7 +2691,7 @@ fn test_phantom_failure_too_low_cltv() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2729,7 +2729,7 @@ fn test_phantom_failure_modified_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, 
PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2784,7 +2784,7 @@ fn test_phantom_failure_expires_too_soon() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2834,7 +2834,7 @@ fn test_phantom_failure_too_low_recv_amt() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2851,7 +2851,7 @@ fn test_phantom_failure_too_low_recv_amt() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2904,7 +2904,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2954,7 +2954,7 @@ fn test_phantom_failure_reject_payment() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2981,7 +2981,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 6c982738a52..f9894fa8819 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -144,7 +144,7 @@ fn mpp_retry() { let onion = RecipientOnionFields::secret_only(pay_secret); let retry = Retry::Attempts(1); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -169,7 +169,7 @@ fn mpp_retry() { assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); 
assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[2], &htlc_updates.commitment_signed, false, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -191,7 +191,7 @@ fn mpp_retry() { route.route_params = Some(route_params.clone()); nodes[0].router.expect_find_route(route_params, Ok(route)); expect_and_process_pending_htlcs(&nodes[0], false); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -262,7 +262,7 @@ fn mpp_retry_overpay() { let onion = RecipientOnionFields::secret_only(pay_secret); let retry = Retry::Attempts(1); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -288,7 +288,7 @@ fn mpp_retry_overpay() { assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[2], &htlc_updates.commitment_signed, false, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -314,7 +314,7 @@ fn mpp_retry_overpay() { nodes[0].router.expect_find_route(route_params, Ok(route)); nodes[0].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -362,7 +362,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { // Initiate the MPP payment. 
let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -384,7 +384,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let htlc_fail_updates = get_htlc_update_msgs(&nodes[3], &node_b_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates.update_fail_htlcs[0]); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let commitment = &htlc_fail_updates.commitment_signed; do_commitment_signed_dance(&nodes[1], &nodes[3], commitment, false, false); @@ -397,7 +397,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let commitment = &htlc_fail_updates.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[1], commitment, false, false); @@ -461,7 +461,7 @@ fn do_test_keysend_payments(public_node: bool) { nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); @@ -510,7 +510,7 @@ fn test_mpp_keysend() { let id = PaymentId([42; 32]); let hash = nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -553,7 +553,7 @@ fn test_fulfill_hold_times() { let id = PaymentId([42; 32]); let hash = nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -621,7 +621,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let onion = RecipientOnionFields::spontaneous_empty(); let retry = Retry::Attempts(0); nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_0, params, retry).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id); let update_add_0 = update_0.update_add_htlcs[0].clone(); @@ -629,7 +629,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { do_commitment_signed_dance(&nodes[1], &nodes[0], &update_0.commitment_signed, false, true); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs(&nodes[1], &node_d_id); let update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(node_b_id, &update_add_1); @@ -670,7 +670,7 @@ fn 
test_reject_mpp_keysend_htlc_mismatching_secret() { let params = route.route_params.clone().unwrap(); let retry = Retry::Attempts(0); nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_1, params, retry).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_2 = get_htlc_update_msgs(&nodes[0], &node_c_id); let update_add_2 = update_2.update_add_htlcs[0].clone(); @@ -678,7 +678,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { do_commitment_signed_dance(&nodes[2], &nodes[0], &update_2.commitment_signed, false, true); expect_and_process_pending_htlcs(&nodes[2], false); - check_added_monitors!(&nodes[2], 1); + check_added_monitors(&nodes[2], 1); let update_3 = get_htlc_update_msgs(&nodes[2], &node_d_id); let update_add_3 = update_3.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(node_c_id, &update_add_3); @@ -710,7 +710,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { nodes[3].node.process_pending_htlc_forwards(); let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[3], &[fail_type]); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); // Fail back along nodes[2] let update_fail_0 = get_htlc_update_msgs(&nodes[3], &node_c_id); @@ -721,7 +721,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let fail_type = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_chan_id }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let update_fail_1 = get_htlc_update_msgs(&nodes[2], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_c_id, &update_fail_1.update_fail_htlcs[0]); @@ -806,7 +806,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -862,7 +862,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } else { assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.peer_disconnected(node_a_id); @@ -890,7 +890,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { nodes[1].node.handle_error(node_a_id, msg); check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) }, &[node_a_id], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); }, @@ -901,13 +901,13 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // Now claim the first payment, which should allow nodes[1] to claim the payment on-chain when // we close in a moment. 
nodes[2].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); let mut htlc_fulfill = get_htlc_update_msgs(&nodes[2], &node_b_id); let fulfill_msg = htlc_fulfill.update_fulfill_htlcs.remove(0); nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); do_commitment_signed_dance(&nodes[1], &nodes[2], &htlc_fulfill.commitment_signed, false, false); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false); @@ -990,7 +990,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let id = PaymentId(payment_hash.0); let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1071,7 +1071,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[0].node.has_pending_payments()); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let init_msg = msgs::Init { features: nodes[1].node.init_features(), @@ -1102,7 +1102,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { ); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(msg) }; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); }, _ => panic!("Unexpected event"), @@ -1115,7 +1115,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { nodes[2].node.fail_htlc_backwards(&hash); let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let htlc_fulfill_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &htlc_fulfill_updates.update_fail_htlcs[0]); @@ -1197,7 +1197,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // the payment is not (spuriously) listed as still pending. 
let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt, hash, payment_secret); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); @@ -1271,7 +1271,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( .force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone()) .unwrap(); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -1289,12 +1289,12 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( }; nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 10_000_000); mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast(&nodes[1], 1, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); let htlc_success_tx = { @@ -1450,7 +1450,7 @@ fn test_fulfill_restart_failure() { let mon_ser = get_monitor!(nodes[1], chan_id).encode(); nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 100_000); let mut htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); @@ -1467,7 +1467,7 @@ fn test_fulfill_restart_failure() { nodes[1].node.fail_htlc_backwards(&payment_hash); let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); @@ -1517,7 +1517,7 @@ fn get_ldk_payment_preimage() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.unwrap(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Make sure to use `get_payment_preimage` let preimage = Some(nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap()); @@ -1560,7 +1560,7 @@ fn sent_probe_is_probe_of_sending_node() { } get_htlc_update_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } #[test] @@ -1607,20 +1607,20 @@ fn failed_probe_yields_event() { let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap(); // node[0] -- update_add_htlcs -> node[1] - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); let probe_event = SendEvent::from_commitment_update(node_b_id, channel_id, updates); nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &probe_event.commitment_msg, false, false); 
expect_and_process_pending_htlcs(&nodes[1], true); // node[0] <- update_fail_htlcs -- node[1] - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); let _events = nodes[1].node.get_and_clear_pending_events(); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -1658,15 +1658,15 @@ fn onchain_failed_probe_yields_event() { let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap(); // node[0] -- update_add_htlcs -> node[1] - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); let probe_event = SendEvent::from_commitment_update(node_b_id, chan_id, updates); nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &probe_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let _ = get_htlc_update_msgs(&nodes[1], &node_c_id); // Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on @@ -1674,7 +1674,7 @@ fn onchain_failed_probe_yields_event() { let bs_txn = get_local_commitment_txn!(nodes[1], chan_id); confirm_transaction(&nodes[0], &bs_txn[0]); check_closed_broadcast!(&nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_added_monitors(&nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -1925,7 +1925,7 @@ fn claimed_send_payment_idempotent() { let onion = RecipientOnionFields::secret_only(second_payment_secret); nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret); claim_payment(&nodes[0], &[&nodes[1]], preimage_b); } @@ -1994,7 +1994,7 @@ fn abandoned_send_payment_idempotent() { // failed payment back. 
let onion = RecipientOnionFields::secret_only(second_payment_secret); nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret); claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage); } @@ -2163,12 +2163,12 @@ fn test_holding_cell_inflight_htlcs() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); } let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); @@ -2309,7 +2309,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[fail]); nodes[1].node.process_pending_htlc_forwards(); let update_fail = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_fail.update_fail_htlcs.len() == 1); let fail_msg = update_fail.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); @@ -2394,7 +2394,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { let fail_type = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let htlc_fail = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(htlc_fail.update_add_htlcs.is_empty()); @@ -2490,7 +2490,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(0)).unwrap(); - check_added_monitors!(nodes[0], num_mpp_parts); // one monitor per path + check_added_monitors(&nodes[0], num_mpp_parts); // one monitor per path let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), num_mpp_parts); @@ -2647,7 +2647,7 @@ fn do_automatic_retries(test: AutoRetry) { macro_rules! 
pass_failed_attempt_with_retry_along_path { ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => { // Send a payment attempt that fails due to lack of liquidity on the second hop - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id); let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(node_a_id, &update_add); @@ -2664,7 +2664,7 @@ fn do_automatic_retries(test: AutoRetry) { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); @@ -2710,7 +2710,7 @@ fn do_automatic_retries(test: AutoRetry) { // We retry payments in `process_pending_htlc_forwards` nodes[0].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); @@ -2738,7 +2738,7 @@ fn do_automatic_retries(test: AutoRetry) { // We retry payments in `process_pending_htlc_forwards` nodes[0].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); @@ -3008,7 +3008,7 @@ fn auto_retry_partial_failure() { } // Pass the first part of the payment along the path. - check_added_monitors!(nodes[0], 1); // only one HTLC actually made it out + check_added_monitors(&nodes[0], 1); // only one HTLC actually made it out let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); // Only one HTLC/channel update actually made it out @@ -3017,35 +3017,35 @@ fn auto_retry_partial_failure() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_2nd_htlcs = SendEvent::from_node(&nodes[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[1]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_2nd_htlcs.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); - check_added_monitors!(nodes[0], 
1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); @@ -3058,19 +3058,19 @@ fn auto_retry_partial_failure() { nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_claim.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_claim.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_third_raa, as_third_cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_third_raa); - check_added_monitors!(nodes[1], 4); + check_added_monitors(&nodes[1], 4); let mut bs_2nd_claim = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_third_cs); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); let bs_second_fulfill_a = bs_2nd_claim.update_fulfill_htlcs.remove(0); @@ -3078,18 +3078,18 @@ fn auto_retry_partial_failure() { nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_a); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_b); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_2nd_claim.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_fourth_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_fourth_cs); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); if let Event::PaymentPathSuccessful { .. 
} = events[0] { @@ -3167,7 +3167,7 @@ fn auto_retry_zero_attempts_send_error() { } else { panic!(); } - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); } #[test] @@ -3203,12 +3203,12 @@ fn fails_paying_after_rejected_by_payee() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat); @@ -3336,7 +3336,7 @@ fn retry_multi_path_single_failed_payment() { } let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(htlc_msgs.len(), 2); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); } #[test] @@ -3417,7 +3417,7 @@ fn immediate_retry_on_failure() { } let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(htlc_msgs.len(), 2); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); } #[test] @@ -3541,40 +3541,40 @@ fn no_extra_retries_on_back_to_back_fail() { nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); let first_htlc = SendEvent::from_node(&nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert_eq!(first_htlc.msgs.len(), 1); nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let second_htlc = SendEvent::from_node(&nodes[0]); assert_eq!(second_htlc.msgs.len(), 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &second_htlc.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); 
nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); let next_hop_failure = @@ -3631,7 +3631,7 @@ fn no_extra_retries_on_back_to_back_fail() { nodes[0].node.process_pending_htlc_forwards(); let retry_htlc_updates = SendEvent::from_node(&nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); let commitment = &retry_htlc_updates.commitment_msg; @@ -3785,26 +3785,26 @@ fn test_simple_partial_retry() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); let first_htlc = SendEvent::from_node(&nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert_eq!(first_htlc.msgs.len(), 1); nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let second_htlc_updates = SendEvent::from_node(&nodes[0]); assert_eq!(second_htlc_updates.msgs.len(), 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc_updates.msgs[0]); let commitment = &second_htlc_updates.commitment_msg; @@ -3860,14 +3860,14 @@ fn test_simple_partial_retry() { nodes[0].node.process_pending_htlc_forwards(); let retry_htlc_updates = SendEvent::from_node(&nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); let commitment = &retry_htlc_updates.commitment_msg; do_commitment_signed_dance(&nodes[1], &nodes[0], commitment, false, true); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_forward = get_htlc_update_msgs(&nodes[1], &node_c_id); nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward.update_add_htlcs[0]); @@ -3987,7 +3987,7 @@ fn test_threaded_payment_retries() { let id = PaymentId(payment_hash.0); let retry = Retry::Attempts(0xdeadbeef); nodes[0].node.send_payment(payment_hash, onion, id, route_params.clone(), retry).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_msg_events.len(), 2); send_msg_events.retain(|msg| { @@ -4086,7 +4086,7 @@ fn test_threaded_payment_retries() { nodes[0].node.process_pending_htlc_forwards(); send_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); if cur_time > end_time { break; @@ -4124,14 +4124,14 @@ fn 
do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: } nodes[1].node.claim_funds(our_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000); if at_midpoint { let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } else { let mut fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill.update_fulfill_htlcs.remove(0)); @@ -4466,7 +4466,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - check_added_monitors!(&nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); @@ -4536,7 +4536,7 @@ fn test_retry_custom_tlvs() { nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), Retry::Attempts(1)).unwrap(); - check_added_monitors!(nodes[0], 1); // one monitor per path + check_added_monitors(&nodes[0], 1); // one monitor per path // Add the HTLC along the first hop. let htlc_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); @@ -4550,7 +4550,7 @@ fn test_retry_custom_tlvs() { let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
} = htlc_updates; @@ -4571,7 +4571,7 @@ fn test_retry_custom_tlvs() { route.route_params = Some(route_params.clone()); nodes[0].router.expect_find_route(route_params, Ok(route)); nodes[0].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; @@ -4673,7 +4673,7 @@ fn do_test_custom_tlvs_consistency( .node .test_send_payment_along_path(path_a, &hash, onion, amt_msat, cur_height, id, &None, priv_a) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4695,7 +4695,7 @@ fn do_test_custom_tlvs_consistency( .node .test_send_payment_along_path(path_b, &hash, onion, amt_msat, cur_height, id, &None, priv_b) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -4707,14 +4707,14 @@ fn do_test_custom_tlvs_consistency( do_commitment_signed_dance(&nodes[2], &nodes[0], commitment, false, false); expect_and_process_pending_htlcs(&nodes[2], false); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); - check_added_monitors!(nodes[3], 0); + check_added_monitors(&nodes[3], 0); do_commitment_signed_dance(&nodes[3], &nodes[2], &payment_event.commitment_msg, true, true); } expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]); @@ -4743,7 +4743,7 @@ fn do_test_custom_tlvs_consistency( &nodes[3], &expected_destinations, ); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let fail_updates_1 = get_htlc_update_msgs(&nodes[3], &node_c_id); nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); @@ -4753,7 +4753,7 @@ fn do_test_custom_tlvs_consistency( let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs(&nodes[2], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]); @@ -4815,7 +4815,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { }; let retry = Retry::Attempts(1); nodes[0].node.send_payment(payment_hash, onion, payment_id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_events.len(), 2); @@ -5009,7 +5009,7 @@ fn test_htlc_forward_considers_anchor_outputs_value() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5169,7 +5169,7 @@ fn test_non_strict_forwarding() { let onion = 
RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let mut send_event = SendEvent::from_event(msg_events.remove(0)); @@ -5177,7 +5177,7 @@ fn test_non_strict_forwarding() { do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); send_event = SendEvent::from_event(msg_events.remove(0)); @@ -5209,7 +5209,7 @@ fn test_non_strict_forwarding() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let mut send_event = SendEvent::from_event(msg_events.remove(0)); @@ -5217,7 +5217,7 @@ fn test_non_strict_forwarding() { do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let routed_scid = route.paths[0].hops[1].short_channel_id; let routed_chan_id = match routed_scid { scid if scid == chan_update_1.contents.short_channel_id => channel_id_1, @@ -5346,7 +5346,7 @@ fn pay_route_without_params() { let id = PaymentId(hash.0); nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index ab7cad9be44..83aaca24203 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -83,7 +83,7 @@ fn test_priv_forwarding_rejection() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); @@ -166,7 +166,7 @@ fn test_priv_forwarding_rejection() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -350,7 +350,7 @@ fn test_routed_scid_alias() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); 
pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret); @@ -514,7 +514,7 @@ fn test_inbound_scid_privacy() { node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_c_id), ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let cs_funding_signed = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_b_id); @@ -522,7 +522,7 @@ fn test_inbound_scid_privacy() { nodes[1].node.handle_funding_signed(node_c_id, &cs_funding_signed); expect_channel_pending_event(&nodes[1], &node_c_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1); @@ -580,7 +580,7 @@ fn test_inbound_scid_privacy() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); @@ -601,7 +601,7 @@ fn test_inbound_scid_privacy() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route_2, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = SendEvent::from_node(&nodes[0]); assert_eq!(node_b_id, payment_event.node_id); @@ -698,7 +698,7 @@ fn test_scid_alias_returned() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true); @@ -710,7 +710,7 @@ fn test_scid_alias_returned() { channel_id: chan.0.channel_id, }]; expect_htlc_failure_conditions(events, &expected_failures); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); @@ -735,7 +735,7 @@ fn test_scid_alias_returned() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true); @@ -845,7 +845,7 @@ fn test_0conf_channel_with_async_monitor() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_funding_created(node_a_id, &funding_created); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let channel_id = ChannelId::v1_from_funding_outpoint(funding_output); @@ -860,7 +860,7 @@ fn test_0conf_channel_with_async_monitor() { MessageSendEvent::SendFundingSigned { node_id, msg } => { 
assert_eq!(*node_id, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -938,26 +938,26 @@ fn test_0conf_channel_with_async_monitor() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_send = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &as_send.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_send.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_raa, bs_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -971,10 +971,10 @@ fn test_0conf_channel_with_async_monitor() { .chain_monitor .channel_monitor_updated(bs_raa.channel_id, latest_update) .unwrap(); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_send = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &bs_send.msgs[0]); @@ -1011,7 +1011,7 @@ fn test_0conf_close_no_early_chan_update() { send_payment(&nodes[0], &[&nodes[1]], 100_000); nodes[0].node.force_close_all_channels_broadcasting_latest_txn(message.clone()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); let _ = get_err_msg(&nodes[0], &node_b_id); diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 6daf4d65b9d..a2b14a798c4 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -101,7 +101,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { let onion = RecipientOnionFields::secret_only(payment_secret); let payment_id = PaymentId(payment_hash.0); local_node.node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); - check_added_monitors!(local_node, 1); + check_added_monitors(&local_node, 1); // Attempt to send an HTLC, but don't fully commit it yet. 
let update_add = get_htlc_update_msgs(&local_node, &remote_node_id); @@ -373,7 +373,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { let onion1 = RecipientOnionFields::secret_only(payment_secret1); let payment_id1 = PaymentId(payment_hash1.0); nodes[1].node.send_payment_with_route(route1, payment_hash1, onion1, payment_id1).unwrap(); - check_added_monitors!(&nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Send a payment in the opposite direction. Since nodes[0] hasn't sent its own `stfu` yet, it's @@ -383,7 +383,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { let onion2 = RecipientOnionFields::secret_only(payment_secret2); let payment_id2 = PaymentId(payment_hash2.0); nodes[0].node.send_payment_with_route(route2, payment_hash2, onion2, payment_id2).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_add = get_htlc_update_msgs(&nodes[0], &node_id_1); nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 2e9471a787d..95b993a4a90 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -259,7 +259,7 @@ fn test_manager_serialize_deserialize_events() { let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, &node_b.node.get_our_node_id(), channel_value, 42); node_a.node.funding_transaction_generated(temporary_channel_id, node_b.node.get_our_node_id(), tx.clone()).unwrap(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()); let channel_id = ChannelId::v1_from_funding_txid( @@ -462,7 +462,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { for monitor in node_0_monitors.drain(..) { assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.channel_id(), monitor), Ok(ChannelMonitorUpdateStatus::Completed)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } nodes[0].node = &nodes_0_deserialized; @@ -474,7 +474,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { check_spends!(txn[0], funding_tx); assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.compute_txid()); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // nodes[1] and nodes[2] have no lost state with nodes[0]... 
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); @@ -647,7 +647,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, .node .force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000); @@ -697,7 +697,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, assert_eq!(err_msgs_0.len(), 1); nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), &err_msgs_0[0]); assert!(nodes[1].node.list_usable_channels().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) } , &[nodes[0].node.get_our_node_id()], 1000000); check_closed_broadcast!(nodes[1], false); @@ -754,7 +754,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -785,7 +785,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000); nodes[3].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[3], 2); + check_added_monitors(&nodes[3], 2); expect_payment_claimed!(nodes[3], payment_hash, 15_000_000); // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we @@ -881,7 +881,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest // Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC // claim should fly. let mut ds_msgs = nodes[3].node.get_and_clear_pending_msg_events(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); assert_eq!(ds_msgs.len(), 2); if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); } @@ -889,7 +889,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest MessageSendEvent::UpdateHTLCs { mut updates, .. 
} => { let mut fulfill = updates.update_fulfill_htlcs.remove(0); nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), fulfill); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); do_commitment_signed_dance(&nodes[2], &nodes[3], &updates.commitment_signed, false, true); @@ -951,7 +951,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV; nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); @@ -985,7 +985,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let payment_event = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); if claim_htlc { get_monitor!(nodes[2], chan_id_2).provide_payment_preimage_unsafe_legacy( @@ -1005,7 +1005,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 }); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[2], 1, reason, &[nodes[1].node.get_our_node_id()], 100000); check_closed_broadcast!(nodes[2], true); @@ -1031,7 +1031,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht nodes[1].node.timer_tick_occurred(); let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_commitment_tx.len(), 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); @@ -1064,7 +1064,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht } else { expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true); } - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut update = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); if claim_htlc { @@ -1124,7 +1124,7 @@ fn removed_payment_no_manager_persistence() { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }] ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { @@ -1159,7 +1159,7 @@ fn removed_payment_no_manager_persistence() { &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = 
nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { @@ -1266,7 +1266,7 @@ fn test_htlc_localremoved_persistence() { RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap(); nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 043862fea90..b56caf96008 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -65,7 +65,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { // Provide preimage to node 2 by claiming payment nodes[2].node.claim_funds(our_payment_preimage); expect_payment_claimed!(nodes[2], our_payment_hash, 1_000_000); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let claim_txn = if local_commitment { @@ -79,7 +79,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { // Give node 2 node 1's transactions and get its response (claiming the HTLC instead). connect_block(&nodes[2], &create_dummy_block(nodes[2].best_block_hash(), 42, node_1_commitment_txn.clone())); check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate) - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); check_closed_event(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 100000); let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_2_commitment_txn.len(), 1); // ChannelMonitor: 1 offered HTLC-Claim @@ -113,11 +113,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { vec![node_2_commitment_txn.pop().unwrap()] }; check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate) - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000); // Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1. 
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0); if claim { @@ -139,7 +139,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { ); } - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Which should result in an immediate claim/fail of the HTLC: let mut htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); if claim { @@ -199,7 +199,7 @@ fn test_counterparty_revoked_reorg() { nodes[0].node.claim_funds(payment_preimage_3); let _ = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_claimed!(nodes[0], payment_hash_3, 4_000_000); let mut unrevoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2); @@ -211,7 +211,7 @@ fn test_counterparty_revoked_reorg() { // on any of the HTLCs, at least until we get six confirmations (which we won't get). mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); // Connect up to one block before the revoked transaction would be considered final, then do a @@ -313,7 +313,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } if reload_node { @@ -380,7 +380,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ // we were already running. 
nodes[0].node.test_process_background_events(); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(txn.len(), 1); @@ -389,7 +389,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ let expected_err = "Funding transaction was un-confirmed, originally locked at 6 confs."; if reorg_after_reload || !reload_node { handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed, originally locked at 6 confs."); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) }; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); } @@ -477,14 +477,14 @@ fn test_set_outpoints_partial_claiming() { expect_payment_claimed!(nodes[0], payment_hash_1, 3_000_000); nodes[0].node.claim_funds(payment_preimage_2); expect_payment_claimed!(nodes[0], payment_hash_2, 3_000_000); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); nodes[0].node.get_and_clear_pending_msg_events(); // Connect blocks on node A commitment transaction mine_transaction(&nodes[0], &remote_txn[0]); check_closed_broadcast!(nodes[0], true); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Verify node A broadcast tx claiming both HTLCs { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -508,7 +508,7 @@ fn test_set_outpoints_partial_claiming() { channel_funding_txo: None, user_channel_id: None, }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Verify node B broadcast 2 HTLC-timeout txn let partial_claim_tx = { let mut node_txn = nodes[1].tx_broadcaster.unique_txn_broadcast(); @@ -583,11 +583,11 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) { check_closed_broadcast!(nodes[0], true); assert!(nodes[0].node.list_channels().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); check_closed_broadcast!(nodes[1], true); assert!(nodes[1].node.list_channels().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 5e7c7d9fd35..192bc6399e4 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -175,7 +175,7 @@ fn expect_channel_shutdown_state_with_htlc() { // Claim Funds on Node2 nodes[2].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_0, 100_000); // Fulfil HTLCs on node1 and node0 @@ -187,7 +187,7 @@ fn expect_channel_shutdown_state_with_htlc() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, 
updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -361,7 +361,7 @@ fn expect_channel_shutdown_state_with_force_closure() { .force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, message.clone()) .unwrap(); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown); assert!(nodes[1].node.list_channels().is_empty()); @@ -371,7 +371,7 @@ fn expect_channel_shutdown_state_with_force_closure() { check_spends!(node_txn[0], chan_1.3); mine_transaction(&nodes[0], &node_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); @@ -452,7 +452,7 @@ fn updates_shutdown_wait() { unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {}); nodes[2].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_0, 100_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -463,7 +463,7 @@ fn updates_shutdown_wait() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -549,7 +549,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { .node .send_payment(our_payment_hash, onion, id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); assert_eq!(updates.update_add_htlcs.len(), 1); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -564,7 +564,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_shutdown(node_a_id, &node_0_shutdown); assert!(commitment_signed_dance_through_cp_raa(&nodes[1], &nodes[0], false, false).is_none()); expect_and_process_pending_htlcs(&nodes[1], false); @@ -718,7 +718,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -729,7 +729,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, 
updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -834,7 +834,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // checks it, but in this case nodes[1] didn't ever get a chance to receive a // closing_signed so we do it ourselves check_closed_broadcast!(nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) }; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); } @@ -920,7 +920,7 @@ fn test_upfront_shutdown_script() { nodes[0].node.close_channel(&chan.2, &node_b_id).unwrap(); let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id); nodes[1].node.handle_shutdown(node_a_id, &node_1_shutdown); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -935,7 +935,7 @@ fn test_upfront_shutdown_script() { *nodes[0].override_init_features.borrow_mut() = None; let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); nodes[0].node.handle_shutdown(node_b_id, &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -951,7 +951,7 @@ fn test_upfront_shutdown_script() { //// channel smoothly let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); nodes[0].node.handle_shutdown(node_b_id, &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1088,7 +1088,7 @@ fn test_segwit_v0_shutdown_script() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a segwit v0 script supported even without option_shutdown_anysegwit let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1127,7 +1127,7 @@ fn test_anysegwit_shutdown_script() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a non-v0 segwit script supported by option_shutdown_anysegwit let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1188,7 +1188,7 @@ fn test_unsupported_anysegwit_shutdown_script() { Ok(_) => panic!("Expected error"), } nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // 
Use a non-v0 segwit script unsupported without option_shutdown_anysegwit let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1217,7 +1217,7 @@ fn test_invalid_shutdown_script() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a segwit v0 script with an unsupported witness program let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1253,7 +1253,7 @@ fn test_user_shutdown_script() { .node .close_channel_with_feerate_and_script(&chan.2, &node_a_id, None, Some(shutdown_script)) .unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1390,7 +1390,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { && txn[0].output[0].script_pubkey.is_p2wsh()) ); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string(), }; @@ -1819,7 +1819,7 @@ fn test_force_closure_on_low_stale_fee() { // Finally, connect one more block and check the force-close happened. connect_blocks(&nodes[1], 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast(&nodes[1], 1, true); let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 253, diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 060496d3bee..67a07325ad6 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -1124,7 +1124,7 @@ pub fn do_cannot_afford_on_holding_cell_release( if let MessageSendEvent::SendRevokeAndACK { node_id, msg } = events.pop().unwrap() { assert_eq!(node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } else { panic!(); } diff --git a/lightning/src/ln/zero_fee_commitment_tests.rs b/lightning/src/ln/zero_fee_commitment_tests.rs index f94066789c1..2503ad81cde 100644 --- a/lightning/src/ln/zero_fee_commitment_tests.rs +++ b/lightning/src/ln/zero_fee_commitment_tests.rs @@ -158,7 +158,7 @@ fn test_htlc_claim_chunking() { for (preimage, payment_hash) in node_1_preimages { nodes[1].node.claim_funds(preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, NONDUST_HTLC_AMT_MSAT); } nodes[0].node.get_and_clear_pending_msg_events(); @@ -188,12 +188,12 @@ fn test_htlc_claim_chunking() { assert_eq!(htlc_claims[1].output.len(), 24); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], CHAN_CAPACITY); assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], CHAN_CAPACITY); assert!(nodes[1].node.list_channels().is_empty()); diff --git 
a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 344e76d7e6d..e15209676e3 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -1516,13 +1516,13 @@ impl From for UpdateName { mod tests { use super::*; use crate::chain::ChannelMonitorUpdateStatus; + use crate::check_closed_broadcast; use crate::events::ClosureReason; use crate::ln::functional_test_utils::*; use crate::ln::msgs::BaseMessageHandler; use crate::sync::Arc; use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils::{self, TestStore}; - use crate::{check_added_monitors, check_closed_broadcast}; use bitcoin::hashes::hex::FromHex; use core::cmp; @@ -1738,7 +1738,7 @@ mod tests { ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_id_1], 100000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(node_txn.len(), 1); @@ -1750,7 +1750,7 @@ mod tests { let reason = ClosureReason::CommitmentTxConfirmed; let node_id_0 = nodes[0].node.get_our_node_id(); check_closed_event(&nodes[1], 1, reason, &[node_id_0], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. // We always send at least two payments, and loop up to max_pending_updates_0 * 2. From 040ce2ab40e77b087bb2e2f30545c925d29cf58c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 10 Dec 2025 14:38:44 +0100 Subject: [PATCH 19/42] Convert channelmanager handle_error macro to fn --- lightning/src/ln/channelmanager.rs | 244 +++++++++++++++-------------- 1 file changed, 124 insertions(+), 120 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0411d519a9d..460994e8ec2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3201,69 +3201,6 @@ pub struct PhantomRouteHints { pub real_node_pubkey: PublicKey, } -#[rustfmt::skip] -macro_rules! handle_error { - ($self: ident, $internal: expr, $counterparty_node_id: expr) => { { - // In testing, ensure there are no deadlocks where the lock is already held upon - // entering the macro. - debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread); - debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); - - match $internal { - Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. 
}) => { - let mut msg_event = None; - - if let Some((shutdown_res, update_option)) = shutdown_finish { - let counterparty_node_id = shutdown_res.counterparty_node_id; - let channel_id = shutdown_res.channel_id; - let logger = WithContext::from( - &$self.logger, Some(counterparty_node_id), Some(channel_id), None - ); - log_error!(logger, "Closing channel: {}", err.err); - - $self.finish_close_channel(shutdown_res); - if let Some((update, node_id_1, node_id_2)) = update_option { - let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update, node_id_1, node_id_2 - }); - } - } else { - log_error!($self.logger, "Got non-closing error: {}", err.err); - } - - if let msgs::ErrorAction::IgnoreError = err.action { - if let Some(tx_abort) = tx_abort { - msg_event = Some(MessageSendEvent::SendTxAbort { - node_id: $counterparty_node_id, - msg: tx_abort, - }); - } - } else { - msg_event = Some(MessageSendEvent::HandleError { - node_id: $counterparty_node_id, - action: err.action.clone() - }); - } - - if let Some(msg_event) = msg_event { - let per_peer_state = $self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - if peer_state.is_connected { - peer_state.pending_msg_events.push(msg_event); - } - } - } - - // Return error in case higher-API need one - Err(err) - }, - } - } }; -} - macro_rules! send_channel_ready { ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ if $channel.context.is_connected() { @@ -3752,7 +3689,7 @@ where /// When a channel is removed, two things need to happen: /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, -/// (b) [`handle_error`] needs to be called without holding any locks (except +/// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls /// [`ChannelManager::finish_close_channel`]. /// @@ -4031,6 +3968,74 @@ where } } + fn handle_error( + &self, internal: Result, counterparty_node_id: PublicKey, + ) -> Result { + // In testing, ensure there are no deadlocks where the lock is already held upon + // entering the macro. + debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + + match internal { + Ok(msg) => Ok(msg), + Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. 
}) => { + let mut msg_event = None; + + if let Some((shutdown_res, update_option)) = shutdown_finish { + let counterparty_node_id = shutdown_res.counterparty_node_id; + let channel_id = shutdown_res.channel_id; + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(channel_id), + None, + ); + log_error!(logger, "Closing channel: {}", err.err); + + self.finish_close_channel(shutdown_res); + if let Some((update, node_id_1, node_id_2)) = update_option { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { + msg: update, + node_id_1, + node_id_2, + }); + } + } else { + log_error!(self.logger, "Got non-closing error: {}", err.err); + } + + if let msgs::ErrorAction::IgnoreError = err.action { + if let Some(tx_abort) = tx_abort { + msg_event = Some(MessageSendEvent::SendTxAbort { + node_id: counterparty_node_id, + msg: tx_abort, + }); + } + } else { + msg_event = Some(MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: err.action.clone(), + }); + } + + if let Some(msg_event) = msg_event { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if peer_state.is_connected { + peer_state.pending_msg_events.push(msg_event); + } + } + } + + // Return error in case higher-API need one + Err(err) + }, + } + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { @@ -4398,7 +4403,7 @@ where self.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None); } - let _ = handle_error!(self, shutdown_result, *counterparty_node_id); + let _ = self.handle_error(shutdown_result, *counterparty_node_id); Ok(()) } @@ -4509,7 +4514,7 @@ where /// When a channel is removed, two things need to happen: /// (a) [`convert_channel_err`] must be called in the same `per_peer_state` lock as the /// channel-closing action, - /// (b) [`handle_error`] needs to be called without holding any locks (except + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls this. #[rustfmt::skip] fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) { @@ -4610,7 +4615,7 @@ where } } for (err, counterparty_node_id) in shutdown_results.drain(..) { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } } @@ -4643,7 +4648,7 @@ where // error message. 
e.dont_send_error_message(); } - let _ = handle_error!(self, Err::<(), _>(e), *peer_node_id); + let _ = self.handle_error(Err::<(), _>(e), *peer_node_id); Ok(()) } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { log_error!(logger, "Force-closing inbound channel request"); @@ -5380,7 +5385,7 @@ where } return Ok(()); }; - match handle_error!(self, err, path.hops.first().unwrap().pubkey) { + match self.handle_error(err, path.hops.first().unwrap().pubkey) { Ok(_) => unreachable!(), Err(e) => Err(APIError::ChannelUnavailable { err: e.err }), } @@ -6073,7 +6078,7 @@ where mem::drop(peer_state_lock); mem::drop(per_peer_state); - let _: Result<(), _> = handle_error!(self, Err(err), counterparty); + let _: Result<(), _> = self.handle_error(Err(err), counterparty); Err($api_err) } } } @@ -6420,7 +6425,7 @@ where } mem::drop(funding_batch_states); for (err, counterparty_node_id) in shutdown_results { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } } result @@ -8367,7 +8372,7 @@ where } for (err, counterparty_node_id) in handle_errors { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } #[cfg(feature = "std")] @@ -8877,7 +8882,7 @@ where // Now we can handle any errors which were generated. for (counterparty_node_id, err) in errs.drain(..) { let res: Result<(), _> = Err(err); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } } @@ -10053,10 +10058,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ mem::drop(peer_state_lock); mem::drop(per_peer_state); // TODO(dunxen): Find/make less icky way to do this. - match handle_error!( - self, + match self.handle_error( Result::<(), MsgHandleErrInternal>::Err(err), - *counterparty_node_id + *counterparty_node_id, ) { Ok(_) => { unreachable!("`handle_error` only returns Err as we've passed in an Err") @@ -11129,7 +11133,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some((broadcast_tx, err)) = tx_err { log_info!(logger, "Broadcasting {}", log_tx!(broadcast_tx)); self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); - let _ = handle_error!(self, err, *counterparty_node_id); + let _ = self.handle_error(err, *counterparty_node_id); } Ok(()) } @@ -12240,7 +12244,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } for (err, counterparty_node_id) in failed_channels { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } has_pending_monitor_events @@ -12450,7 +12454,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } drop(per_peer_state); for (err, counterparty_node_id) in shutdown_results { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } } @@ -12510,7 +12514,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } for (counterparty_node_id, err) in handle_errors { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } has_update @@ -13956,7 +13960,7 @@ where }; for (err, counterparty_node_id) in failed_channels.drain(..) { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } persist @@ -14665,7 +14669,7 @@ where } for (failure, counterparty_node_id) in failed_channels { - let _ = handle_error!(self, failure, counterparty_node_id); + let _ = self.handle_error(failure, counterparty_node_id); } for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) { @@ -14781,7 +14785,7 @@ where }, _ => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14789,7 +14793,7 @@ where #[rustfmt::skip] fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) { if !self.init_features().supports_dual_fund() { - let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( + let _: Result<(), _> = self.handle_error(Err(MsgHandleErrInternal::send_err_msg_no_close( "Dual-funded channels not supported".to_owned(), msg.common_fields.temporary_channel_id.clone())), counterparty_node_id); return; @@ -14806,7 +14810,7 @@ where }, _ => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14817,7 +14821,7 @@ where // change to the contents. let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_accept_channel(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); NotifyOption::SkipPersistHandleEvents }); } @@ -14829,26 +14833,26 @@ where "Dual-funded channels not supported".to_owned(), msg.common_fields.temporary_channel_id.clone(), )); - let _: Result<(), _> = handle_error!(self, err, counterparty_node_id); + let _: Result<(), _> = self.handle_error(err, counterparty_node_id); } fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_funding_created(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_funding_signed(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents); let res = self.internal_peer_storage(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_peer_storage_retrieval( @@ -14857,7 +14861,7 @@ where let _persistence_guard = 
PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents); let res = self.internal_peer_storage_retrieval(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) { @@ -14871,7 +14875,7 @@ where Err(e) if e.closes_channel() => NotifyOption::DoPersist, _ => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14890,7 +14894,7 @@ where } }, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14903,7 +14907,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14916,7 +14920,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14930,7 +14934,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::DoPersist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14938,27 +14942,27 @@ where fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_shutdown(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_closing_signed(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } #[cfg(simple_close)] fn handle_closing_complete(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingComplete) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_closing_complete(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } #[cfg(simple_close)] fn handle_closing_sig(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingSig) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_closing_sig(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) { @@ -14972,7 +14976,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14982,7 +14986,7 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_update_fulfill_htlc(&counterparty_node_id, msg); - let _ 
= handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) { @@ -14996,7 +15000,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15014,7 +15018,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15024,7 +15028,7 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_commitment_signed(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_commitment_signed_batch( @@ -15033,13 +15037,13 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_commitment_signed_batch(&counterparty_node_id, channel_id, batch); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_revoke_and_ack(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) { @@ -15053,7 +15057,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15063,13 +15067,13 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_announcement_signatures(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) { PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_channel_update(&counterparty_node_id, msg); - if let Ok(persist) = handle_error!(self, res, counterparty_node_id) { + if let Ok(persist) = self.handle_error(res, counterparty_node_id) { persist } else { NotifyOption::DoPersist @@ -15082,7 +15086,7 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_channel_reestablish(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } #[rustfmt::skip] @@ -15209,7 +15213,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15221,7 +15225,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15233,7 +15237,7 @@ where Err(_) => 
NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15245,7 +15249,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15257,7 +15261,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15265,7 +15269,7 @@ where fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_tx_signatures(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) { @@ -15273,7 +15277,7 @@ where "Dual-funded channels not supported".to_owned(), msg.channel_id.clone(), )); - let _: Result<(), _> = handle_error!(self, err, counterparty_node_id); + let _: Result<(), _> = self.handle_error(err, counterparty_node_id); } fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) { @@ -15281,7 +15285,7 @@ where "Dual-funded channels not supported".to_owned(), msg.channel_id.clone(), )); - let _: Result<(), _> = handle_error!(self, err, counterparty_node_id); + let _: Result<(), _> = self.handle_error(err, counterparty_node_id); } fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) { @@ -15295,7 +15299,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } From deac317fa9e202d24e3e6b5cdbf3f55766e6492b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 10 Dec 2025 14:57:34 +0100 Subject: [PATCH 20/42] Simplify channelmanager handle_error via map_err --- lightning/src/ln/channelmanager.rs | 94 +++++++++++++++--------------- 1 file changed, 46 insertions(+), 48 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 460994e8ec2..222fcf8b92e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3968,6 +3968,7 @@ where } } + /// Handles an error by closing the channel if required and generating peer messages. fn handle_error( &self, internal: Result, counterparty_node_id: PublicKey, ) -> Result { @@ -3976,64 +3977,61 @@ where debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); - match internal { - Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. 
}) => { - let mut msg_event = None; + internal.map_err(|err_internal| { + let mut msg_event = None; - if let Some((shutdown_res, update_option)) = shutdown_finish { - let counterparty_node_id = shutdown_res.counterparty_node_id; - let channel_id = shutdown_res.channel_id; - let logger = WithContext::from( - &self.logger, - Some(counterparty_node_id), - Some(channel_id), - None, - ); - log_error!(logger, "Closing channel: {}", err.err); - - self.finish_close_channel(shutdown_res); - if let Some((update, node_id_1, node_id_2)) = update_option { - let mut pending_broadcast_messages = - self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update, - node_id_1, - node_id_2, - }); - } - } else { - log_error!(self.logger, "Got non-closing error: {}", err.err); + if let Some((shutdown_res, update_option)) = err_internal.shutdown_finish { + let counterparty_node_id = shutdown_res.counterparty_node_id; + let channel_id = shutdown_res.channel_id; + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(channel_id), + None, + ); + log_error!(logger, "Closing channel: {}", err_internal.err.err); + + self.finish_close_channel(shutdown_res); + if let Some((update, node_id_1, node_id_2)) = update_option { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { + msg: update, + node_id_1, + node_id_2, + }); } + } else { + log_error!(self.logger, "Got non-closing error: {}", err_internal.err.err); + } - if let msgs::ErrorAction::IgnoreError = err.action { - if let Some(tx_abort) = tx_abort { - msg_event = Some(MessageSendEvent::SendTxAbort { - node_id: counterparty_node_id, - msg: tx_abort, - }); - } - } else { - msg_event = Some(MessageSendEvent::HandleError { + if let msgs::ErrorAction::IgnoreError = err_internal.err.action { + if let Some(tx_abort) = err_internal.tx_abort { + msg_event = Some(MessageSendEvent::SendTxAbort { node_id: counterparty_node_id, - action: err.action.clone(), + msg: tx_abort, }); } + } else { + msg_event = Some(MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: err_internal.err.action.clone(), + }); + } - if let Some(msg_event) = msg_event { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - if peer_state.is_connected { - peer_state.pending_msg_events.push(msg_event); - } + if let Some(msg_event) = msg_event { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if peer_state.is_connected { + peer_state.pending_msg_events.push(msg_event); } } + } - // Return error in case higher-API need one - Err(err) - }, - } + // Return error in case higher-API need one + err_internal.err + }) } /// Gets the current [`UserConfig`] which controls some global behavior and includes the From 4f055aca878ae990280d5b467d0e1faac5691aa0 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 13 Nov 2025 14:20:46 -0500 Subject: [PATCH 21/42] Store inbound committed update_adds in Channel We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs 
are out-of-sync on restart. The main issue that can arise is channel force closure. As part of this, we plan to store at least parts of Channels in ChannelMonitors, and that Channel data will be used in rebuilding the manager. Once we store update_adds in Channels, we can use them on restart when reconstructing ChannelManager maps such as forward_htlcs and pending_intercepted_htlcs. Upcoming commits will start doing this reconstruction. --- lightning/src/ln/channel.rs | 92 ++++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 31 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5b4ac4c0aa5..ed6f6cef77f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -211,7 +211,14 @@ enum InboundHTLCState { /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), - Committed, + /// An HTLC irrevocably committed in the latest commitment transaction, ready to be forwarded or + /// removed. + Committed { + /// Used to rebuild `ChannelManager` HTLC state on restart. Previously the manager would track + /// and persist all HTLC forwards and receives itself, but newer LDK versions avoid relying on + /// its persistence and instead reconstruct state based on `Channel` and `ChannelMonitor` data. + update_add_htlc_opt: Option, + }, /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack /// we'll drop it. @@ -235,7 +242,7 @@ impl From<&InboundHTLCState> for Option { InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => { Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd) }, - InboundHTLCState::Committed => Some(InboundHTLCStateDetails::Committed), + InboundHTLCState::Committed { .. } => Some(InboundHTLCStateDetails::Committed), InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) => { Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail) }, @@ -256,7 +263,7 @@ impl fmt::Display for InboundHTLCState { InboundHTLCState::RemoteAnnounced(_) => write!(f, "RemoteAnnounced"), InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => write!(f, "AwaitingRemoteRevokeToAnnounce"), InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => write!(f, "AwaitingAnnouncedRemoteRevoke"), - InboundHTLCState::Committed => write!(f, "Committed"), + InboundHTLCState::Committed { .. } => write!(f, "Committed"), InboundHTLCState::LocalRemoved(_) => write!(f, "LocalRemoved"), } } @@ -268,7 +275,7 @@ impl InboundHTLCState { InboundHTLCState::RemoteAnnounced(_) => !generated_by_local, InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => !generated_by_local, InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => true, - InboundHTLCState::Committed => true, + InboundHTLCState::Committed { .. } => true, InboundHTLCState::LocalRemoved(_) => !generated_by_local, } } @@ -296,7 +303,7 @@ impl InboundHTLCState { }, InboundHTLCResolution::Resolved { .. } => false, }, - InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false, + InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false, } } } @@ -4102,7 +4109,7 @@ where if self.pending_inbound_htlcs.iter() .any(|htlc| match htlc.state { - InboundHTLCState::Committed => false, + InboundHTLCState::Committed { .. } => false, // An HTLC removal from the local node is pending on the remote commitment. 
InboundHTLCState::LocalRemoved(_) => true, // An HTLC add from the remote node is pending on the local commitment. @@ -4531,7 +4538,7 @@ where (InboundHTLCState::RemoteAnnounced(..), _) => true, (InboundHTLCState::AwaitingRemoteRevokeToAnnounce(..), _) => true, (InboundHTLCState::AwaitingAnnouncedRemoteRevoke(..), _) => true, - (InboundHTLCState::Committed, _) => true, + (InboundHTLCState::Committed { .. }, _) => true, (InboundHTLCState::LocalRemoved(..), true) => true, (InboundHTLCState::LocalRemoved(..), false) => false, }) @@ -7320,7 +7327,7 @@ where payment_preimage_arg ); match htlc.state { - InboundHTLCState::Committed => {}, + InboundHTLCState::Committed { .. } => {}, InboundHTLCState::LocalRemoved(ref reason) => { if let &InboundHTLCRemovalReason::Fulfill { .. } = reason { } else { @@ -7413,7 +7420,7 @@ where { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; - if let InboundHTLCState::Committed = htlc.state { + if let InboundHTLCState::Committed { .. } = htlc.state { } else { debug_assert!( false, @@ -7548,7 +7555,7 @@ where for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { match htlc.state { - InboundHTLCState::Committed => {}, + InboundHTLCState::Committed { .. } => {}, InboundHTLCState::LocalRemoved(_) => { return Err(ChannelError::Ignore(format!("HTLC {} was already resolved", htlc.htlc_id))); }, @@ -8716,7 +8723,7 @@ where false }; if swap { - let mut state = InboundHTLCState::Committed; + let mut state = InboundHTLCState::Committed { update_add_htlc_opt: None }; mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { @@ -8755,14 +8762,21 @@ where PendingHTLCStatus::Forward(forward_info) => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed; + htlc.state = InboundHTLCState::Committed { + // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were + // received on an old pre-0.0.123 version of LDK. In this case, the HTLC is + // required to be resolved prior to upgrading to 0.1+ per CHANGELOG.md. + update_add_htlc_opt: None, + }; }, } }, InboundHTLCResolution::Pending { update_add_htlc } => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); - pending_update_adds.push(update_add_htlc); - htlc.state = InboundHTLCState::Committed; + pending_update_adds.push(update_add_htlc.clone()); + htlc.state = InboundHTLCState::Committed { + update_add_htlc_opt: Some(update_add_htlc), + }; }, } } @@ -9297,7 +9311,7 @@ where // in response to it yet, so don't touch it. true }, - InboundHTLCState::Committed => true, + InboundHTLCState::Committed { .. 
} => true, InboundHTLCState::LocalRemoved(_) => { // We (hopefully) sent a commitment_signed updating this HTLC (which we can // re-transmit if needed) and they may have even sent a revoke_and_ack back @@ -14518,6 +14532,7 @@ where } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); + let mut inbound_committed_update_adds: Vec> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14537,8 +14552,9 @@ where 2u8.write(writer)?; htlc_resolution.write(writer)?; }, - &InboundHTLCState::Committed => { + &InboundHTLCState::Committed { ref update_add_htlc_opt } => { 3u8.write(writer)?; + inbound_committed_update_adds.push(update_add_htlc_opt.clone()); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; @@ -14914,6 +14930,7 @@ where (69, holding_cell_held_htlc_flags, optional_vec), // Added in 0.2 (71, holder_commitment_point_previous_revoked, option), // Added in 0.3 (73, holder_commitment_point_last_revoked, option), // Added in 0.3 + (75, inbound_committed_update_adds, optional_vec), }); Ok(()) @@ -14997,7 +15014,7 @@ where }; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) }, - 3 => InboundHTLCState::Committed, + 3 => InboundHTLCState::Committed { update_add_htlc_opt: None }, 4 => { let reason = match ::read(reader)? { 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -15301,6 +15318,7 @@ where let mut pending_outbound_held_htlc_flags_opt: Option>> = None; let mut holding_cell_held_htlc_flags_opt: Option>> = None; + let mut inbound_committed_update_adds_opt: Option>> = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -15350,6 +15368,7 @@ where (69, holding_cell_held_htlc_flags_opt, optional_vec), // Added in 0.2 (71, holder_commitment_point_previous_revoked_opt, option), // Added in 0.3 (73, holder_commitment_point_last_revoked_opt, option), // Added in 0.3 + (75, inbound_committed_update_adds_opt, optional_vec), }); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -15473,6 +15492,17 @@ where return Err(DecodeError::InvalidValue); } } + if let Some(update_adds) = inbound_committed_update_adds_opt { + let mut iter = update_adds.into_iter(); + for htlc in pending_inbound_htlcs.iter_mut() { + if let InboundHTLCState::Committed { ref mut update_add_htlc_opt } = htlc.state { + *update_add_htlc_opt = iter.next().ok_or(DecodeError::InvalidValue)?; + } + } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } if let Some(attribution_data_list) = removed_htlc_attribution_data { let mut removed_htlcs = pending_inbound_htlcs.iter_mut().filter_map(|status| { @@ -16057,7 +16087,7 @@ mod tests { amount_msat: htlc_amount_msat, payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()), cltv_expiry: 300000000, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -16903,7 +16933,7 @@ mod tests { amount_msat: 1000000, cltv_expiry: 500, payment_hash: PaymentHash::from(payment_preimage_0), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); let payment_preimage_1 = @@ -16913,7 +16943,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: 
InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); let payment_preimage_2 = @@ -16953,7 +16983,7 @@ mod tests { amount_msat: 4000000, cltv_expiry: 504, payment_hash: PaymentHash::from(payment_preimage_4), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); // commitment tx with all five HTLCs untrimmed (minimum feerate) @@ -17342,7 +17372,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); chan.context.pending_outbound_htlcs.clear(); @@ -17593,7 +17623,7 @@ mod tests { amount_msat: 5000000, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, })); chan.context.pending_outbound_htlcs.extend( @@ -17656,7 +17686,7 @@ mod tests { amount_msat, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17722,7 +17752,7 @@ mod tests { amount_msat: 100000, cltv_expiry: 920125, payment_hash: htlc_0_in_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); let htlc_1_in_preimage = @@ -17740,7 +17770,7 @@ mod tests { amount_msat: 49900000, cltv_expiry: 920125, payment_hash: htlc_1_in_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); chan.context.pending_outbound_htlcs.extend( @@ -17792,7 +17822,7 @@ mod tests { amount_msat: 30000, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17833,7 +17863,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17870,7 +17900,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17907,7 +17937,7 @@ mod tests { amount_msat: 29753, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17959,7 +17989,7 @@ mod tests { amount_msat, cltv_expiry, payment_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }), ); From c27093ded5773473946bd21af303ee638d5b8c9c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 1 Dec 2025 16:11:45 -0800 Subject: [PATCH 22/42] Extract util for HTLCIntercepted event creation We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. As part of rebuilding ChannelManager forward HTLCs maps, we will also add a fix that will regenerate HTLCIntercepted events for HTLC intercepts that are present but have no corresponding event in the queue. That fix will use this new method. 
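
As a minimal standalone sketch of the regeneration step described above (using simplified
stand-in types: `InterceptId`, `Intercept`, and `Event` here are hypothetical placeholders
rather than LDK's real definitions, and the actual fix operates on
`ChannelManager::pending_intercepted_htlcs` and the manager's pending events queue as shown in
later patches in this series):

use std::collections::{HashMap, VecDeque};

// Hypothetical stand-ins for the real LDK types; illustration only.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct InterceptId(u64);

#[derive(Clone, Debug)]
struct Intercept {
	// Amount the intercepted HTLC is expected to forward to the next hop, in millisatoshis.
	expected_outbound_amount_msat: u64,
}

#[derive(Clone, Debug)]
enum Event {
	HtlcIntercepted { intercept_id: InterceptId, expected_outbound_amount_msat: u64 },
}

// For every pending intercept without a matching queued event, push a freshly built
// `HtlcIntercepted` event. This mirrors the regeneration step the commit message describes.
fn regenerate_missing_intercept_events(
	pending_intercepts: &HashMap<InterceptId, Intercept>, events: &mut VecDeque<Event>,
) {
	for (id, intercept) in pending_intercepts {
		let already_queued = events.iter().any(
			|ev| matches!(ev, Event::HtlcIntercepted { intercept_id, .. } if intercept_id == id),
		);
		if !already_queued {
			events.push_back(Event::HtlcIntercepted {
				intercept_id: *id,
				expected_outbound_amount_msat: intercept.expected_outbound_amount_msat,
			});
		}
	}
}

fn main() {
	let mut pending = HashMap::new();
	pending.insert(InterceptId(1), Intercept { expected_outbound_amount_msat: 990_000 });
	let mut events = VecDeque::new();
	regenerate_missing_intercept_events(&pending, &mut events);
	assert_eq!(events.len(), 1);
	// Running it again must not duplicate the event.
	regenerate_missing_intercept_events(&pending, &mut events);
	assert_eq!(events.len(), 1);
}
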
--- lightning/src/ln/channelmanager.rs | 44 +++++++++++++++++++----------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 72585d69f80..aa7871051e6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3911,6 +3911,25 @@ macro_rules! process_events_body { } } +/// Creates an [`Event::HTLCIntercepted`] from a [`PendingAddHTLCInfo`]. We generate this event in a +/// few places so this DRYs the code. +fn create_htlc_intercepted_event( + intercept_id: InterceptId, pending_add: &PendingAddHTLCInfo, +) -> Result { + let inbound_amount_msat = pending_add.forward_info.incoming_amt_msat.ok_or(())?; + let requested_next_hop_scid = match pending_add.forward_info.routing { + PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, + _ => return Err(()), + }; + Ok(Event::HTLCIntercepted { + requested_next_hop_scid, + payment_hash: pending_add.forward_info.payment_hash, + inbound_amount_msat, + expected_outbound_amount_msat: pending_add.forward_info.outgoing_amt_msat, + intercept_id, + }) +} + impl< M: Deref, T: Deref, @@ -11486,22 +11505,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); match pending_intercepts.entry(intercept_id) { hash_map::Entry::Vacant(entry) => { - new_intercept_events.push_back(( - events::Event::HTLCIntercepted { - requested_next_hop_scid: scid, - payment_hash, - inbound_amount_msat: pending_add - .forward_info - .incoming_amt_msat - .unwrap(), - expected_outbound_amount_msat: pending_add - .forward_info - .outgoing_amt_msat, - intercept_id, - }, - None, - )); - entry.insert(pending_add); + if let Ok(intercept_ev) = + create_htlc_intercepted_event(intercept_id, &pending_add) + { + new_intercept_events.push_back((intercept_ev, None)); + entry.insert(pending_add); + } else { + debug_assert!(false); + fail_intercepted_htlc(pending_add); + } }, hash_map::Entry::Occupied(_) => { log_info!( From 26992e1dc88325f49f39cc55fd0985c515ab5367 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 17 Nov 2025 17:27:00 -0500 Subject: [PATCH 23/42] Extract method to dedup pre-decode update_add We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. We'll use this new util when reconstructing the ChannelManager::decode_update_add_htlcs map from Channel data in upcoming commits. While the Channel data is not included in the monitors yet, it will be in future work. --- lightning/src/ln/channelmanager.rs | 50 ++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index aa7871051e6..12cfe594891 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16819,6 +16819,38 @@ where } } +// If the HTLC corresponding to `prev_hop_data` is present in `decode_update_add_htlcs`, remove it +// from the map as it is already being stored and processed elsewhere. 
+fn dedup_decode_update_add_htlcs( + decode_update_add_htlcs: &mut HashMap>, + prev_hop_data: &HTLCPreviousHopData, removal_reason: &'static str, logger: &L, +) where + L::Target: Logger, +{ + decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| { + update_add_htlcs.retain(|update_add| { + let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias + && update_add.htlc_id == prev_hop_data.htlc_id; + if matches { + let logger = WithContext::from( + logger, + prev_hop_data.counterparty_node_id, + Some(update_add.channel_id), + Some(update_add.payment_hash), + ); + log_info!( + logger, + "Removing pending to-decode HTLC with id {}: {}", + update_add.htlc_id, + removal_reason + ); + } + !matches + }); + !update_add_htlcs.is_empty() + }); +} + // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the // SipmleArcChannelManager type: impl< @@ -17686,19 +17718,11 @@ where // still have an entry for this HTLC in `forward_htlcs` or // `pending_intercepted_htlcs`, we were apparently not persisted after // the monitor was when forwarding the payment. - decode_update_add_htlcs.retain( - |src_outb_alias, update_add_htlcs| { - update_add_htlcs.retain(|update_add_htlc| { - let matches = *src_outb_alias - == prev_hop_data.prev_outbound_scid_alias - && update_add_htlc.htlc_id == prev_hop_data.htlc_id; - if matches { - log_info!(logger, "Removing pending to-decode HTLC as it was forwarded to the closed channel"); - } - !matches - }); - !update_add_htlcs.is_empty() - }, + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, ); forward_htlcs.retain(|_, forwards| { forwards.retain(|forward| { From 005da38e494e5ca8284e72b3916992f8b119a53e Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 12:32:40 -0500 Subject: [PATCH 24/42] Rename manager HTLC forward maps to _legacy We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. Soon we'll be reconstructing these now-legacy maps from Channel data (that will also be included in ChannelMonitors in future work), so rename them as part of moving towards not needing to persist them in ChannelManager. --- lightning/src/ln/channelmanager.rs | 38 +++++++++++++++++++----------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 12cfe594891..a48eaa46c72 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17200,7 +17200,11 @@ where const MAX_ALLOC_SIZE: usize = 1024 * 64; let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + // This map is read but may no longer be used because we'll attempt to rebuild the set of HTLC + // forwards from the `Channel{Monitor}`s instead, as a step towards removing the requirement of + // regularly persisting the `ChannelManager`. 
+ let mut forward_htlcs_legacy: HashMap> = + hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); for _ in 0..forward_htlcs_count { let short_channel_id = Readable::read(reader)?; let pending_forwards_count: u64 = Readable::read(reader)?; @@ -17211,7 +17215,7 @@ where for _ in 0..pending_forwards_count { pending_forwards.push(Readable::read(reader)?); } - forward_htlcs.insert(short_channel_id, pending_forwards); + forward_htlcs_legacy.insert(short_channel_id, pending_forwards); } let claimable_htlcs_count: u64 = Readable::read(reader)?; @@ -17299,12 +17303,18 @@ where }; } + // Some maps are read but may no longer be used because we attempt to rebuild the pending HTLC + // set from the `Channel{Monitor}`s instead, as a step towards removing the requirement of + // regularly persisting the `ChannelManager`. + let mut pending_intercepted_htlcs_legacy: Option> = + Some(new_hash_map()); + let mut decode_update_add_htlcs_legacy: Option>> = + None; + // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. let mut pending_outbound_payments_no_retry: Option>> = None; let mut pending_outbound_payments = None; - let mut pending_intercepted_htlcs: Option> = - Some(new_hash_map()); let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; @@ -17322,13 +17332,12 @@ where let mut in_flight_monitor_updates: Option< HashMap<(PublicKey, ChannelId), Vec>, > = None; - let mut decode_update_add_htlcs: Option>> = None; let mut inbound_payment_id_secret = None; let mut peer_storage_dir: Option)>> = None; let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs, option), + (2, pending_intercepted_htlcs_legacy, option), (3, pending_outbound_payments, option), (4, pending_claiming_payments, option), (5, received_network_pubkey, option), @@ -17339,13 +17348,14 @@ where (10, legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs, option), + (14, decode_update_add_htlcs_legacy, option), (15, inbound_payment_id_secret, option), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)), }); - let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map()); + let mut decode_update_add_htlcs_legacy = + decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); let peer_storage_dir: Vec<(PublicKey, Vec)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); @@ -17719,12 +17729,12 @@ where // `pending_intercepted_htlcs`, we were apparently not persisted after // the monitor was when forwarding the payment. 
dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, + &mut decode_update_add_htlcs_legacy, &prev_hop_data, "HTLC was forwarded to the closed channel", &args.logger, ); - forward_htlcs.retain(|_, forwards| { + forward_htlcs_legacy.retain(|_, forwards| { forwards.retain(|forward| { if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { @@ -17736,7 +17746,7 @@ where }); !forwards.is_empty() }); - pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { + pending_intercepted_htlcs_legacy.as_mut().unwrap().retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", &htlc.payment_hash, &monitor.channel_id()); @@ -18234,10 +18244,10 @@ where inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy.unwrap()), - forward_htlcs: Mutex::new(forward_htlcs), - decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), + forward_htlcs: Mutex::new(forward_htlcs_legacy), + decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap(), From 7c4d0214d475c5ff6b12985880b3dc08c5add3bf Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 18:24:32 -0500 Subject: [PATCH 25/42] Tweak pending_htlc_intercepts ser on manager read Makes an upcoming commit cleaner --- lightning/src/ln/channelmanager.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a48eaa46c72..a854bb7b5d6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17307,7 +17307,7 @@ where // set from the `Channel{Monitor}`s instead, as a step towards removing the requirement of // regularly persisting the `ChannelManager`. 
let mut pending_intercepted_htlcs_legacy: Option> = - Some(new_hash_map()); + None; let mut decode_update_add_htlcs_legacy: Option>> = None; @@ -17356,6 +17356,8 @@ where }); let mut decode_update_add_htlcs_legacy = decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); + let mut pending_intercepted_htlcs_legacy = + pending_intercepted_htlcs_legacy.unwrap_or_else(|| new_hash_map()); let peer_storage_dir: Vec<(PublicKey, Vec)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); @@ -17746,7 +17748,7 @@ where }); !forwards.is_empty() }); - pending_intercepted_htlcs_legacy.as_mut().unwrap().retain(|intercepted_id, htlc_info| { + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", &htlc.payment_hash, &monitor.channel_id()); @@ -18244,7 +18246,7 @@ where inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy.unwrap()), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy), forward_htlcs: Mutex::new(forward_htlcs_legacy), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy), From 64de98919068d3ec9691d576e85ad80e3c7135da Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 18:35:57 -0500 Subject: [PATCH 26/42] Gather to-decode HTLC fwds from channels on manager read We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. Here we start this process by rebuilding ChannelManager::decode_update_add_htlcs from the Channels, which will soon be included in the ChannelMonitors as part of a different series of PRs. The newly built map is not yet used but will be in the next commit. --- lightning/src/ln/channel.rs | 14 ++++++++ lightning/src/ln/channelmanager.rs | 53 ++++++++++++++++++++++++++++-- 2 files changed, 64 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ed6f6cef77f..cb455400b5b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7778,6 +7778,20 @@ where Ok(()) } + /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. 
+ pub(super) fn get_inbound_committed_update_adds(&self) -> Vec { + self.context + .pending_inbound_htlcs + .iter() + .filter_map(|htlc| match htlc.state { + InboundHTLCState::Committed { ref update_add_htlc_opt } => { + update_add_htlc_opt.clone() + }, + _ => None, + }) + .collect() + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a854bb7b5d6..080ecef2c1f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17358,6 +17358,7 @@ where decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); let mut pending_intercepted_htlcs_legacy = pending_intercepted_htlcs_legacy.unwrap_or_else(|| new_hash_map()); + let mut decode_update_add_htlcs = new_hash_map(); let peer_storage_dir: Vec<(PublicKey, Vec)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); @@ -17669,6 +17670,21 @@ where let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + let inbound_committed_update_adds = + funded_chan.get_inbound_committed_update_adds(); + if !inbound_committed_update_adds.is_empty() { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel`, as part of removing the requirement to regularly persist the + // `ChannelManager`. + decode_update_add_htlcs.insert( + funded_chan.context.outbound_scid_alias(), + inbound_committed_update_adds, + ); + } + } + } } if is_channel_closed { @@ -17727,9 +17743,15 @@ where }; // The ChannelMonitor is now responsible for this HTLC's // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs` or - // `pending_intercepted_htlcs`, we were apparently not persisted after - // the monitor was when forwarding the payment. + // still have an entry for this HTLC in `forward_htlcs`, + // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not + // persisted after the monitor was when forwarding the payment. + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs_legacy, &prev_hop_data, @@ -18220,6 +18242,31 @@ where } } + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, _, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); + } + } + + // See above comment on `failed_htlcs`. 
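+	// An HTLC that has already been decoded and surfaced as a claimable payment must not
+	// reappear in the rebuilt `decode_update_add_htlcs` map, or it would be handled a second
+	// time on the next call to `process_pending_htlc_forwards`.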
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); + } + } + let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( chain_hash, From cb398f6b761edde6b45fcda93a01c564cb49a13c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 18:39:39 -0500 Subject: [PATCH 27/42] Rebuild manager forwarded htlcs maps from Channels We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. Here we start this process by rebuilding ChannelManager::decode_update_add_htlcs, forward_htlcs, and pending_intercepted_htlcs from Channel data, which will soon be included in the ChannelMonitors as part of a different series of PRs. We also fix the reload_node test util to use the node's pre-reload config after restart. The previous behavior was a bit surprising and led to one of this commit's tests failing. --- lightning/src/ln/channelmanager.rs | 72 ++++++++++++++++++- lightning/src/ln/functional_test_utils.rs | 3 +- lightning/src/ln/reload_tests.rs | 87 ++++++++++++++++++++++- 3 files changed, 159 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 080ecef2c1f..e2a3db8783a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18267,6 +18267,76 @@ where } } + // Remove HTLCs from `forward_htlcs` if they are also present in `decode_update_add_htlcs`. + // + // In the future, the full set of pending HTLCs will be pulled from `Channel{Monitor}` data and + // placed in `ChannelManager::decode_update_add_htlcs` on read, to be handled on the next call + // to `process_pending_htlc_forwards`. This is part of a larger effort to remove the requirement + // of regularly persisting the `ChannelManager`. The new pipeline is supported for HTLC forwards + // received on LDK 0.3+ but not <= 0.2, so prune non-legacy HTLCs from `forward_htlcs`. + forward_htlcs_legacy.retain(|scid, pending_fwds| { + for fwd in pending_fwds { + let (prev_scid, prev_htlc_id) = match fwd { + HTLCForwardInfo::AddHTLC(htlc) => { + (htlc.prev_outbound_scid_alias, htlc.prev_htlc_id) + }, + HTLCForwardInfo::FailHTLC { htlc_id, .. } + | HTLCForwardInfo::FailMalformedHTLC { htlc_id, .. } => (*scid, *htlc_id), + }; + if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { + if pending_update_adds + .iter() + .any(|update_add| update_add.htlc_id == prev_htlc_id) + { + return false; + } + } + } + true + }); + // Remove intercepted HTLC forwards if they are also present in `decode_update_add_htlcs`. See + // the above comment. + pending_intercepted_htlcs_legacy.retain(|id, fwd| { + let prev_scid = fwd.prev_outbound_scid_alias; + if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { + if pending_update_adds + .iter() + .any(|update_add| update_add.htlc_id == fwd.prev_htlc_id) + { + pending_events_read.retain( + |(ev, _)| !matches!(ev, Event::HTLCIntercepted { intercept_id, .. 
} if intercept_id == id), + ); + return false; + } + } + if !pending_events_read.iter().any( + |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), + ) { + match create_htlc_intercepted_event(*id, &fwd) { + Ok(ev) => pending_events_read.push_back((ev, None)), + Err(()) => debug_assert!(false), + } + } + true + }); + // Add legacy update_adds that were received on LDK <= 0.2 that are not present in the + // `decode_update_add_htlcs` map that was rebuilt from `Channel{Monitor}` data, see above + // comment. + for (scid, legacy_update_adds) in decode_update_add_htlcs_legacy.drain() { + match decode_update_add_htlcs.entry(scid) { + hash_map::Entry::Occupied(mut update_adds) => { + for legacy_update_add in legacy_update_adds { + if !update_adds.get().contains(&legacy_update_add) { + update_adds.get_mut().push(legacy_update_add); + } + } + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(legacy_update_adds); + }, + } + } + let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( chain_hash, @@ -18296,7 +18366,7 @@ where pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy), forward_htlcs: Mutex::new(forward_htlcs_legacy), - decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy), + decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap(), diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index ff33d7508b5..3a5940cb161 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1382,9 +1382,10 @@ macro_rules! reload_node { $node.onion_messenger.set_async_payments_handler(&$new_channelmanager); }; ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { + let config = $node.node.get_current_config(); reload_node!( $node, - test_default_channel_config(), + config, $chanman_encoded, $monitors_encoded, $persister, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 2e9471a787d..cd560745256 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -508,7 +508,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { #[cfg(feature = "std")] fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) { - use crate::ln::channelmanager::Retry; + use crate::ln::outbound_payment::Retry; use crate::types::string::UntrustedString; // When we get a data_loss_protect proving we're behind, we immediately panic as the // chain::Watch API requirements have been violated (e.g. the user restored from a backup). 
The @@ -1173,6 +1173,91 @@ fn removed_payment_no_manager_persistence() { expect_payment_failed!(nodes[0], payment_hash, false); } +#[test] +fn manager_persisted_pre_outbound_edge_forward() { + do_manager_persisted_pre_outbound_edge_forward(false); +} + +#[test] +fn manager_persisted_pre_outbound_edge_intercept_forward() { + do_manager_persisted_pre_outbound_edge_forward(true); +} + +fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let mut intercept_forwards_config = test_default_channel_config(); + intercept_forwards_config.accept_intercept_htlcs = true; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + + // Lock in the HTLC from node_a <> node_b. + let amt_msat = 5000; + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + if intercept_htlc { + route.paths[0].hops[1].short_channel_id = nodes[1].node.get_intercept_scid(); + } + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in + // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`. + nodes[1].node.test_process_pending_update_add_htlcs(); + + // Disconnect peers and reload the forwarding node_b. + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]); + args_b_c.send_channel_ready = (true, true); + args_b_c.send_announcement_sigs = (true, true); + reconnect_nodes(args_b_c); + + // Forward the HTLC and ensure we can claim it post-reload. + nodes[1].node.process_pending_htlc_forwards(); + + if intercept_htlc { + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let (intercept_id, expected_outbound_amt_msat) = match events[0] { + Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. 
} => { + (intercept_id, expected_outbound_amount_msat) + }, + _ => panic!() + }; + nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id_2, + nodes[2].node.get_our_node_id(), expected_outbound_amt_msat).unwrap(); + nodes[1].node.process_pending_htlc_forwards(); + } + check_added_monitors(&nodes[1], 1); + + let updates = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[2], false); + + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); From a24dcffa32766b1f97d9f36be43a193e2616ca8b Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 1 Dec 2025 16:11:18 -0800 Subject: [PATCH 28/42] Test 0.2 -> 0.3 reload with with forward htlcs present We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. In the previous commit we started this process by rebuilding ChannelManager::decode_update_add_htlcs, forward_htlcs, and pending_intercepted_htlcs from the Channel data, which will soon be included in the ChannelMonitors as part of a different series of PRs. Here we test that HTLC forwards that were originally received on 0.2 can still be successfully forwarded using the new reload + legacy handling code that will be merged for 0.3. --- lightning-tests/Cargo.toml | 1 + .../src/upgrade_downgrade_tests.rs | 201 ++++++++++++++++++ 2 files changed, 202 insertions(+) diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 439157e528b..4e8d330089d 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-types = { path = "../lightning-types", features = ["_test_utils"] } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_2 = { package = "lightning", version = "0.2.0", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.7", features = ["_test_utils"] } lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index cef180fbd4e..19c50e870de 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -10,6 +10,16 @@ //! Tests which test upgrading from previous versions of LDK or downgrading to previous versions of //! LDK. 
+use lightning_0_2::commitment_signed_dance as commitment_signed_dance_0_2; +use lightning_0_2::events::Event as Event_0_2; +use lightning_0_2::get_monitor as get_monitor_0_2; +use lightning_0_2::ln::channelmanager::PaymentId as PaymentId_0_2; +use lightning_0_2::ln::channelmanager::RecipientOnionFields as RecipientOnionFields_0_2; +use lightning_0_2::ln::functional_test_utils as lightning_0_2_utils; +use lightning_0_2::ln::msgs::ChannelMessageHandler as _; +use lightning_0_2::routing::router as router_0_2; +use lightning_0_2::util::ser::Writeable as _; + use lightning_0_1::commitment_signed_dance as commitment_signed_dance_0_1; use lightning_0_1::events::ClosureReason as ClosureReason_0_1; use lightning_0_1::expect_pending_htlcs_forwardable_ignore as expect_pending_htlcs_forwardable_ignore_0_1; @@ -498,3 +508,194 @@ fn test_0_1_htlc_forward_after_splice() { do_test_0_1_htlc_forward_after_splice(true); do_test_0_1_htlc_forward_after_splice(false); } + +#[derive(PartialEq, Eq)] +enum MidHtlcForwardCase { + // Restart the upgraded node after locking an HTLC forward into the inbound edge, but before + // decoding the onion. + PreOnionDecode, + // Restart the upgraded node after locking an HTLC forward into the inbound edge + decoding the + // onion. + PostOnionDecode, + // Restart the upgraded node after the HTLC has been decoded and placed in the pending intercepted + // HTLCs map. + Intercept, +} + +#[test] +fn upgrade_pre_htlc_forward_onion_decode() { + do_upgrade_mid_htlc_forward(MidHtlcForwardCase::PreOnionDecode); +} + +#[test] +fn upgrade_mid_htlc_forward() { + do_upgrade_mid_htlc_forward(MidHtlcForwardCase::PostOnionDecode); +} + +#[test] +fn upgrade_mid_htlc_intercept_forward() { + do_upgrade_mid_htlc_forward(MidHtlcForwardCase::Intercept); +} + +fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { + // In 0.3, we started reconstructing the `ChannelManager`'s HTLC forwards maps from the HTLCs + // contained in `Channel`s, as part of removing the requirement to regularly persist the + // `ChannelManager`. However, HTLC forwards can only be reconstructed this way if they were + // received on 0.3 or higher. Test that HTLC forwards that were serialized on <=0.2 will still + // succeed when read on 0.3+. + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id, node_c_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let chan_id_bytes_b_c; + + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + + let mut intercept_cfg = lightning_0_2_utils::test_default_channel_config(); + intercept_cfg.accept_intercept_htlcs = true; + let cfgs = &[None, Some(intercept_cfg), None]; + let node_chanmgrs = lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, cfgs); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_bytes_b_c = chan_id_b.0; + + // Ensure all nodes are at the same initial height. 
+ let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_2_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let mut route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + if test == MidHtlcForwardCase::Intercept { + route.paths[0].hops[1].short_channel_id = nodes[1].node.get_intercept_scid(); + } + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + if test != MidHtlcForwardCase::PreOnionDecode { + nodes[1].node.test_process_pending_update_add_htlcs(); + } + let events = nodes[1].node.get_and_clear_pending_events(); + if test == MidHtlcForwardCase::Intercept { + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event_0_2::HTLCIntercepted { .. 
})); + } else { + assert!(events.is_empty()); + } + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = get_monitor_0_2!(nodes[0], chan_id_a).encode(); + mon_b_1_ser = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + mon_c_1_ser = get_monitor_0_2!(nodes[2], chan_id_b).encode(); + } + + // Create a dummy node to reload over with the 0.2 state + let mut chanmon_cfgs = create_chanmon_cfgs(3); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let config = test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + + if test == MidHtlcForwardCase::Intercept { + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let (intercept_id, expected_outbound_amt_msat) = match events[0] { + Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. 
} => { + (intercept_id, expected_outbound_amount_msat) + }, + _ => panic!(), + }; + nodes[1] + .node + .forward_intercepted_htlc( + intercept_id, + &ChannelId(chan_id_bytes_b_c), + nodes[2].node.get_our_node_id(), + expected_outbound_amt_msat, + ) + .unwrap(); + nodes[1].node.process_pending_htlc_forwards(); + } + + let pay_secret = PaymentSecret(payment_secret_bytes); + let pay_hash = PaymentHash(payment_hash_bytes); + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + + check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} From 004ceef48c084eb478547ee6e9f24935b2bb2412 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 08:30:33 +0100 Subject: [PATCH 29/42] Convert convert_funded_channel_err fns to methods --- lightning/src/ln/channelmanager.rs | 188 ++++++++++++++--------------- 1 file changed, 93 insertions(+), 95 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2a89e7b5681..a24f31158e1 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3597,96 +3597,6 @@ fn convert_channel_err_internal< } } -fn convert_funded_channel_err_internal>( - cm: &CM, closed_channel_monitor_update_ids: &mut BTreeMap, - in_flight_monitor_updates: &mut BTreeMap)>, - coop_close_shutdown_res: Option, err: ChannelError, - chan: &mut FundedChannel, -) -> (bool, MsgHandleErrInternal) -where - SP::Target: SignerProvider, - CM::Watch: Watch<::EcdsaSigner>, -{ - let chan_id = chan.context.channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let cm = cm.get_cm(); - let logger = WithChannelContext::from(&cm.logger, &chan.context, None); - - let mut shutdown_res = - if let Some(res) = coop_close_shutdown_res { res } else { chan.force_shutdown(reason) }; - let chan_update = cm.get_channel_update_for_broadcast(chan).ok(); - - log_error!(logger, "Closed channel due to close-required error: {}", msg); - - if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { - handle_new_monitor_update_locked_actions_handled_by_caller!( - cm, - funding_txo, - update, - in_flight_monitor_updates, - chan.context - ); - } - // If there's a possibility that we need to generate further monitor updates for this - // channel, we need to store the last update_id of it. However, we don't want to insert - // into the map (which prevents the `PeerState` from being cleaned up) for channels that - // never even got confirmations (which would open us up to DoS attacks). 
- let update_id = chan.context.get_latest_monitor_update_id(); - let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); - let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); - if funding_confirmed || chan_zero_conf || update_id > 1 { - closed_channel_monitor_update_ids.insert(chan_id, update_id); - } - let mut short_to_chan_info = cm.short_to_chan_info.write().unwrap(); - if let Some(short_id) = chan.funding.get_short_channel_id() { - short_to_chan_info.remove(&short_id); - } else { - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let outbound_alias = chan.context.outbound_scid_alias(); - let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - } - short_to_chan_info.remove(&chan.context.outbound_scid_alias()); - for scid in chan.context.historical_scids() { - short_to_chan_info.remove(scid); - } - - (shutdown_res, chan_update) - }) -} - -fn convert_unfunded_channel_err_internal( - cm: &CM, err: ChannelError, chan: &mut Channel, -) -> (bool, MsgHandleErrInternal) -where - SP::Target: SignerProvider, -{ - let chan_id = chan.context().channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let cm = cm.get_cm(); - let logger = WithChannelContext::from(&cm.logger, chan.context(), None); - - let shutdown_res = chan.force_shutdown(reason); - log_error!(logger, "Closed channel due to close-required error: {}", msg); - cm.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let outbound_alias = chan.context().outbound_scid_alias(); - let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - (shutdown_res, None) - }) -} - /// When a channel is removed, two things need to happen: /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except @@ -3706,7 +3616,7 @@ macro_rules! convert_channel_err { let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; let (close, mut err) = - convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, Some($shutdown_result), reason, $funded_channel); + $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, Some($shutdown_result), reason, $funded_channel); err.dont_send_error_message(); debug_assert!(close); err @@ -3714,20 +3624,20 @@ macro_rules! 
convert_channel_err { ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, $funded_channel) + $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, None, $err, $funded_channel) } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { - convert_unfunded_channel_err_internal($self, $err, $channel) + $self.convert_unfunded_channel_err_internal($err, $channel) } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { match $channel.as_funded_mut() { Some(funded_channel) => { let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, funded_channel) + $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, None, $err, funded_channel) }, None => { - convert_unfunded_channel_err_internal($self, $err, $channel) + $self.convert_unfunded_channel_err_internal($err, $channel) }, } }; @@ -4034,6 +3944,94 @@ where }) } + fn convert_funded_channel_err_internal( + &self, closed_channel_monitor_update_ids: &mut BTreeMap, + in_flight_monitor_updates: &mut BTreeMap)>, + coop_close_shutdown_res: Option, err: ChannelError, + chan: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + let chan_id = chan.context.channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + + let mut shutdown_res = if let Some(res) = coop_close_shutdown_res { + res + } else { + chan.force_shutdown(reason) + }; + let chan_update = self.get_channel_update_for_broadcast(chan).ok(); + + log_error!(logger, "Closed channel due to close-required error: {}", msg); + + if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { + handle_new_monitor_update_locked_actions_handled_by_caller!( + self, + funding_txo, + update, + in_flight_monitor_updates, + chan.context + ); + } + // If there's a possibility that we need to generate further monitor updates for this + // channel, we need to store the last update_id of it. However, we don't want to insert + // into the map (which prevents the `PeerState` from being cleaned up) for channels that + // never even got confirmations (which would open us up to DoS attacks). + let update_id = chan.context.get_latest_monitor_update_id(); + let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); + let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); + if funding_confirmed || chan_zero_conf || update_id > 1 { + closed_channel_monitor_update_ids.insert(chan_id, update_id); + } + let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); + if let Some(short_id) = chan.funding.get_short_channel_id() { + short_to_chan_info.remove(&short_id); + } else { + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. 
While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context.outbound_scid_alias(); + let alias_removed = + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + } + short_to_chan_info.remove(&chan.context.outbound_scid_alias()); + for scid in chan.context.historical_scids() { + short_to_chan_info.remove(scid); + } + + (shutdown_res, chan_update) + }) + } + + fn convert_unfunded_channel_err_internal( + &self, err: ChannelError, chan: &mut Channel, + ) -> (bool, MsgHandleErrInternal) + where + SP::Target: SignerProvider, + { + let chan_id = chan.context().channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, chan.context(), None); + + let shutdown_res = chan.force_shutdown(reason); + log_error!(logger, "Closed channel due to close-required error: {}", msg); + self.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context().outbound_scid_alias(); + let alias_removed = self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + (shutdown_res, None) + }) + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { From 87e01ffdfe6d6bc5d7574f62c808885a2a37a1f6 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 09:03:15 +0100 Subject: [PATCH 30/42] Convert macro to convert_channel_err_coop method --- lightning/src/ln/channelmanager.rs | 45 ++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a24f31158e1..872c11387d8 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3611,16 +3611,6 @@ fn convert_channel_err_internal< /// true). #[rustfmt::skip] macro_rules! 
convert_channel_err { - ($self: ident, $peer_state: expr, $shutdown_result: expr, $funded_channel: expr, COOP_CLOSED) => { { - let reason = ChannelError::Close(("Coop Closed".to_owned(), $shutdown_result.closure_reason.clone())); - let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; - let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - let (close, mut err) = - $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, Some($shutdown_result), reason, $funded_channel); - err.dont_send_error_message(); - debug_assert!(close); - err - } }; ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; @@ -4032,6 +4022,32 @@ where }) } + /// When a cooperatively closed channel is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Returns a mapped error. + fn convert_channel_err_coop( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + shutdown_result: ShutdownResult, funded_channel: &mut FundedChannel, + ) -> MsgHandleErrInternal { + let reason = + ChannelError::Close(("Coop Closed".to_owned(), shutdown_result.closure_reason.clone())); + let (close, mut err) = self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + Some(shutdown_result), + reason, + funded_channel, + ); + err.dont_send_error_message(); + debug_assert!(close); + err + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { @@ -11146,7 +11162,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // also implies there are no pending HTLCs left on the channel, so we can // fully delete it from tracking (the channel monitor is still around to // watch for old state broadcasts)! - let err = convert_channel_err!(self, peer_state, close_res, chan, COOP_CLOSED); + let err = self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, close_res, chan); chan_entry.remove(); Some((tx, Err(err))) } else { @@ -12467,7 +12483,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_trace!(logger, "Removing channel now that the signer is unblocked"); let (remove, err) = if let Some(funded) = chan.as_funded_mut() { let err = - convert_channel_err!(self, peer_state, shutdown, funded, COOP_CLOSED); + self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown, funded); (true, err) } else { debug_assert!(false); @@ -12522,7 +12538,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some((tx, shutdown_res)) = tx_shutdown_result_opt { // We're done with this channel. 
We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. - let err = convert_channel_err!(self, peer_state, shutdown_res, funded_chan, COOP_CLOSED); + let err = self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown_res, funded_chan); handle_errors.push((*cp_id, Err(err))); log_info!(logger, "Broadcasting {}", log_tx!(tx)); @@ -12532,7 +12548,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, Err(e) => { has_update = true; - let (close_channel, res) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL); + let (close_channel, res) = convert_channel_err!( + self, peer_state, e, funded_chan, FUNDED_CHANNEL); handle_errors.push((funded_chan.context.get_counterparty_node_id(), Err(res))); !close_channel } From 36cfb13a5a064d57c4403b03bc3d307eb32cc153 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 09:10:39 +0100 Subject: [PATCH 31/42] Convert macro to convert_channel_err_funded method --- lightning/src/ln/channelmanager.rs | 41 ++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 872c11387d8..e7131c63a76 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3611,11 +3611,6 @@ fn convert_channel_err_internal< /// true). #[rustfmt::skip] macro_rules! convert_channel_err { - ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { - let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; - let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, None, $err, $funded_channel) - } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { $self.convert_unfunded_channel_err_internal($err, $channel) } }; @@ -4048,6 +4043,28 @@ where err } + /// When a funded channel is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn convert_channel_err_funded( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, funded_channel: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ) + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. 
pub fn get_current_config(&self) -> UserConfig { @@ -8192,7 +8209,7 @@ where if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } if let Err(e) = funded_chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL); + let (needs_close, err) = self.convert_channel_err_funded(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); handle_errors.push((Err(err), counterparty_node_id)); if needs_close { return false; } } @@ -12548,8 +12565,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, Err(e) => { has_update = true; - let (close_channel, res) = convert_channel_err!( - self, peer_state, e, funded_chan, FUNDED_CHANNEL); + let (close_channel, res) = self.convert_channel_err_funded( + &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); handle_errors.push((funded_chan.context.get_counterparty_node_id(), Err(res))); !close_channel } @@ -14657,12 +14674,10 @@ where // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!( - self, - peer_state, + let (_, e) = self.convert_channel_err_funded( + &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, - funded_channel, - FUNDED_CHANNEL + funded_channel ); failed_channels.push((Err(e), *counterparty_node_id)); return false; From ec112c4787ee6a1544dd55b29b6ac9a52b44554d Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 09:14:28 +0100 Subject: [PATCH 32/42] Replace macro with direct call to convert_unfunded_channel_err_internal --- lightning/src/ln/channelmanager.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e7131c63a76..5e1fe59a383 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3611,9 +3611,6 @@ fn convert_channel_err_internal< /// true). #[rustfmt::skip] macro_rules! convert_channel_err { - ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { - $self.convert_unfunded_channel_err_internal($err, $channel) - } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { match $channel.as_funded_mut() { Some(funded_channel) => { @@ -10506,7 +10503,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let err = ChannelError::close($err.to_owned()); chan.unset_funding_info(); let mut chan = Channel::from(chan); - return Err(convert_channel_err!(self, peer_state, err, &mut chan, UNFUNDED_CHANNEL).1); + return Err(self.convert_unfunded_channel_err_internal(err, &mut chan).1); } } } match peer_state.channel_by_id.entry(funded_channel_id) { @@ -12506,7 +12503,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ debug_assert!(false); let reason = shutdown.closure_reason.clone(); let err = ChannelError::Close((reason.to_string(), reason)); - convert_channel_err!(self, peer_state, err, chan, UNFUNDED_CHANNEL) + self.convert_unfunded_channel_err_internal(err, chan) }; debug_assert!(remove); shutdown_results.push((Err(err), *cp_id)); From ee426703af6bd09dba258f77a8b6835397c1fa55 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 09:53:27 +0100 Subject: [PATCH 33/42] Convert macro to convert_channel_err method --- lightning/src/ln/channelmanager.rs | 157 +++++++++++++++++++++-------- 1 file changed, 114 insertions(+), 43 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5e1fe59a383..5fd17648034 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3597,40 +3597,17 @@ fn convert_channel_err_internal< } } -/// When a channel is removed, two things need to happen: -/// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, -/// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except -/// [`ChannelManager::total_consistency_lock`]), which then calls -/// [`ChannelManager::finish_close_channel`]. -/// -/// Note that this step can be skipped if the channel was never opened (through the creation of a -/// [`ChannelMonitor`]/channel funding transaction) to begin with. -/// -/// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped -/// error)`, except in the `COOP_CLOSE` case, where the bool is elided (it is always implicitly -/// true). -#[rustfmt::skip] -macro_rules! convert_channel_err { - ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { - match $channel.as_funded_mut() { - Some(funded_channel) => { - let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; - let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, None, $err, funded_channel) - }, - None => { - $self.convert_unfunded_channel_err_internal($err, $channel) - }, - } - }; -} - macro_rules! break_channel_entry { ($self: ident, $peer_state: expr, $res: expr, $entry: expr) => { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); + let (drop, res) = $self.convert_channel_err( + &mut $peer_state.closed_channel_monitor_update_ids, + &mut $peer_state.in_flight_monitor_updates, + e, + $entry.get_mut(), + ); if drop { $entry.remove_entry(); } @@ -3645,7 +3622,12 @@ macro_rules! 
try_channel_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); + let (drop, res) = $self.convert_channel_err( + &mut $peer_state.closed_channel_monitor_update_ids, + &mut $peer_state.in_flight_monitor_updates, + e, + $entry.get_mut(), + ); if drop { $entry.remove_entry(); } @@ -4062,6 +4044,34 @@ where ) } + /// When a channel that can be funded or unfunded is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Note that this step can be skipped if the channel was never opened (through the creation of a + /// [`ChannelMonitor`]/channel funding transaction) to begin with. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn convert_channel_err( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, channel: &mut Channel, + ) -> (bool, MsgHandleErrInternal) { + match channel.as_funded_mut() { + Some(funded_channel) => self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ), + None => self.convert_unfunded_channel_err_internal(err, channel), + } + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { @@ -4405,7 +4415,13 @@ where let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, mut e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); + e.dont_send_error_message(); shutdown_result = Err(e); } @@ -4538,7 +4554,7 @@ where } /// When a channel is removed, two things need to happen: - /// (a) [`convert_channel_err`] must be called in the same `per_peer_state` lock as the + /// (a) [`ChannelManager::convert_channel_err`] must be called in the same `per_peer_state` lock as the /// channel-closing action, /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls this. 
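The recurring refactor in this part of the series is mechanical: a `macro_rules!` helper that captured `$self` and `$peer_state` becomes an inherent method, and because a method taking `&self` cannot also borrow the whole `PeerState`, each call site now passes the two maps the helper needs as separate `&mut` arguments so the field borrows stay disjoint. A minimal, self-contained sketch of that shape, with placeholder types and names rather than LDK's own:

    use std::collections::BTreeMap;

    struct PeerState {
        closed_ids: BTreeMap<u64, u64>,
        in_flight: BTreeMap<u64, Vec<u64>>,
        other: Vec<u64>,
    }

    struct Manager;

    impl Manager {
        // Method form of the old macro: the caller hands us only the two maps,
        // leaving the rest of `PeerState` free to use while we run.
        fn convert_err(
            &self, closed_ids: &mut BTreeMap<u64, u64>,
            in_flight: &mut BTreeMap<u64, Vec<u64>>, chan_id: u64,
        ) -> bool {
            in_flight.remove(&chan_id);
            closed_ids.insert(chan_id, 1);
            true
        }
    }

    fn main() {
        let mgr = Manager;
        let mut peer_state = PeerState {
            closed_ids: BTreeMap::new(),
            in_flight: BTreeMap::new(),
            other: vec![42],
        };
        // Disjoint field borrows, mirroring the call sites that pass
        // `&mut peer_state.closed_channel_monitor_update_ids` and
        // `&mut peer_state.in_flight_monitor_updates` in the hunks above.
        let drop_chan =
            mgr.convert_err(&mut peer_state.closed_ids, &mut peer_state.in_flight, 7);
        assert!(drop_chan);
        assert_eq!(peer_state.other, vec![42]);
    }

The cost is noisier call sites, which is why the follow-up patches drop `#[rustfmt::skip]` and re-run rustfmt over the touched methods.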
@@ -4590,7 +4606,12 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { let reason = ClosureReason::FundingBatchClosure; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); shutdown_results.push((Err(e), counterparty_node_id)); } } @@ -4666,7 +4687,12 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) { log_error!(logger, "Force-closing channel"); let err = ChannelError::Close((message, reason)); - let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, mut e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); mem::drop(peer_state_lock); mem::drop(per_peer_state); if is_from_counterparty { @@ -6444,7 +6470,12 @@ where let err = ChannelError::Close((e.clone(), reason)); let peer_state = &mut *peer_state_lock; let (_, e) = - convert_channel_err!(self, peer_state, err, &mut chan); + self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); shutdown_results.push((Err(e), counterparty_node_id)); }); } @@ -8283,7 +8314,12 @@ where let reason = ClosureReason::FundingTimedOut; let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); let err = ChannelError::Close((msg, reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + chan, + ); handle_errors.push((Err(e), counterparty_node_id)); false } else { @@ -10481,14 +10517,24 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // concerning this channel as it is safe to do so. debug_assert!(matches!(err, ChannelError::Close(_))); let mut chan = Channel::from(inbound_chan); - return Err(convert_channel_err!(self, peer_state, err, &mut chan).1); + return Err(self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ).1); }, } }, Some(Err(mut chan)) => { let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); let err = ChannelError::close(err_msg); - return Err(convert_channel_err!(self, peer_state, err, &mut chan).1); + return Err(self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ).1); }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) }; @@ -11116,7 +11162,12 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, mut e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); e.dont_send_error_message(); return Err(e); }, @@ -12272,7 +12323,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); failed_channels.push((Err(e), counterparty_node_id)); } } @@ -12288,7 +12344,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let reason = ClosureReason::CommitmentTxConfirmed; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); failed_channels.push((Err(e), counterparty_node_id)); } } @@ -12485,7 +12546,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ _ => match unblock_chan(chan, &mut peer_state.pending_msg_events) { Ok(shutdown_result) => shutdown_result, Err(err) => { - let (_, err) = convert_channel_err!(self, peer_state, err, chan); + let (_, err) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + chan, + ); shutdown_results.push((Err(err), *cp_id)); return false; }, @@ -13930,7 +13996,12 @@ where // Clean up for removal. 
let reason = ClosureReason::DisconnectedPeer; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + chan, + ); failed_channels.push((Err(e), counterparty_node_id)); false }); From 7fe270b069b74d8cde53bf07f22bd5dd22b385a1 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 10:28:49 +0100 Subject: [PATCH 34/42] Remove rustfmt::skip from touched methods --- lightning/src/ln/channelmanager.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5fd17648034..c78a469100e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4364,7 +4364,6 @@ where .collect() } - #[rustfmt::skip] fn close_channel_internal(&self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -4558,7 +4557,6 @@ where /// channel-closing action, /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls this. - #[rustfmt::skip] fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) { debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); #[cfg(debug_assertions)] @@ -4668,7 +4666,6 @@ where /// `peer_msg` should be set when we receive a message from a peer, but not set when the /// user closes, which will be re-exposed as the `ChannelClosed` reason. - #[rustfmt::skip] fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -6354,7 +6351,6 @@ where self.batch_funding_transaction_generated_intern(temporary_channels, funding_type) } - #[rustfmt::skip] fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> { let mut result = Ok(()); if let FundingType::Checked(funding_transaction) | @@ -10490,7 +10486,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(()) } - #[rustfmt::skip] fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { let best_block = *self.best_block.read().unwrap(); @@ -12443,7 +12438,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// attempted in every channel, or in the specifically provided channel. /// /// [`ChannelSigner`]: crate::sign::ChannelSigner - #[rustfmt::skip] pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -12588,7 +12582,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// Check whether any channels have finished removing all pending updates after a shutdown /// exchange and can now send a closing_signed. 
/// Returns whether any closing_signed messages were generated. - #[rustfmt::skip] fn maybe_generate_initial_closing_signed(&self) -> bool { let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); let mut has_update = false; @@ -14580,7 +14573,6 @@ where /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by /// the function. - #[rustfmt::skip] fn do_chain_event) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason>> (&self, height_opt: Option, f: FN) { // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called From d436cbf5e11289c41d59fc0eb3d5c6e4e54b5179 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 10:30:50 +0100 Subject: [PATCH 35/42] Rustfmt touched methods --- lightning/src/ln/channelmanager.rs | 557 ++++++++++++++++++----------- 1 file changed, 353 insertions(+), 204 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index c78a469100e..4a2ebf730f5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4364,7 +4364,11 @@ where .collect() } - fn close_channel_internal(&self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { + fn close_channel_internal( + &self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, + target_feerate_sats_per_1000_weight: Option, + override_shutdown_script: Option, + ) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new(); @@ -4390,8 +4394,12 @@ where if let Some(chan) = chan_entry.get_mut().as_funded_mut() { let funding_txo_opt = chan.funding.get_funding_txo(); let their_features = &peer_state.latest_features; - let (shutdown_msg, mut monitor_update_opt, htlcs) = - chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; + let (shutdown_msg, mut monitor_update_opt, htlcs) = chan.get_shutdown( + &self.signer_provider, + their_features, + target_feerate_sats_per_1000_weight, + override_shutdown_script, + )?; failed_htlcs = htlcs; // We can send the `shutdown` message before updating the `ChannelMonitor` @@ -4402,13 +4410,22 @@ where msg: shutdown_msg, }); - debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(), - "We can't both complete shutdown and generate a monitor update"); + debug_assert!( + monitor_update_opt.is_none() || !chan.is_shutdown(), + "We can't both complete shutdown and generate a monitor update" + ); // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt.take() { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + handle_new_monitor_update!( + self, + funding_txo_opt.unwrap(), + monitor_update, + peer_state_lock, + peer_state, + per_peer_state, + chan + ); } } else { let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; @@ -4430,7 +4447,7 @@ where err: format!( "Channel with id {} not found for the passed counterparty node_id {}", chan_id, counterparty_node_id, - ) + ), }); }, } @@ -4439,7 +4456,10 @@ where for htlc_source in failed_htlcs.drain(..) 
{ let failure_reason = LocalHTLCFailureReason::ChannelClosed; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(*counterparty_node_id), channel_id: *chan_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(*counterparty_node_id), + channel_id: *chan_id, + }; let (source, hash) = htlc_source; self.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None); } @@ -4565,21 +4585,36 @@ where } let logger = WithContext::from( - &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None + &self.logger, + Some(shutdown_res.counterparty_node_id), + Some(shutdown_res.channel_id), + None, ); - log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail", - shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len()); + log_debug!( + logger, + "Finishing closure of channel due to {} with {} HTLCs to fail", + shutdown_res.closure_reason, + shutdown_res.dropped_outbound_htlcs.len() + ); for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::ChannelClosed; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(counterparty_node_id), + channel_id, + }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None); } if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { debug_assert!(false, "This should have been handled in `convert_channel_err`"); - self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update); + self.apply_post_close_monitor_update( + shutdown_res.counterparty_node_id, + shutdown_res.channel_id, + funding_txo, + monitor_update, + ); } if self.background_events_processed_since_startup.load(Ordering::Acquire) { // If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are @@ -4588,7 +4623,11 @@ where // TODO: If we do the `in_flight_monitor_updates.is_empty()` check in // `convert_channel_err` we can skip the locks here. 
if shutdown_res.channel_funding_txo.is_some() { - self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id); + self.channel_monitor_updated( + &shutdown_res.channel_id, + None, + &shutdown_res.counterparty_node_id, + ); } } let mut shutdown_results: Vec<(Result, _)> = Vec::new(); @@ -4613,7 +4652,8 @@ where shutdown_results.push((Err(e), counterparty_node_id)); } } - has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); + has_uncompleted_channel = + Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); } debug_assert!( has_uncompleted_channel.unwrap_or(true), @@ -4623,26 +4663,32 @@ where { let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::ChannelClosed { - channel_id: shutdown_res.channel_id, - user_channel_id: shutdown_res.user_channel_id, - reason: shutdown_res.closure_reason, - counterparty_node_id: Some(shutdown_res.counterparty_node_id), - channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis), - channel_funding_txo: shutdown_res.channel_funding_txo, - last_local_balance_msat: Some(shutdown_res.last_local_balance_msat), - }, None)); - - if let Some(splice_funding_failed) = shutdown_res.splice_funding_failed.take() { - pending_events.push_back((events::Event::SpliceFailed { + pending_events.push_back(( + events::Event::ChannelClosed { channel_id: shutdown_res.channel_id, - counterparty_node_id: shutdown_res.counterparty_node_id, user_channel_id: shutdown_res.user_channel_id, - abandoned_funding_txo: splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type, - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, None)); + reason: shutdown_res.closure_reason, + counterparty_node_id: Some(shutdown_res.counterparty_node_id), + channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis), + channel_funding_txo: shutdown_res.channel_funding_txo, + last_local_balance_msat: Some(shutdown_res.last_local_balance_msat), + }, + None, + )); + + if let Some(splice_funding_failed) = shutdown_res.splice_funding_failed.take() { + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id: shutdown_res.channel_id, + counterparty_node_id: shutdown_res.counterparty_node_id, + user_channel_id: shutdown_res.user_channel_id, + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); } if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx { @@ -4652,11 +4698,15 @@ where .expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"), } } else { - FundingInfo::Tx{ transaction } + FundingInfo::Tx { transaction } }; - pending_events.push_back((events::Event::DiscardFunding { - channel_id: shutdown_res.channel_id, funding_info - }, None)); + pending_events.push_back(( + events::Event::DiscardFunding { + channel_id: shutdown_res.channel_id, + funding_info, + }, + None, + )); } } for (err, counterparty_node_id) in shutdown_results.drain(..) { @@ -4666,11 +4716,17 @@ where /// `peer_msg` should be set when we receive a message from a peer, but not set when the /// user closes, which will be re-exposed as the `ChannelClosed` reason. 
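As the doc comment above notes, the closure reason is re-exposed to the user through `Event::ChannelClosed`, and an unbroadcast funding transaction additionally produces `Event::DiscardFunding` (both queued in the `finish_close_channel` body being reformatted here). A hedged sketch of consuming those events on the user side, matching fields loosely so it does not depend on the exact field set; it is illustrative only and not code from this series:

    // Assumes the `lightning` crate's `Event` enum as it appears in this patch.
    use lightning::events::Event;

    fn handle_event(event: Event) {
        match event {
            Event::ChannelClosed { channel_id, reason, .. } => {
                // `reason` carries the ClosureReason logged above.
                println!("channel {} closed: {}", channel_id, reason);
            },
            Event::DiscardFunding { channel_id, .. } => {
                // The funding tx was never broadcast; the wallet may reclaim its inputs.
                println!("discard funding for channel {}", channel_id);
            },
            _ => {},
        }
    }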
- fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason) - -> Result<(), APIError> { + fn force_close_channel_with_peer( + &self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason, + ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(peer_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?; + let peer_state_mutex = + per_peer_state.get(peer_node_id).ok_or_else(|| APIError::ChannelUnavailable { + err: format!( + "Can't find a peer matching the passed counterparty node_id {}", + peer_node_id + ), + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None); @@ -4702,21 +4758,24 @@ where } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { log_error!(logger, "Force-closing inbound channel request"); if !is_from_counterparty && peer_state.is_connected { - peer_state.pending_msg_events.push( - MessageSendEvent::HandleError { - node_id: *peer_node_id, - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: *channel_id, data: message } - }, - } - ); + peer_state.pending_msg_events.push(MessageSendEvent::HandleError { + node_id: *peer_node_id, + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: *channel_id, data: message }, + }, + }); } // N.B. that we don't send any channel close event here: we // don't have a user_channel_id, and we never sent any opening // events anyway. 
Ok(()) } else { - Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) }) + Err(APIError::ChannelUnavailable { + err: format!( + "Channel with id {} not found for the passed counterparty node_id {}", + channel_id, peer_node_id + ), + }) } } @@ -6351,16 +6410,20 @@ where self.batch_funding_transaction_generated_intern(temporary_channels, funding_type) } - fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> { + fn batch_funding_transaction_generated_intern( + &self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType, + ) -> Result<(), APIError> { let mut result = Ok(()); - if let FundingType::Checked(funding_transaction) | - FundingType::CheckedManualBroadcast(funding_transaction) = &funding + if let FundingType::Checked(funding_transaction) + | FundingType::CheckedManualBroadcast(funding_transaction) = &funding { if !funding_transaction.is_coinbase() { for inp in funding_transaction.input.iter() { if inp.witness.is_empty() { result = result.and(Err(APIError::APIMisuseError { - err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned() + err: + "Funding transaction must be fully signed and spend Segwit outputs" + .to_owned(), })); } } @@ -6368,7 +6431,8 @@ where if funding_transaction.output.len() > u16::max_value() as usize { result = result.and(Err(APIError::APIMisuseError { - err: "Transaction had more than 2^16 outputs, which is not supported".to_owned() + err: "Transaction had more than 2^16 outputs, which is not supported" + .to_owned(), })); } let height = self.best_block.read().unwrap().height; @@ -6376,97 +6440,109 @@ where // lower than the next block height. However, the modules constituting our Lightning // node might not have perfect sync about their blockchain views. Thus, if the wallet // module is ahead of LDK, only allow one more block of headroom. 
- if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && - funding_transaction.lock_time.is_block_height() && - funding_transaction.lock_time.to_consensus_u32() > height + 1 + if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) + && funding_transaction.lock_time.is_block_height() + && funding_transaction.lock_time.to_consensus_u32() > height + 1 { result = result.and(Err(APIError::APIMisuseError { - err: "Funding transaction absolute timelock is non-final".to_owned() + err: "Funding transaction absolute timelock is non-final".to_owned(), })); } } let txid = funding.txid(); let is_batch_funding = temporary_channels.len() > 1; - let mut funding_batch_states = if is_batch_funding { - Some(self.funding_batch_states.lock().unwrap()) - } else { - None - }; - let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| { - match states.entry(txid) { - btree_map::Entry::Occupied(_) => { - result = result.clone().and(Err(APIError::APIMisuseError { - err: "Batch funding transaction with the same txid already exists".to_owned() - })); - None - }, - btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())), - } + let mut funding_batch_states = + if is_batch_funding { Some(self.funding_batch_states.lock().unwrap()) } else { None }; + let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| match states + .entry(txid) + { + btree_map::Entry::Occupied(_) => { + result = result.clone().and(Err(APIError::APIMisuseError { + err: "Batch funding transaction with the same txid already exists".to_owned(), + })); + None + }, + btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())), }); let is_manual_broadcast = funding.is_manual_broadcast(); for &(temporary_channel_id, counterparty_node_id) in temporary_channels { - result = result.and_then(|_| self.funding_transaction_generated_intern( - *temporary_channel_id, - *counterparty_node_id, - funding.transaction_or_dummy(), - is_batch_funding, - |chan| { - let mut output_index = None; - let expected_spk = chan.funding.get_funding_redeemscript().to_p2wsh(); - let outpoint = match &funding { - FundingType::Checked(tx) | FundingType::CheckedManualBroadcast(tx) => { - for (idx, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.funding.get_value_satoshis() { - if output_index.is_some() { - return Err("Multiple outputs matched the expected script and value"); + result = result.and_then(|_| { + self.funding_transaction_generated_intern( + *temporary_channel_id, + *counterparty_node_id, + funding.transaction_or_dummy(), + is_batch_funding, + |chan| { + let mut output_index = None; + let expected_spk = chan.funding.get_funding_redeemscript().to_p2wsh(); + let outpoint = match &funding { + FundingType::Checked(tx) | FundingType::CheckedManualBroadcast(tx) => { + for (idx, outp) in tx.output.iter().enumerate() { + if outp.script_pubkey == expected_spk + && outp.value.to_sat() == chan.funding.get_value_satoshis() + { + if output_index.is_some() { + return Err("Multiple outputs matched the expected script and value"); + } + output_index = Some(idx as u16); } - output_index = Some(idx as u16); } - } - if output_index.is_none() { - return Err("No output matched the script_pubkey and value in the FundingGenerationReady event"); - } - OutPoint { txid, index: output_index.unwrap() } - }, - FundingType::Unchecked(outpoint) => outpoint.clone(), - }; - if let Some(funding_batch_state) = 
funding_batch_state.as_mut() { - // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably - // need to fix this somehow to not rely on using the outpoint for the channel ID if we - // want to support V2 batching here as well. - funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false)); - } - Ok(outpoint) - }, - is_manual_broadcast) - ); + if output_index.is_none() { + return Err("No output matched the script_pubkey and value in the FundingGenerationReady event"); + } + OutPoint { txid, index: output_index.unwrap() } + }, + FundingType::Unchecked(outpoint) => outpoint.clone(), + }; + if let Some(funding_batch_state) = funding_batch_state.as_mut() { + // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably + // need to fix this somehow to not rely on using the outpoint for the channel ID if we + // want to support V2 batching here as well. + funding_batch_state.push(( + ChannelId::v1_from_funding_outpoint(outpoint), + *counterparty_node_id, + false, + )); + } + Ok(outpoint) + }, + is_manual_broadcast, + ) + }); } if let Err(ref e) = result { // Remaining channels need to be removed on any error. let e = format!("Error in transaction funding: {:?}", e); let mut channels_to_remove = Vec::new(); - channels_to_remove.extend(funding_batch_states.as_mut() - .and_then(|states| states.remove(&txid)) - .into_iter().flatten() - .map(|(chan_id, node_id, _state)| (chan_id, node_id)) - ); - channels_to_remove.extend(temporary_channels.iter() - .map(|(&chan_id, &node_id)| (chan_id, node_id)) + channels_to_remove.extend( + funding_batch_states + .as_mut() + .and_then(|states| states.remove(&txid)) + .into_iter() + .flatten() + .map(|(chan_id, node_id, _state)| (chan_id, node_id)), ); + channels_to_remove + .extend(temporary_channels.iter().map(|(&chan_id, &node_id)| (chan_id, node_id))); let mut shutdown_results: Vec<(Result, _)> = Vec::new(); { let per_peer_state = self.per_peer_state.read().unwrap(); for (channel_id, counterparty_node_id) in channels_to_remove { - per_peer_state.get(&counterparty_node_id) + per_peer_state + .get(&counterparty_node_id) .map(|peer_state_mutex| peer_state_mutex.lock().unwrap()) - .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state))) + .and_then(|mut peer_state| { + peer_state + .channel_by_id + .remove(&channel_id) + .map(|chan| (chan, peer_state)) + }) .map(|(mut chan, mut peer_state_lock)| { let reason = ClosureReason::ProcessingError { err: e.clone() }; let err = ChannelError::Close((e.clone(), reason)); let peer_state = &mut *peer_state_lock; - let (_, e) = - self.convert_channel_err( + let (_, e) = self.convert_channel_err( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -10486,7 +10562,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(()) } - fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { + fn internal_funding_created( + &self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated, + ) -> Result<(), MsgHandleErrInternal> { let best_block = *self.best_block.read().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); @@ -10536,16 +10614,18 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let funded_channel_id = chan.context.channel_id(); - macro_rules! fail_chan { ($err: expr) => { { - // Note that at this point we've filled in the funding outpoint on our channel, but its - // actually in conflict with another channel. Thus, if we call `convert_channel_err` - // immediately, we'll remove the existing channel from `outpoint_to_peer`. - // Thus, we must first unset the funding outpoint on the channel. - let err = ChannelError::close($err.to_owned()); - chan.unset_funding_info(); - let mut chan = Channel::from(chan); - return Err(self.convert_unfunded_channel_err_internal(err, &mut chan).1); - } } } + macro_rules! fail_chan { + ($err: expr) => {{ + // Note that at this point we've filled in the funding outpoint on our channel, but its + // actually in conflict with another channel. Thus, if we call `convert_channel_err` + // immediately, we'll remove the existing channel from `outpoint_to_peer`. + // Thus, we must first unset the funding outpoint on the channel. + let err = ChannelError::close($err.to_owned()); + chan.unset_funding_info(); + let mut chan = Channel::from(chan); + return Err(self.convert_unfunded_channel_err_internal(err, &mut chan).1); + }}; + } match peer_state.channel_by_id.entry(funded_channel_id) { hash_map::Entry::Occupied(_) => { @@ -10566,8 +10646,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(funded_chan) = e.insert(Channel::from(chan)).as_funded_mut() { - handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, - per_peer_state, funded_chan); + handle_initial_monitor!( + self, + persist_state, + peer_state_lock, + peer_state, + per_peer_state, + funded_chan + ); } else { unreachable!("This must be a funded channel as we just inserted it."); } @@ -10577,7 +10663,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated"); fail_chan!("Duplicate channel ID"); } - } + }, } } @@ -12442,48 +12528,46 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); // Returns whether we should remove this channel as it's just been closed. 
- let unblock_chan = |chan: &mut Channel, pending_msg_events: &mut Vec| -> Result, ChannelError> { + let unblock_chan = |chan: &mut Channel, + pending_msg_events: &mut Vec| + -> Result, ChannelError> { let channel_id = chan.context().channel_id(); let outbound_scid_alias = chan.context().outbound_scid_alias(); let logger = WithChannelContext::from(&self.logger, &chan.context(), None); let node_id = chan.context().get_counterparty_node_id(); - let cbp = |htlc_id| self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &node_id); + let cbp = |htlc_id| { + self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &node_id) + }; let msgs = chan.signer_maybe_unblocked(self.chain_hash, &&logger, cbp)?; if let Some(msgs) = msgs { if chan.context().is_connected() { if let Some(msg) = msgs.open_channel { - pending_msg_events.push(MessageSendEvent::SendOpenChannel { - node_id, - msg, - }); + pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id, msg }); } if let Some(msg) = msgs.funding_created { - pending_msg_events.push(MessageSendEvent::SendFundingCreated { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendFundingCreated { node_id, msg }); } if let Some(msg) = msgs.accept_channel { - pending_msg_events.push(MessageSendEvent::SendAcceptChannel { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendAcceptChannel { node_id, msg }); } - let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs { - node_id, - channel_id, - updates, - }); - let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK { - node_id, - msg, + let cu_msg = msgs.commitment_update.map(|updates| { + MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates } }); + let raa_msg = msgs + .revoke_and_ack + .map(|msg| MessageSendEvent::SendRevokeAndACK { node_id, msg }); match (cu_msg, raa_msg) { - (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => { + (Some(cu), Some(raa)) + if msgs.order == RAACommitmentOrder::CommitmentFirst => + { pending_msg_events.push(cu); pending_msg_events.push(raa); }, - (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => { + (Some(cu), Some(raa)) + if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => + { pending_msg_events.push(raa); pending_msg_events.push(cu); }, @@ -12492,16 +12576,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ (_, _) => {}, } if let Some(msg) = msgs.funding_signed { - pending_msg_events.push(MessageSendEvent::SendFundingSigned { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendFundingSigned { node_id, msg }); } if let Some(msg) = msgs.closing_signed { - pending_msg_events.push(MessageSendEvent::SendClosingSigned { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendClosingSigned { node_id, msg }); } } if let Some(funded_chan) = chan.as_funded() { @@ -12529,7 +12609,9 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| { if let Some((counterparty_node_id, _)) = channel_opt { **cp_id == counterparty_node_id - } else { true } + } else { + true + } }); for (cp_id, peer_state_mutex) in per_peer_state_iter { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -12541,7 +12623,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(shutdown_result) => shutdown_result, Err(err) => { let (_, err) = self.convert_channel_err( - &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, chan, @@ -12556,8 +12638,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let logger = WithChannelContext::from(&self.logger, context, None); log_trace!(logger, "Removing channel now that the signer is unblocked"); let (remove, err) = if let Some(funded) = chan.as_funded_mut() { - let err = - self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown, funded); + let err = self.convert_channel_err_coop( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + shutdown, + funded, + ); (true, err) } else { debug_assert!(false); @@ -12598,34 +12684,59 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } match chan.as_funded_mut() { Some(funded_chan) => { - let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None); - match funded_chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) { + let logger = + WithChannelContext::from(&self.logger, &funded_chan.context, None); + match funded_chan + .maybe_propose_closing_signed(&self.fee_estimator, &&logger) + { Ok((msg_opt, tx_shutdown_result_opt)) => { if let Some(msg) = msg_opt { has_update = true; - pending_msg_events.push(MessageSendEvent::SendClosingSigned { - node_id: funded_chan.context.get_counterparty_node_id(), msg, - }); + pending_msg_events.push( + MessageSendEvent::SendClosingSigned { + node_id: funded_chan + .context + .get_counterparty_node_id(), + msg, + }, + ); } - debug_assert_eq!(tx_shutdown_result_opt.is_some(), funded_chan.is_shutdown()); + debug_assert_eq!( + tx_shutdown_result_opt.is_some(), + funded_chan.is_shutdown() + ); if let Some((tx, shutdown_res)) = tx_shutdown_result_opt { // We're done with this channel. We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. 
- let err = self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown_res, funded_chan); + let err = self.convert_channel_err_coop( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + shutdown_res, + funded_chan, + ); handle_errors.push((*cp_id, Err(err))); log_info!(logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); false - } else { true } + } else { + true + } }, Err(e) => { has_update = true; let (close_channel, res) = self.convert_channel_err_funded( - &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); - handle_errors.push((funded_chan.context.get_counterparty_node_id(), Err(res))); + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + e, + funded_chan, + ); + handle_errors.push(( + funded_chan.context.get_counterparty_node_id(), + Err(res), + )); !close_channel - } + }, } }, None => true, // Retain unfunded channels if present. @@ -14573,8 +14684,20 @@ where /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by /// the function. - fn do_chain_event) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason>> - (&self, height_opt: Option, f: FN) { + fn do_chain_event< + FN: Fn( + &mut FundedChannel, + ) -> Result< + ( + Option, + Vec<(HTLCSource, PaymentHash)>, + Option, + ), + ClosureReason, + >, + >( + &self, height_opt: Option, f: FN, + ) { // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called // during initialization prior to the chain_monitor being fully configured in some cases. // See the docs for `ChannelManagerReadArgs` for more. @@ -14754,22 +14877,34 @@ where } if let Some(height) = height_opt { - self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| { - payment.htlcs.retain(|htlc| { - // If height is approaching the number of blocks we think it takes us to get - // our commitment transaction confirmed before the HTLC expires, plus the - // number of blocks we generally consider it to take to do a commitment update, - // just give up on it and fail the HTLC. - if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { - let reason = LocalHTLCFailureReason::PaymentClaimBuffer; - timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), - HTLCFailReason::reason(reason, invalid_payment_err_data(htlc.value, height)), - HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() })); - false - } else { true } - }); - !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. - }); + self.claimable_payments.lock().unwrap().claimable_payments.retain( + |payment_hash, payment| { + payment.htlcs.retain(|htlc| { + // If height is approaching the number of blocks we think it takes us to get + // our commitment transaction confirmed before the HTLC expires, plus the + // number of blocks we generally consider it to take to do a commitment update, + // just give up on it and fail the HTLC. 
+ if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { + let reason = LocalHTLCFailureReason::PaymentClaimBuffer; + timed_out_htlcs.push(( + HTLCSource::PreviousHopData(htlc.prev_hop.clone()), + payment_hash.clone(), + HTLCFailReason::reason( + reason, + invalid_payment_err_data(htlc.value, height), + ), + HTLCHandlingFailureType::Receive { + payment_hash: payment_hash.clone(), + }, + )); + false + } else { + true + } + }); + !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. + }, + ); let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); intercepted_htlcs.retain(|_, htlc| { @@ -14779,15 +14914,29 @@ where PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, _ => unreachable!(), }; - timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, - HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ForwardExpiryBuffer), - HTLCHandlingFailureType::InvalidForward { requested_forward_scid })); + timed_out_htlcs.push(( + prev_hop_data, + htlc.forward_info.payment_hash, + HTLCFailReason::from_failure_code( + LocalHTLCFailureReason::ForwardExpiryBuffer, + ), + HTLCHandlingFailureType::InvalidForward { requested_forward_scid }, + )); let logger = WithContext::from( - &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash) + &self.logger, + None, + Some(htlc.prev_channel_id), + Some(htlc.forward_info.payment_hash), + ); + log_trace!( + logger, + "Timing out intercepted HTLC with requested forward scid {}", + requested_forward_scid ); - log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); false - } else { true } + } else { + true + } }); } From 6ff720b9f9b9fed39a951237a25675295ef50258 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 11 Dec 2025 15:52:25 +0000 Subject: [PATCH 36/42] Allow clippy's new assertions-on-constants lint This is really dumb, `assert!(cfg!(fuzzing))` is a perfectly reasonable thing to write! --- ci/check-lint.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/check-lint.sh b/ci/check-lint.sh index c1f1b08a1e1..c0724267bf8 100755 --- a/ci/check-lint.sh +++ b/ci/check-lint.sh @@ -13,6 +13,7 @@ CLIPPY() { -A clippy::unwrap-or-default \ -A clippy::upper_case_acronyms \ -A clippy::swap-with-temporary \ + -A clippy::assertions-on-constants \ `# Things where we do odd stuff on purpose ` \ -A clippy::unusual_byte_groupings \ -A clippy::unit_arg \ From 3247fad63331df38dd4c514eff29cb5ce34affb6 Mon Sep 17 00:00:00 2001 From: elnosh Date: Thu, 11 Dec 2025 14:06:03 -0500 Subject: [PATCH 37/42] Convert send_channel_ready macro to method --- lightning/src/ln/channelmanager.rs | 51 ++++++++++++++++-------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 87f42a90fa7..bfaf1e68d6a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3201,24 +3201,6 @@ pub struct PhantomRouteHints { pub real_node_pubkey: PublicKey, } -macro_rules! 
send_channel_ready { - ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ - if $channel.context.is_connected() { - $pending_msg_events.push(MessageSendEvent::SendChannelReady { - node_id: $channel.context.get_counterparty_node_id(), - msg: $channel_ready_msg, - }); - } - // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so - // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. - let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id())); - assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()), - "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); - insert_short_channel_id!(short_to_chan_info, $channel); - }} -} - macro_rules! insert_short_channel_id { ($short_to_chan_info: ident, $channel: expr) => {{ if let Some(real_scid) = $channel.funding.get_short_channel_id() { @@ -4091,6 +4073,29 @@ where } } + fn send_channel_ready( + &self, pending_msg_events: &mut Vec, channel: &FundedChannel, + channel_ready_msg: msgs::ChannelReady, + ) { + let counterparty_node_id = channel.context.get_counterparty_node_id(); + if channel.context.is_connected() { + pending_msg_events.push(MessageSendEvent::SendChannelReady { + node_id: counterparty_node_id, + msg: channel_ready_msg, + }); + } + // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so + // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. + let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); + let outbound_alias_insert = short_to_chan_info.insert( + channel.context.outbound_scid_alias(), + (counterparty_node_id, channel.context.channel_id()), + ); + assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == (counterparty_node_id, channel.context.channel_id()), + "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); + insert_short_channel_id!(short_to_chan_info, channel); + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { @@ -9832,7 +9837,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if channel.context.is_connected() { if let ChannelReadyOrder::ChannelReadyFirst = channel_ready_order { if let Some(msg) = &channel_ready { - send_channel_ready!(self, pending_msg_events, channel, msg.clone()); + self.send_channel_ready(pending_msg_events, channel, msg.clone()); } if let Some(msg) = &announcement_sigs { @@ -9887,7 +9892,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let ChannelReadyOrder::SignaturesFirst = channel_ready_order { if let Some(msg) = channel_ready { - send_channel_ready!(self, pending_msg_events, channel, msg); + self.send_channel_ready(pending_msg_events, channel, msg); } if let Some(msg) = announcement_sigs { @@ -9898,7 +9903,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } } else if let Some(msg) = channel_ready { - send_channel_ready!(self, pending_msg_events, channel, msg); + self.send_channel_ready(pending_msg_events, channel, msg); } if let Some(tx) = funding_broadcastable { @@ -12598,7 +12603,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(funded_chan) = chan.as_funded() { if let Some(msg) = msgs.channel_ready { - send_channel_ready!(self, pending_msg_events, funded_chan, msg); + self.send_channel_ready(pending_msg_events, funded_chan, msg); } if let Some(broadcast_tx) = msgs.signed_closing_tx { log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx)); @@ -14740,7 +14745,7 @@ where let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None); match funding_confirmed_opt { Some(FundingConfirmedMessage::Establishment(channel_ready)) => { - send_channel_ready!(self, pending_msg_events, funded_channel, channel_ready); + self.send_channel_ready(pending_msg_events, funded_channel, channel_ready); if funded_channel.context.is_usable() && peer_state.is_connected { log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty"); if let Ok((msg, _, _)) = self.get_channel_update_for_unicast(funded_channel) { From 7fb84e66d8c18898c9081e198abc0f74052b072a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 12 Dec 2025 08:32:14 +0100 Subject: [PATCH 38/42] Group channel closure methods together This commit is a pure move. --- lightning/src/ln/channelmanager.rs | 460 ++++++++++++++--------------- 1 file changed, 230 insertions(+), 230 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bfaf1e68d6a..af2408726ce 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3843,236 +3843,6 @@ where } } - /// Handles an error by closing the channel if required and generating peer messages. - fn handle_error( - &self, internal: Result, counterparty_node_id: PublicKey, - ) -> Result { - // In testing, ensure there are no deadlocks where the lock is already held upon - // entering the macro. 
- debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); - debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); - - internal.map_err(|err_internal| { - let mut msg_event = None; - - if let Some((shutdown_res, update_option)) = err_internal.shutdown_finish { - let counterparty_node_id = shutdown_res.counterparty_node_id; - let channel_id = shutdown_res.channel_id; - let logger = WithContext::from( - &self.logger, - Some(counterparty_node_id), - Some(channel_id), - None, - ); - log_error!(logger, "Closing channel: {}", err_internal.err.err); - - self.finish_close_channel(shutdown_res); - if let Some((update, node_id_1, node_id_2)) = update_option { - let mut pending_broadcast_messages = - self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update, - node_id_1, - node_id_2, - }); - } - } else { - log_error!(self.logger, "Got non-closing error: {}", err_internal.err.err); - } - - if let msgs::ErrorAction::IgnoreError = err_internal.err.action { - if let Some(tx_abort) = err_internal.tx_abort { - msg_event = Some(MessageSendEvent::SendTxAbort { - node_id: counterparty_node_id, - msg: tx_abort, - }); - } - } else { - msg_event = Some(MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: err_internal.err.action.clone(), - }); - } - - if let Some(msg_event) = msg_event { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - if peer_state.is_connected { - peer_state.pending_msg_events.push(msg_event); - } - } - } - - // Return error in case higher-API need one - err_internal.err - }) - } - - fn convert_funded_channel_err_internal( - &self, closed_channel_monitor_update_ids: &mut BTreeMap, - in_flight_monitor_updates: &mut BTreeMap)>, - coop_close_shutdown_res: Option, err: ChannelError, - chan: &mut FundedChannel, - ) -> (bool, MsgHandleErrInternal) { - let chan_id = chan.context.channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); - - let mut shutdown_res = if let Some(res) = coop_close_shutdown_res { - res - } else { - chan.force_shutdown(reason) - }; - let chan_update = self.get_channel_update_for_broadcast(chan).ok(); - - log_error!(logger, "Closed channel due to close-required error: {}", msg); - - if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { - handle_new_monitor_update_locked_actions_handled_by_caller!( - self, - funding_txo, - update, - in_flight_monitor_updates, - chan.context - ); - } - // If there's a possibility that we need to generate further monitor updates for this - // channel, we need to store the last update_id of it. However, we don't want to insert - // into the map (which prevents the `PeerState` from being cleaned up) for channels that - // never even got confirmations (which would open us up to DoS attacks). 
- let update_id = chan.context.get_latest_monitor_update_id(); - let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); - let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); - if funding_confirmed || chan_zero_conf || update_id > 1 { - closed_channel_monitor_update_ids.insert(chan_id, update_id); - } - let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); - if let Some(short_id) = chan.funding.get_short_channel_id() { - short_to_chan_info.remove(&short_id); - } else { - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let outbound_alias = chan.context.outbound_scid_alias(); - let alias_removed = - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - } - short_to_chan_info.remove(&chan.context.outbound_scid_alias()); - for scid in chan.context.historical_scids() { - short_to_chan_info.remove(scid); - } - - (shutdown_res, chan_update) - }) - } - - fn convert_unfunded_channel_err_internal( - &self, err: ChannelError, chan: &mut Channel, - ) -> (bool, MsgHandleErrInternal) - where - SP::Target: SignerProvider, - { - let chan_id = chan.context().channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let logger = WithChannelContext::from(&self.logger, chan.context(), None); - - let shutdown_res = chan.force_shutdown(reason); - log_error!(logger, "Closed channel due to close-required error: {}", msg); - self.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let outbound_alias = chan.context().outbound_scid_alias(); - let alias_removed = self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - (shutdown_res, None) - }) - } - - /// When a cooperatively closed channel is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. - /// - /// Returns a mapped error. 
- fn convert_channel_err_coop( - &self, closed_update_ids: &mut BTreeMap, - in_flight_updates: &mut BTreeMap)>, - shutdown_result: ShutdownResult, funded_channel: &mut FundedChannel, - ) -> MsgHandleErrInternal { - let reason = - ChannelError::Close(("Coop Closed".to_owned(), shutdown_result.closure_reason.clone())); - let (close, mut err) = self.convert_funded_channel_err_internal( - closed_update_ids, - in_flight_updates, - Some(shutdown_result), - reason, - funded_channel, - ); - err.dont_send_error_message(); - debug_assert!(close); - err - } - - /// When a funded channel is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. - /// - /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped - /// error)`. - fn convert_channel_err_funded( - &self, closed_update_ids: &mut BTreeMap, - in_flight_updates: &mut BTreeMap)>, - err: ChannelError, funded_channel: &mut FundedChannel, - ) -> (bool, MsgHandleErrInternal) { - self.convert_funded_channel_err_internal( - closed_update_ids, - in_flight_updates, - None, - err, - funded_channel, - ) - } - - /// When a channel that can be funded or unfunded is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. - /// - /// Note that this step can be skipped if the channel was never opened (through the creation of a - /// [`ChannelMonitor`]/channel funding transaction) to begin with. - /// - /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped - /// error)`. - fn convert_channel_err( - &self, closed_update_ids: &mut BTreeMap, - in_flight_updates: &mut BTreeMap)>, - err: ChannelError, channel: &mut Channel, - ) -> (bool, MsgHandleErrInternal) { - match channel.as_funded_mut() { - Some(funded_channel) => self.convert_funded_channel_err_internal( - closed_update_ids, - in_flight_updates, - None, - err, - funded_channel, - ), - None => self.convert_unfunded_channel_err_internal(err, channel), - } - } - fn send_channel_ready( &self, pending_msg_events: &mut Vec, channel: &FundedChannel, channel_ready_msg: msgs::ChannelReady, @@ -4847,6 +4617,236 @@ where } } + /// Handles an error by closing the channel if required and generating peer messages. + fn handle_error( + &self, internal: Result, counterparty_node_id: PublicKey, + ) -> Result { + // In testing, ensure there are no deadlocks where the lock is already held upon + // entering the macro. 
+ debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + + internal.map_err(|err_internal| { + let mut msg_event = None; + + if let Some((shutdown_res, update_option)) = err_internal.shutdown_finish { + let counterparty_node_id = shutdown_res.counterparty_node_id; + let channel_id = shutdown_res.channel_id; + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(channel_id), + None, + ); + log_error!(logger, "Closing channel: {}", err_internal.err.err); + + self.finish_close_channel(shutdown_res); + if let Some((update, node_id_1, node_id_2)) = update_option { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { + msg: update, + node_id_1, + node_id_2, + }); + } + } else { + log_error!(self.logger, "Got non-closing error: {}", err_internal.err.err); + } + + if let msgs::ErrorAction::IgnoreError = err_internal.err.action { + if let Some(tx_abort) = err_internal.tx_abort { + msg_event = Some(MessageSendEvent::SendTxAbort { + node_id: counterparty_node_id, + msg: tx_abort, + }); + } + } else { + msg_event = Some(MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: err_internal.err.action.clone(), + }); + } + + if let Some(msg_event) = msg_event { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if peer_state.is_connected { + peer_state.pending_msg_events.push(msg_event); + } + } + } + + // Return error in case higher-API need one + err_internal.err + }) + } + + fn convert_funded_channel_err_internal( + &self, closed_channel_monitor_update_ids: &mut BTreeMap, + in_flight_monitor_updates: &mut BTreeMap)>, + coop_close_shutdown_res: Option, err: ChannelError, + chan: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + let chan_id = chan.context.channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + + let mut shutdown_res = if let Some(res) = coop_close_shutdown_res { + res + } else { + chan.force_shutdown(reason) + }; + let chan_update = self.get_channel_update_for_broadcast(chan).ok(); + + log_error!(logger, "Closed channel due to close-required error: {}", msg); + + if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { + handle_new_monitor_update_locked_actions_handled_by_caller!( + self, + funding_txo, + update, + in_flight_monitor_updates, + chan.context + ); + } + // If there's a possibility that we need to generate further monitor updates for this + // channel, we need to store the last update_id of it. However, we don't want to insert + // into the map (which prevents the `PeerState` from being cleaned up) for channels that + // never even got confirmations (which would open us up to DoS attacks). 
+ let update_id = chan.context.get_latest_monitor_update_id(); + let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); + let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); + if funding_confirmed || chan_zero_conf || update_id > 1 { + closed_channel_monitor_update_ids.insert(chan_id, update_id); + } + let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); + if let Some(short_id) = chan.funding.get_short_channel_id() { + short_to_chan_info.remove(&short_id); + } else { + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context.outbound_scid_alias(); + let alias_removed = + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + } + short_to_chan_info.remove(&chan.context.outbound_scid_alias()); + for scid in chan.context.historical_scids() { + short_to_chan_info.remove(scid); + } + + (shutdown_res, chan_update) + }) + } + + fn convert_unfunded_channel_err_internal( + &self, err: ChannelError, chan: &mut Channel, + ) -> (bool, MsgHandleErrInternal) + where + SP::Target: SignerProvider, + { + let chan_id = chan.context().channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, chan.context(), None); + + let shutdown_res = chan.force_shutdown(reason); + log_error!(logger, "Closed channel due to close-required error: {}", msg); + self.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context().outbound_scid_alias(); + let alias_removed = self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + (shutdown_res, None) + }) + } + + /// When a cooperatively closed channel is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Returns a mapped error. 
+ fn convert_channel_err_coop( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + shutdown_result: ShutdownResult, funded_channel: &mut FundedChannel, + ) -> MsgHandleErrInternal { + let reason = + ChannelError::Close(("Coop Closed".to_owned(), shutdown_result.closure_reason.clone())); + let (close, mut err) = self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + Some(shutdown_result), + reason, + funded_channel, + ); + err.dont_send_error_message(); + debug_assert!(close); + err + } + + /// When a funded channel is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn convert_channel_err_funded( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, funded_channel: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ) + } + + /// When a channel that can be funded or unfunded is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Note that this step can be skipped if the channel was never opened (through the creation of a + /// [`ChannelMonitor`]/channel funding transaction) to begin with. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn convert_channel_err( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, channel: &mut Channel, + ) -> (bool, MsgHandleErrInternal) { + match channel.as_funded_mut() { + Some(funded_channel) => self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ), + None => self.convert_unfunded_channel_err_internal(err, channel), + } + } + /// Initiate a splice in order to add value to (splice-in) or remove value from (splice-out) /// the channel. This will spend the channel's funding transaction output, effectively replacing /// it with a new one. From c7d1ba70eb84107ebb89da15acbc404e80148bf9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 12 Dec 2025 17:25:45 +0100 Subject: [PATCH 39/42] LDK Node Integration CI: Also patch LDK dependencies if `git` By now, we switched our LDK Node `main` to a specific commit on LDK's `main`. Since we don't have the `crates.io` dependencies in the `Cargo.toml`, the patch command won't actually do anything but silently fail, i.e., *not* check the PR changes against the LDK Node main branch. Here we fix this by also patching the git repository path. 
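
For reference, a minimal sketch of the Cargo behavior being worked around (the dependency
declaration below is illustrative, not LDK Node's actual manifest): a `[patch]` table only
overrides dependencies resolved from the source it names, so once LDK is pulled in via git,
`[patch.crates-io]` is a no-op for it and the git URL has to be patched as well:

    [dependencies]
    # Hypothetical downstream declaration pinning LDK to a git commit.
    lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "<pinned commit>" }

    # Only affects copies that would otherwise come from crates.io.
    [patch.crates-io]
    lightning = { path = "../rust-lightning/lightning" }

    # Needed so the git-sourced dependency above resolves to the local checkout instead.
    [patch."https://github.com/lightningdevkit/rust-lightning"]
    lightning = { path = "../rust-lightning/lightning" }

With both tables present, the subsequent `cargo check` runs actually exercise the PR's
checkout regardless of which source LDK Node uses to declare its LDK dependencies.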
--- .github/workflows/ldk-node-integration.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/ldk-node-integration.yml b/.github/workflows/ldk-node-integration.yml index 136a60bd98a..446abd40a07 100644 --- a/.github/workflows/ldk-node-integration.yml +++ b/.github/workflows/ldk-node-integration.yml @@ -39,6 +39,19 @@ jobs: lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } lightning-macros = { path = "../rust-lightning/lightning-macros" } + + [patch."https://github.com/lightningdevkit/rust-lightning"] + lightning = { path = "../rust-lightning/lightning" } + lightning-types = { path = "../rust-lightning/lightning-types" } + lightning-invoice = { path = "../rust-lightning/lightning-invoice" } + lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } + lightning-persister = { path = "../rust-lightning/lightning-persister" } + lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } + lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } + lightning-block-sync = { path = "../rust-lightning/lightning-block-sync" } + lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } + lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } + lightning-macros = { path = "../rust-lightning/lightning-macros" } EOF cargo check cargo check --features uniffi From 275b00629036cbe8193dbb3a16ff8ec94f927d34 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 12 Dec 2025 08:41:49 +0100 Subject: [PATCH 40/42] Rename convert_err methods Make the names more descriptive and link shared documentation. --- lightning/src/ln/channelmanager.rs | 124 ++++++++++++++++------------- 1 file changed, 68 insertions(+), 56 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index af2408726ce..a1bf543ebf8 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3584,7 +3584,7 @@ macro_rules! break_channel_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = $self.convert_channel_err( + let (drop, res) = $self.locked_handle_force_close( &mut $peer_state.closed_channel_monitor_update_ids, &mut $peer_state.in_flight_monitor_updates, e, @@ -3604,7 +3604,7 @@ macro_rules! 
try_channel_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = $self.convert_channel_err( + let (drop, res) = $self.locked_handle_force_close( &mut $peer_state.closed_channel_monitor_update_ids, &mut $peer_state.in_flight_monitor_updates, e, @@ -4225,7 +4225,7 @@ where let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = self.convert_channel_err( + let (_, mut e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -4367,8 +4367,11 @@ where } /// When a channel is removed, two things need to happen: - /// (a) [`ChannelManager::convert_channel_err`] must be called in the same `per_peer_state` lock as the - /// channel-closing action, + /// (a) Handle the initial within-lock closure for the channel via one of the following methods: + /// [`ChannelManager::locked_handle_unfunded_close`], + /// [`ChannelManager::locked_handle_funded_coop_close`], + /// [`ChannelManager::locked_handle_funded_force_close`] or + /// [`ChannelManager::locked_handle_force_close`]. /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls this. fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) { @@ -4437,7 +4440,7 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { let reason = ClosureReason::FundingBatchClosure; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -4534,7 +4537,7 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) { log_error!(logger, "Force-closing channel"); let err = ChannelError::Close((message, reason)); - let (_, mut e) = self.convert_channel_err( + let (_, mut e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -4683,7 +4686,12 @@ where }) } - fn convert_funded_channel_err_internal( + /// Handle the initial within-lock closure for a funded channel that is either force-closed or cooperatively + /// closed (as indicated by `coop_close_shutdown_res`). + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn locked_handle_funded_close_internal( &self, closed_channel_monitor_update_ids: &mut BTreeMap, in_flight_monitor_updates: &mut BTreeMap)>, coop_close_shutdown_res: Option, err: ChannelError, @@ -4745,7 +4753,13 @@ where }) } - fn convert_unfunded_channel_err_internal( + /// Handle the initial within-lock closure for an unfunded channel. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + /// + /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. 
+ fn locked_handle_unfunded_close( &self, err: ChannelError, chan: &mut Channel, ) -> (bool, MsgHandleErrInternal) where @@ -4771,21 +4785,19 @@ where }) } - /// When a cooperatively closed channel is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. + /// Handle the initial within-lock closure for a channel that is cooperatively closed. /// /// Returns a mapped error. - fn convert_channel_err_coop( + /// + /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. + fn locked_handle_funded_coop_close( &self, closed_update_ids: &mut BTreeMap, in_flight_updates: &mut BTreeMap)>, shutdown_result: ShutdownResult, funded_channel: &mut FundedChannel, ) -> MsgHandleErrInternal { let reason = ChannelError::Close(("Coop Closed".to_owned(), shutdown_result.closure_reason.clone())); - let (close, mut err) = self.convert_funded_channel_err_internal( + let (close, mut err) = self.locked_handle_funded_close_internal( closed_update_ids, in_flight_updates, Some(shutdown_result), @@ -4797,20 +4809,18 @@ where err } - /// When a funded channel is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. + /// Handle the initial within-lock closure for a funded channel that is force-closed. /// /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped /// error)`. - fn convert_channel_err_funded( + /// + /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. + fn locked_handle_funded_force_close( &self, closed_update_ids: &mut BTreeMap, in_flight_updates: &mut BTreeMap)>, err: ChannelError, funded_channel: &mut FundedChannel, ) -> (bool, MsgHandleErrInternal) { - self.convert_funded_channel_err_internal( + self.locked_handle_funded_close_internal( closed_update_ids, in_flight_updates, None, @@ -4819,31 +4829,32 @@ where ) } - /// When a channel that can be funded or unfunded is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. - /// - /// Note that this step can be skipped if the channel was never opened (through the creation of a - /// [`ChannelMonitor`]/channel funding transaction) to begin with. + /// Handle the initial within-lock closure for a channel that is force-closed. /// /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped /// error)`. 
- fn convert_channel_err( + /// + /// # Closure semantics + /// + /// Two things need to happen: + /// (a) This method must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + fn locked_handle_force_close( &self, closed_update_ids: &mut BTreeMap, in_flight_updates: &mut BTreeMap)>, err: ChannelError, channel: &mut Channel, ) -> (bool, MsgHandleErrInternal) { match channel.as_funded_mut() { - Some(funded_channel) => self.convert_funded_channel_err_internal( + Some(funded_channel) => self.locked_handle_funded_close_internal( closed_update_ids, in_flight_updates, None, err, funded_channel, ), - None => self.convert_unfunded_channel_err_internal(err, channel), + None => self.locked_handle_unfunded_close(err, channel), } } @@ -6566,7 +6577,7 @@ where let reason = ClosureReason::ProcessingError { err: e.clone() }; let err = ChannelError::Close((e.clone(), reason)); let peer_state = &mut *peer_state_lock; - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -8333,7 +8344,7 @@ where if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } if let Err(e) = funded_chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = self.convert_channel_err_funded(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); + let (needs_close, err) = self.locked_handle_funded_force_close(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); handle_errors.push((Err(err), counterparty_node_id)); if needs_close { return false; } } @@ -8410,7 +8421,7 @@ where let reason = ClosureReason::FundingTimedOut; let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); let err = ChannelError::Close((msg, reason)); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -10614,7 +10625,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // concerning this channel as it is safe to do so. debug_assert!(matches!(err, ChannelError::Close(_))); let mut chan = Channel::from(inbound_chan); - return Err(self.convert_channel_err( + return Err(self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -10626,7 +10637,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Some(Err(mut chan)) => { let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); let err = ChannelError::close(err_msg); - return Err(self.convert_channel_err( + return Err(self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -10647,7 +10658,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let err = ChannelError::close($err.to_owned()); chan.unset_funding_info(); let mut chan = Channel::from(chan); - return Err(self.convert_unfunded_channel_err_internal(err, &mut chan).1); + return Err(self.locked_handle_unfunded_close(err, &mut chan).1); }}; } @@ -11267,7 +11278,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = self.convert_channel_err( + let (_, mut e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -11332,7 +11343,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // also implies there are no pending HTLCs left on the channel, so we can // fully delete it from tracking (the channel monitor is still around to // watch for old state broadcasts)! - let err = self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, close_res, chan); + let err = self.locked_handle_funded_coop_close(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, close_res, chan); chan_entry.remove(); Some((tx, Err(err))) } else { @@ -12421,7 +12432,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -12442,7 +12453,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let reason = ClosureReason::CommitmentTxConfirmed; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -12639,7 +12650,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ _ => match unblock_chan(chan, &mut peer_state.pending_msg_events) { Ok(shutdown_result) => shutdown_result, Err(err) => { - let (_, err) = self.convert_channel_err( + let (_, err) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -12655,7 +12666,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let logger = WithChannelContext::from(&self.logger, context, None); log_trace!(logger, "Removing channel now that the signer is unblocked"); let (remove, err) = if let Some(funded) = chan.as_funded_mut() { - let err = self.convert_channel_err_coop( + let err = self.locked_handle_funded_coop_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown, @@ -12666,7 +12677,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ debug_assert!(false); let reason = shutdown.closure_reason.clone(); let err = ChannelError::Close((reason.to_string(), reason)); - self.convert_unfunded_channel_err_internal(err, chan) + self.locked_handle_unfunded_close(err, chan) }; debug_assert!(remove); shutdown_results.push((Err(err), *cp_id)); @@ -12725,7 +12736,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some((tx, shutdown_res)) = tx_shutdown_result_opt { // We're done with this channel. We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. - let err = self.convert_channel_err_coop( + let err = self.locked_handle_funded_coop_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown_res, @@ -12742,12 +12753,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, Err(e) => { has_update = true; - let (close_channel, res) = self.convert_channel_err_funded( - &mut peer_state.closed_channel_monitor_update_ids, - &mut peer_state.in_flight_monitor_updates, - e, - funded_chan, - ); + let (close_channel, res) = self + .locked_handle_funded_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + e, + funded_chan, + ); handle_errors.push(( funded_chan.context.get_counterparty_node_id(), Err(res), @@ -14117,7 +14129,7 @@ where // Clean up for removal. let reason = ClosureReason::DisconnectedPeer; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -14874,7 +14886,7 @@ where // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. 
let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = self.convert_channel_err_funded( + let (_, e) = self.locked_handle_funded_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, funded_channel From 42a993d5e9259a2acc7449f745d464fea4a06ab8 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 15 Dec 2025 10:55:03 +0100 Subject: [PATCH 41/42] Use more specific locked_handle_unfunded_close when possible --- lightning/src/ln/channelmanager.rs | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a1bf543ebf8..f2419b21b67 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4225,13 +4225,7 @@ where let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = self.locked_handle_force_close( - &mut peer_state.closed_channel_monitor_update_ids, - &mut peer_state.in_flight_monitor_updates, - err, - &mut chan, - ); - + let (_, mut e) = self.locked_handle_unfunded_close(err, &mut chan); e.dont_send_error_message(); shutdown_result = Err(e); } @@ -8421,9 +8415,7 @@ where let reason = ClosureReason::FundingTimedOut; let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); let err = ChannelError::Close((msg, reason)); - let (_, e) = self.locked_handle_force_close( - &mut peer_state.closed_channel_monitor_update_ids, - &mut peer_state.in_flight_monitor_updates, + let (_, e) = self.locked_handle_unfunded_close( err, chan, ); @@ -11278,12 +11270,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = self.locked_handle_force_close( - &mut peer_state.closed_channel_monitor_update_ids, - &mut peer_state.in_flight_monitor_updates, - err, - &mut chan, - ); + let (_, mut e) = self.locked_handle_unfunded_close(err, &mut chan); e.dont_send_error_message(); return Err(e); }, From e50280cbc3fe70f3c592f8a769d0507cf770c16e Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 12 Dec 2025 09:04:46 +0100 Subject: [PATCH 42/42] Inline format args --- lightning/src/ln/channelmanager.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f2419b21b67..f2e8fa70e4f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4514,8 +4514,7 @@ where let peer_state_mutex = per_peer_state.get(peer_node_id).ok_or_else(|| APIError::ChannelUnavailable { err: format!( - "Can't find a peer matching the passed counterparty node_id {}", - peer_node_id + "Can't find a peer matching the passed counterparty node_id {peer_node_id}", ), })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -4563,8 +4562,7 @@ where } else { Err(APIError::ChannelUnavailable { err: format!( - "Channel with id {} not found for the passed counterparty node_id {}", - channel_id, peer_node_id + "Channel with id {channel_id} not found for the passed counterparty node_id {peer_node_id}", ), }) }
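
The inline-format-args conversion above relies on identifier capture in format strings, which
has been stable since Rust 1.58. A self-contained sketch (the values are made up; only the
message text mirrors the diff):

    fn main() {
        let peer_node_id = "02abcdef";
        let channel_id = "1234beef";

        // Positional arguments, as written before this patch.
        let before = format!(
            "Channel with id {} not found for the passed counterparty node_id {}",
            channel_id, peer_node_id
        );

        // Inlined format args: the identifiers are captured directly by the format
        // string, so the explicit argument list disappears.
        let after = format!(
            "Channel with id {channel_id} not found for the passed counterparty node_id {peer_node_id}"
        );

        assert_eq!(before, after);
    }

Only bare identifiers can be captured this way; expressions such as `htlc.value` still have to
be passed as explicit arguments, which is why the patch only inlines spots where a plain
variable is being formatted.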