From 4561bc5bf3887897f077bddb96330cccf3ccff0d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 1 Dec 2025 16:12:17 -0800 Subject: [PATCH 1/9] Git-ignore lightning-tests/target Similar to the other /target directories we ignore where a bunch of files are generated during testing. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index ed10eb14387..56e94616eeb 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ lightning-dns-resolver/target ext-functional-test-demo/target no-std-check/target msrv-no-dev-deps-check/target +lightning-tests/target From 4cdaabdcf048c777c7aecd1e83805cf2bcede536 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 13 Nov 2025 14:20:46 -0500 Subject: [PATCH 2/9] Store inbound committed update_adds in Channel We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. As part of this, we plan to store at least parts of Channels in ChannelMonitors, and that Channel data will be used in rebuilding the manager. Once we store update_adds in Channels, we can use them on restart when reconstructing ChannelManager maps such as forward_htlcs and pending_intercepted_htlcs. Upcoming commits will start doing this reconstruction. --- lightning/src/ln/channel.rs | 77 ++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 27 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5b4ac4c0aa5..c7003413ed3 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -211,7 +211,9 @@ enum InboundHTLCState { /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), - Committed, + Committed { + update_add_htlc_opt: Option, + }, /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack /// we'll drop it. @@ -235,7 +237,7 @@ impl From<&InboundHTLCState> for Option { InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => { Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd) }, - InboundHTLCState::Committed => Some(InboundHTLCStateDetails::Committed), + InboundHTLCState::Committed { .. } => Some(InboundHTLCStateDetails::Committed), InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) => { Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail) }, @@ -256,7 +258,7 @@ impl fmt::Display for InboundHTLCState { InboundHTLCState::RemoteAnnounced(_) => write!(f, "RemoteAnnounced"), InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => write!(f, "AwaitingRemoteRevokeToAnnounce"), InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => write!(f, "AwaitingAnnouncedRemoteRevoke"), - InboundHTLCState::Committed => write!(f, "Committed"), + InboundHTLCState::Committed { .. } => write!(f, "Committed"), InboundHTLCState::LocalRemoved(_) => write!(f, "LocalRemoved"), } } @@ -268,7 +270,7 @@ impl InboundHTLCState { InboundHTLCState::RemoteAnnounced(_) => !generated_by_local, InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => !generated_by_local, InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => true, - InboundHTLCState::Committed => true, + InboundHTLCState::Committed { .. 
} => true, InboundHTLCState::LocalRemoved(_) => !generated_by_local, } } @@ -296,7 +298,7 @@ impl InboundHTLCState { }, InboundHTLCResolution::Resolved { .. } => false, }, - InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false, + InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false, } } } @@ -4102,7 +4104,7 @@ where if self.pending_inbound_htlcs.iter() .any(|htlc| match htlc.state { - InboundHTLCState::Committed => false, + InboundHTLCState::Committed { .. } => false, // An HTLC removal from the local node is pending on the remote commitment. InboundHTLCState::LocalRemoved(_) => true, // An HTLC add from the remote node is pending on the local commitment. @@ -4531,7 +4533,7 @@ where (InboundHTLCState::RemoteAnnounced(..), _) => true, (InboundHTLCState::AwaitingRemoteRevokeToAnnounce(..), _) => true, (InboundHTLCState::AwaitingAnnouncedRemoteRevoke(..), _) => true, - (InboundHTLCState::Committed, _) => true, + (InboundHTLCState::Committed { .. }, _) => true, (InboundHTLCState::LocalRemoved(..), true) => true, (InboundHTLCState::LocalRemoved(..), false) => false, }) @@ -7320,7 +7322,7 @@ where payment_preimage_arg ); match htlc.state { - InboundHTLCState::Committed => {}, + InboundHTLCState::Committed { .. } => {}, InboundHTLCState::LocalRemoved(ref reason) => { if let &InboundHTLCRemovalReason::Fulfill { .. } = reason { } else { @@ -7413,7 +7415,7 @@ where { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; - if let InboundHTLCState::Committed = htlc.state { + if let InboundHTLCState::Committed { .. } = htlc.state { } else { debug_assert!( false, @@ -7548,7 +7550,7 @@ where for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { match htlc.state { - InboundHTLCState::Committed => {}, + InboundHTLCState::Committed { .. } => {}, InboundHTLCState::LocalRemoved(_) => { return Err(ChannelError::Ignore(format!("HTLC {} was already resolved", htlc.htlc_id))); }, @@ -8716,7 +8718,7 @@ where false }; if swap { - let mut state = InboundHTLCState::Committed; + let mut state = InboundHTLCState::Committed { update_add_htlc_opt: None }; mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { @@ -8755,14 +8757,19 @@ where PendingHTLCStatus::Forward(forward_info) => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed; + // TODO: this is currently unreachable, so is it okay? will we lose a forward? + htlc.state = InboundHTLCState::Committed { + update_add_htlc_opt: None, + }; }, } }, InboundHTLCResolution::Pending { update_add_htlc } => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); - pending_update_adds.push(update_add_htlc); - htlc.state = InboundHTLCState::Committed; + pending_update_adds.push(update_add_htlc.clone()); + htlc.state = InboundHTLCState::Committed { + update_add_htlc_opt: Some(update_add_htlc), + }; }, } } @@ -9297,7 +9304,7 @@ where // in response to it yet, so don't touch it. true }, - InboundHTLCState::Committed => true, + InboundHTLCState::Committed { .. 
} => true, InboundHTLCState::LocalRemoved(_) => { // We (hopefully) sent a commitment_signed updating this HTLC (which we can // re-transmit if needed) and they may have even sent a revoke_and_ack back @@ -14518,6 +14525,7 @@ where } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); + let mut inbound_committed_update_adds: Vec> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14537,8 +14545,9 @@ where 2u8.write(writer)?; htlc_resolution.write(writer)?; }, - &InboundHTLCState::Committed => { + &InboundHTLCState::Committed { ref update_add_htlc_opt } => { 3u8.write(writer)?; + inbound_committed_update_adds.push(update_add_htlc_opt.clone()); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; @@ -14914,6 +14923,7 @@ where (69, holding_cell_held_htlc_flags, optional_vec), // Added in 0.2 (71, holder_commitment_point_previous_revoked, option), // Added in 0.3 (73, holder_commitment_point_last_revoked, option), // Added in 0.3 + (75, inbound_committed_update_adds, optional_vec), }); Ok(()) @@ -14997,7 +15007,7 @@ where }; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) }, - 3 => InboundHTLCState::Committed, + 3 => InboundHTLCState::Committed { update_add_htlc_opt: None }, 4 => { let reason = match ::read(reader)? { 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -15301,6 +15311,7 @@ where let mut pending_outbound_held_htlc_flags_opt: Option>> = None; let mut holding_cell_held_htlc_flags_opt: Option>> = None; + let mut inbound_committed_update_adds_opt: Option>> = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -15350,6 +15361,7 @@ where (69, holding_cell_held_htlc_flags_opt, optional_vec), // Added in 0.2 (71, holder_commitment_point_previous_revoked_opt, option), // Added in 0.3 (73, holder_commitment_point_last_revoked_opt, option), // Added in 0.3 + (75, inbound_committed_update_adds_opt, optional_vec), }); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -15473,6 +15485,17 @@ where return Err(DecodeError::InvalidValue); } } + if let Some(update_adds) = inbound_committed_update_adds_opt { + let mut iter = update_adds.into_iter(); + for htlc in pending_inbound_htlcs.iter_mut() { + if let InboundHTLCState::Committed { ref mut update_add_htlc_opt } = htlc.state { + *update_add_htlc_opt = iter.next().ok_or(DecodeError::InvalidValue)?; + } + } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } if let Some(attribution_data_list) = removed_htlc_attribution_data { let mut removed_htlcs = pending_inbound_htlcs.iter_mut().filter_map(|status| { @@ -16057,7 +16080,7 @@ mod tests { amount_msat: htlc_amount_msat, payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()), cltv_expiry: 300000000, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -16903,7 +16926,7 @@ mod tests { amount_msat: 1000000, cltv_expiry: 500, payment_hash: PaymentHash::from(payment_preimage_0), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); let payment_preimage_1 = @@ -16913,7 +16936,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: 
InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); let payment_preimage_2 = @@ -16953,7 +16976,7 @@ mod tests { amount_msat: 4000000, cltv_expiry: 504, payment_hash: PaymentHash::from(payment_preimage_4), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); // commitment tx with all five HTLCs untrimmed (minimum feerate) @@ -17342,7 +17365,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); chan.context.pending_outbound_htlcs.clear(); @@ -17593,7 +17616,7 @@ mod tests { amount_msat: 5000000, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, })); chan.context.pending_outbound_htlcs.extend( @@ -17722,7 +17745,7 @@ mod tests { amount_msat: 100000, cltv_expiry: 920125, payment_hash: htlc_0_in_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); let htlc_1_in_preimage = @@ -17740,7 +17763,7 @@ mod tests { amount_msat: 49900000, cltv_expiry: 920125, payment_hash: htlc_1_in_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); chan.context.pending_outbound_htlcs.extend( @@ -17792,7 +17815,7 @@ mod tests { amount_msat: 30000, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17959,7 +17982,7 @@ mod tests { amount_msat, cltv_expiry, payment_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }), ); From aeddf0fb38f863f918822f7e624f4f89aac085d7 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 1 Dec 2025 16:11:45 -0800 Subject: [PATCH 3/9] Extract util for HTLCIntercepted event creation We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. As part of rebuilding ChannelManager forward HTLCs maps, we will also add a fix that will regenerate HTLCIntercepted events for HTLC intercepts that are present but have no corresponding event in the queue. That fix will use this new method. --- lightning/src/ln/channelmanager.rs | 44 +++++++++++++++++++----------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 72585d69f80..aa7871051e6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3911,6 +3911,25 @@ macro_rules! process_events_body { } } +/// Creates an [`Event::HTLCIntercepted`] from a [`PendingAddHTLCInfo`]. We generate this event in a +/// few places so this DRYs the code. +fn create_htlc_intercepted_event( + intercept_id: InterceptId, pending_add: &PendingAddHTLCInfo, +) -> Result { + let inbound_amount_msat = pending_add.forward_info.incoming_amt_msat.ok_or(())?; + let requested_next_hop_scid = match pending_add.forward_info.routing { + PendingHTLCRouting::Forward { short_channel_id, .. 
} => short_channel_id, + _ => return Err(()), + }; + Ok(Event::HTLCIntercepted { + requested_next_hop_scid, + payment_hash: pending_add.forward_info.payment_hash, + inbound_amount_msat, + expected_outbound_amount_msat: pending_add.forward_info.outgoing_amt_msat, + intercept_id, + }) +} + impl< M: Deref, T: Deref, @@ -11486,22 +11505,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); match pending_intercepts.entry(intercept_id) { hash_map::Entry::Vacant(entry) => { - new_intercept_events.push_back(( - events::Event::HTLCIntercepted { - requested_next_hop_scid: scid, - payment_hash, - inbound_amount_msat: pending_add - .forward_info - .incoming_amt_msat - .unwrap(), - expected_outbound_amount_msat: pending_add - .forward_info - .outgoing_amt_msat, - intercept_id, - }, - None, - )); - entry.insert(pending_add); + if let Ok(intercept_ev) = + create_htlc_intercepted_event(intercept_id, &pending_add) + { + new_intercept_events.push_back((intercept_ev, None)); + entry.insert(pending_add); + } else { + debug_assert!(false); + fail_intercepted_htlc(pending_add); + } }, hash_map::Entry::Occupied(_) => { log_info!( From fd4a5cc1b5394f663e15b7395f9068ef4aa69943 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 17 Nov 2025 17:27:00 -0500 Subject: [PATCH 4/9] Extract method to dedup pre-decode update_add We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. We'll use this new util when reconstructing the ChannelManager::decode_update_add_htlcs map from Channel data in upcoming commits. While the Channel data is not included in the monitors yet, it will be in future work. --- lightning/src/ln/channelmanager.rs | 45 +++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index aa7871051e6..15cf4f38f52 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16819,6 +16819,33 @@ where } } +// If the HTLC corresponding to `prev_hop_data` is present in `decode_update_add_htlcs`, remove it +// from the map as it is already being stored and processed elsewhere. +fn dedup_decode_update_add_htlcs( + decode_update_add_htlcs: &mut HashMap>, + prev_hop_data: &HTLCPreviousHopData, removal_reason: &'static str, logger: &L, +) where + L::Target: Logger, +{ + decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| { + update_add_htlcs.retain(|update_add| { + let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias + && update_add.htlc_id == prev_hop_data.htlc_id; + if matches { + let logger = WithContext::from( + logger, + prev_hop_data.counterparty_node_id, + Some(update_add.channel_id), + Some(update_add.payment_hash), + ); + log_info!(logger, "Removing pending to-decode HTLC: {}", removal_reason); + } + !matches + }); + !update_add_htlcs.is_empty() + }); +} + // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the // SipmleArcChannelManager type: impl< @@ -17686,19 +17713,11 @@ where // still have an entry for this HTLC in `forward_htlcs` or // `pending_intercepted_htlcs`, we were apparently not persisted after // the monitor was when forwarding the payment. 
- decode_update_add_htlcs.retain( - |src_outb_alias, update_add_htlcs| { - update_add_htlcs.retain(|update_add_htlc| { - let matches = *src_outb_alias - == prev_hop_data.prev_outbound_scid_alias - && update_add_htlc.htlc_id == prev_hop_data.htlc_id; - if matches { - log_info!(logger, "Removing pending to-decode HTLC as it was forwarded to the closed channel"); - } - !matches - }); - !update_add_htlcs.is_empty() - }, + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, ); forward_htlcs.retain(|_, forwards| { forwards.retain(|forward| { From ed3af0769424167f683a5c66d548109948416519 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 12:32:40 -0500 Subject: [PATCH 5/9] Rename manager HTLC forward maps to _legacy We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. Soon we'll be reconstructing these now-legacy maps from Channel data (that will also be included in ChannelMonitors in future work), so rename them as part of moving towards not needing to persist them in ChannelManager. --- lightning/src/ln/channelmanager.rs | 38 +++++++++++++++++++----------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 15cf4f38f52..32d7086f009 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17195,7 +17195,11 @@ where const MAX_ALLOC_SIZE: usize = 1024 * 64; let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + // This map is read but may no longer be used because we'll attempt to rebuild `forward_htlcs` + // from the `Channel{Monitor}`s instead, as a step towards getting rid of `ChannelManager` + // persistence. + let mut forward_htlcs_legacy: HashMap> = + hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); for _ in 0..forward_htlcs_count { let short_channel_id = Readable::read(reader)?; let pending_forwards_count: u64 = Readable::read(reader)?; @@ -17206,7 +17210,7 @@ where for _ in 0..pending_forwards_count { pending_forwards.push(Readable::read(reader)?); } - forward_htlcs.insert(short_channel_id, pending_forwards); + forward_htlcs_legacy.insert(short_channel_id, pending_forwards); } let claimable_htlcs_count: u64 = Readable::read(reader)?; @@ -17294,12 +17298,18 @@ where }; } + // Some maps are read but may no longer be used because we attempt to rebuild pending HTLC + // forwards from the `Channel{Monitor}`s instead, as a step towards getting rid of + // `ChannelManager` persistence. + let mut pending_intercepted_htlcs_legacy: Option> = + Some(new_hash_map()); + let mut decode_update_add_htlcs_legacy: Option>> = + None; + // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. 
let mut pending_outbound_payments_no_retry: Option>> = None; let mut pending_outbound_payments = None; - let mut pending_intercepted_htlcs: Option> = - Some(new_hash_map()); let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; @@ -17317,13 +17327,12 @@ where let mut in_flight_monitor_updates: Option< HashMap<(PublicKey, ChannelId), Vec>, > = None; - let mut decode_update_add_htlcs: Option>> = None; let mut inbound_payment_id_secret = None; let mut peer_storage_dir: Option)>> = None; let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs, option), + (2, pending_intercepted_htlcs_legacy, option), (3, pending_outbound_payments, option), (4, pending_claiming_payments, option), (5, received_network_pubkey, option), @@ -17334,13 +17343,14 @@ where (10, legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs, option), + (14, decode_update_add_htlcs_legacy, option), (15, inbound_payment_id_secret, option), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)), }); - let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map()); + let mut decode_update_add_htlcs_legacy = + decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); let peer_storage_dir: Vec<(PublicKey, Vec)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); @@ -17714,12 +17724,12 @@ where // `pending_intercepted_htlcs`, we were apparently not persisted after // the monitor was when forwarding the payment. 
dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, + &mut decode_update_add_htlcs_legacy, &prev_hop_data, "HTLC was forwarded to the closed channel", &args.logger, ); - forward_htlcs.retain(|_, forwards| { + forward_htlcs_legacy.retain(|_, forwards| { forwards.retain(|forward| { if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { @@ -17731,7 +17741,7 @@ where }); !forwards.is_empty() }); - pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { + pending_intercepted_htlcs_legacy.as_mut().unwrap().retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", &htlc.payment_hash, &monitor.channel_id()); @@ -18229,10 +18239,10 @@ where inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy.unwrap()), - forward_htlcs: Mutex::new(forward_htlcs), - decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), + forward_htlcs: Mutex::new(forward_htlcs_legacy), + decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap(), From e8d071150007958b12ae4060fda0855d277724e5 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 18:24:32 -0500 Subject: [PATCH 6/9] Tweak pending_htlc_intercepts ser on manager read Makes an upcoming commit cleaner --- lightning/src/ln/channelmanager.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 32d7086f009..3d935842e8d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17302,7 +17302,7 @@ where // forwards from the `Channel{Monitor}`s instead, as a step towards getting rid of // `ChannelManager` persistence. 
let mut pending_intercepted_htlcs_legacy: Option> = - Some(new_hash_map()); + None; let mut decode_update_add_htlcs_legacy: Option>> = None; @@ -17351,6 +17351,8 @@ where }); let mut decode_update_add_htlcs_legacy = decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); + let mut pending_intercepted_htlcs_legacy = + pending_intercepted_htlcs_legacy.unwrap_or_else(|| new_hash_map()); let peer_storage_dir: Vec<(PublicKey, Vec)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); @@ -17741,7 +17743,7 @@ where }); !forwards.is_empty() }); - pending_intercepted_htlcs_legacy.as_mut().unwrap().retain(|intercepted_id, htlc_info| { + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", &htlc.payment_hash, &monitor.channel_id()); @@ -18239,7 +18241,7 @@ where inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy.unwrap()), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy), forward_htlcs: Mutex::new(forward_htlcs_legacy), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy), From 2c9f80b05c821b8c289538cef5debee5fa21b205 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 18:35:57 -0500 Subject: [PATCH 7/9] Gather to-decode HTLC fwds from channels on manager read We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. Here we start this process by rebuilding ChannelManager::decode_update_add_htlcs from the Channels, which will soon be included in the ChannelMonitors as part of a different series of PRs. The newly built map is not yet used but will be in the next commit. --- lightning/src/ln/channel.rs | 14 ++++++++++ lightning/src/ln/channelmanager.rs | 44 ++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c7003413ed3..6eef4c312f9 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7773,6 +7773,20 @@ where Ok(()) } + /// Useful for reconstructing forwarded HTLCs when deserializing the `ChannelManager`. 
+ pub(super) fn get_inbound_committed_update_adds(&self) -> Vec { + self.context + .pending_inbound_htlcs + .iter() + .filter_map(|htlc| match htlc.state { + InboundHTLCState::Committed { ref update_add_htlc_opt } => { + update_add_htlc_opt.clone() + }, + _ => None, + }) + .collect() + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3d935842e8d..e901e522045 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17353,6 +17353,7 @@ where decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); let mut pending_intercepted_htlcs_legacy = pending_intercepted_htlcs_legacy.unwrap_or_else(|| new_hash_map()); + let mut decode_update_add_htlcs = new_hash_map(); let peer_storage_dir: Vec<(PublicKey, Vec)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); @@ -17664,6 +17665,21 @@ where let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + let inbound_committed_update_adds = + funded_chan.get_inbound_committed_update_adds(); + if !inbound_committed_update_adds.is_empty() { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel`. We are moving away from writing the `decode_update_add_htlcs` map as + // part of getting rid of `ChannelManager` persistence. + decode_update_add_htlcs.insert( + funded_chan.context.outbound_scid_alias(), + inbound_committed_update_adds, + ); + } + } + } } if is_channel_closed { @@ -17725,6 +17741,12 @@ where // still have an entry for this HTLC in `forward_htlcs` or // `pending_intercepted_htlcs`, we were apparently not persisted after // the monitor was when forwarding the payment. + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs_legacy, &prev_hop_data, @@ -18215,6 +18237,28 @@ where } } + for (src, _, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); + } + } + + for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); + } + } + let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( chain_hash, From a1d327f1e69e1d4471a9ad58e953ffff78b0bc87 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 18:39:39 -0500 Subject: [PATCH 8/9] Rebuild manager forwarded htlcs maps from Channels We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. 
The main issue that can arise is channel force closure. Here we start this process by rebuilding ChannelManager::decode_update_add_htlcs, forward_htlcs, and pending_intercepted_htlcs from Channel data, which will soon be included in the ChannelMonitors as part of a different series of PRs. --- lightning/src/ln/channelmanager.rs | 55 +++++++++++++- lightning/src/ln/functional_test_utils.rs | 3 +- lightning/src/ln/reload_tests.rs | 87 ++++++++++++++++++++++- 3 files changed, 142 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e901e522045..6a2ced8a26f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11551,6 +11551,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if !new_intercept_events.is_empty() { let mut events = self.pending_events.lock().unwrap(); + new_intercept_events.retain(|new_ev| !events.contains(new_ev)); events.append(&mut new_intercept_events); } } @@ -18258,6 +18259,58 @@ where ); } } + // Remove forwarded HTLCs if they are also present in `decode_update_add_htlcs`. HTLCs present + // in both maps will be re-decoded from `decode_update_add_htlcs` and placed in + // `ChannelManager::forward_htlcs` on the next call to `process_pending_htlc_forwards`. + // + // In future versions we'll stop (de)serializing `forward_htlcs` as part of getting rid of the + // requirement to regularly persist the `ChannelManager`, so here we move in that direction by + // preferring to keep HTLCs in the new map that is rebuilt from `Channel{Monitor}` data. + forward_htlcs_legacy.retain(|scid, pending_fwds| { + for fwd in pending_fwds { + let (prev_scid, prev_htlc_id) = match fwd { + HTLCForwardInfo::AddHTLC(htlc) => { + (htlc.prev_outbound_scid_alias, htlc.prev_htlc_id) + }, + HTLCForwardInfo::FailHTLC { htlc_id, .. } + | HTLCForwardInfo::FailMalformedHTLC { htlc_id, .. } => (*scid, *htlc_id), + }; + if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { + if pending_update_adds + .iter() + .any(|update_add| update_add.htlc_id == prev_htlc_id) + { + return false; + } + } + } + true + }); + // Remove intercepted HTLC forwards if they are also present in `decode_update_add_htlcs`. See + // the above comment. + pending_intercepted_htlcs_legacy.retain(|id, fwd| { + let prev_scid = fwd.prev_outbound_scid_alias; + if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { + if pending_update_adds + .iter() + .any(|update_add| update_add.htlc_id == fwd.prev_htlc_id) + { + pending_events_read.retain( + |(ev, _)| !matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), + ); + return false; + } + } + if !pending_events_read.iter().any( + |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. 
} if intercept_id == id), + ) { + match create_htlc_intercepted_event(*id, &fwd) { + Ok(ev) => pending_events_read.push_back((ev, None)), + Err(()) => debug_assert!(false), + } + } + true + }); let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( @@ -18288,7 +18341,7 @@ where pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy), forward_htlcs: Mutex::new(forward_htlcs_legacy), - decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy), + decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap(), diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index ff33d7508b5..3a5940cb161 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1382,9 +1382,10 @@ macro_rules! reload_node { $node.onion_messenger.set_async_payments_handler(&$new_channelmanager); }; ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { + let config = $node.node.get_current_config(); reload_node!( $node, - test_default_channel_config(), + config, $chanman_encoded, $monitors_encoded, $persister, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 2e9471a787d..2a749c10549 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -20,6 +20,7 @@ use crate::chain::transaction::OutPoint; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, RAACommitmentOrder}; use crate::ln::msgs; +use crate::ln::outbound_payment::Retry; use crate::ln::types::ChannelId; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent}; use crate::util::test_channel_signer::TestChannelSigner; @@ -508,7 +509,6 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { #[cfg(feature = "std")] fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) { - use crate::ln::channelmanager::Retry; use crate::types::string::UntrustedString; // When we get a data_loss_protect proving we're behind, we immediately panic as the // chain::Watch API requirements have been violated (e.g. the user restored from a backup). 
The @@ -1173,6 +1173,91 @@ fn removed_payment_no_manager_persistence() { expect_payment_failed!(nodes[0], payment_hash, false); } +#[test] +fn manager_persisted_pre_outbound_edge_forward() { + do_manager_persisted_pre_outbound_edge_forward(false); +} + +#[test] +fn manager_persisted_pre_outbound_edge_intercept_forward() { + do_manager_persisted_pre_outbound_edge_forward(true); +} + +fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let mut intercept_forwards_config = test_default_channel_config(); + intercept_forwards_config.accept_intercept_htlcs = true; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + + // Lock in the HTLC from node_a <> node_b. + let amt_msat = 5000; + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + if intercept_htlc { + route.paths[0].hops[1].short_channel_id = nodes[1].node.get_intercept_scid(); + } + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in + // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`. + nodes[1].node.process_pending_update_add_htlcs(); + + // Disconnect peers and reload the forwarding node_b. + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]); + args_b_c.send_channel_ready = (true, true); + args_b_c.send_announcement_sigs = (true, true); + reconnect_nodes(args_b_c); + + // Forward the HTLC and ensure we can claim it post-reload. + nodes[1].node.process_pending_htlc_forwards(); + + if intercept_htlc { + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let (intercept_id, expected_outbound_amt_msat) = match events[0] { + Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. 
} => { + (intercept_id, expected_outbound_amount_msat) + }, + _ => panic!() + }; + nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id_2, + nodes[2].node.get_our_node_id(), expected_outbound_amt_msat).unwrap(); + nodes[1].node.process_pending_htlc_forwards(); + } + check_added_monitors(&nodes[1], 1); + + let updates = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[2], false); + + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); From 26c6dc702d9d5e431e39f95766ba93456e91e7b9 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 1 Dec 2025 16:11:18 -0800 Subject: [PATCH 9/9] Test 0.2 -> 0.3 reload with with forward htlcs present We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. In the previous commit we started this process by rebuilding ChannelManager::decode_update_add_htlcs, forward_htlcs, and pending_intercepted_htlcs from the Channel data, which will soon be included in the ChannelMonitors as part of a different series of PRs. Here we test that HTLC forwards that were originally received on 0.2 can still be successfully forwarded using the new reload + legacy handling code that will be merged for 0.3. --- lightning-tests/Cargo.toml | 1 + .../src/upgrade_downgrade_tests.rs | 184 ++++++++++++++++++ 2 files changed, 185 insertions(+) diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 439157e528b..8ca58ebf6b3 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-types = { path = "../lightning-types", features = ["_test_utils"] } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_2 = { package = "lightning", git = "https://github.com/lightningdevkit/rust-lightning.git", branch = "0.2", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.7", features = ["_test_utils"] } lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index cef180fbd4e..9a9fac75911 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -10,6 +10,16 @@ //! Tests which test upgrading from previous versions of LDK or downgrading to previous versions of //! LDK. 
+use lightning_0_2::commitment_signed_dance as commitment_signed_dance_0_2; +use lightning_0_2::events::Event as Event_0_2; +use lightning_0_2::get_monitor as get_monitor_0_2; +use lightning_0_2::ln::channelmanager::PaymentId as PaymentId_0_2; +use lightning_0_2::ln::channelmanager::RecipientOnionFields as RecipientOnionFields_0_2; +use lightning_0_2::ln::functional_test_utils as lightning_0_2_utils; +use lightning_0_2::ln::msgs::ChannelMessageHandler as _; +use lightning_0_2::routing::router as router_0_2; +use lightning_0_2::util::ser::Writeable as _; + use lightning_0_1::commitment_signed_dance as commitment_signed_dance_0_1; use lightning_0_1::events::ClosureReason as ClosureReason_0_1; use lightning_0_1::expect_pending_htlcs_forwardable_ignore as expect_pending_htlcs_forwardable_ignore_0_1; @@ -498,3 +508,177 @@ fn test_0_1_htlc_forward_after_splice() { do_test_0_1_htlc_forward_after_splice(true); do_test_0_1_htlc_forward_after_splice(false); } + +#[test] +fn upgrade_mid_htlc_forward() { + do_upgrade_mid_htlc_forward(false); +} +#[test] +fn upgrade_mid_htlc_intercept_forward() { + do_upgrade_mid_htlc_forward(true); +} +fn do_upgrade_mid_htlc_forward(intercept_htlc: bool) { + // In 0.3, we started reconstructing the `ChannelManager`'s HTLC forwards maps from the HTLCs + // contained in `Channel`s, as part of getting rid of `ChannelManager` persistence. However, HTLC + // forwards can only be reconstructed this way if they were received on 0.3 or higher. Test that + // HTLC forwards that were serialized on <=0.2 will still succeed when read on 0.3+. + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id, node_c_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (node_a_blocks, node_b_blocks, node_c_blocks); + let chan_id_bytes_b_c; + + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + + let mut intercept_cfg = lightning_0_2_utils::test_default_channel_config(); + intercept_cfg.accept_intercept_htlcs = true; + let cfgs = &[None, Some(intercept_cfg), None]; + let node_chanmgrs = lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, cfgs); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_bytes_b_c = chan_id_b.0; + + // Ensure all nodes are at the same initial height. 
+ let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_2_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let mut route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + if intercept_htlc { + route.paths[0].hops[1].short_channel_id = nodes[1].node.get_intercept_scid(); + } + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of the forwarding node without initiating the outbound + // edge. + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + nodes[1].node.test_process_pending_update_add_htlcs(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event_0_2::HTLCIntercepted { .. 
})); + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = get_monitor_0_2!(nodes[0], chan_id_a).encode(); + mon_b_1_ser = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + mon_c_1_ser = get_monitor_0_2!(nodes[2], chan_id_b).encode(); + + node_a_blocks = Arc::clone(&nodes[0].blocks); + node_b_blocks = Arc::clone(&nodes[1].blocks); + node_c_blocks = Arc::clone(&nodes[2].blocks); + } + + // Create a dummy node to reload over with the 0.2 state + let mut chanmon_cfgs = create_chanmon_cfgs(3); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + chanmon_cfgs[0].tx_broadcaster.blocks = node_a_blocks; + chanmon_cfgs[1].tx_broadcaster.blocks = node_b_blocks; + chanmon_cfgs[2].tx_broadcaster.blocks = node_c_blocks; + + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let config = test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC to node c, to be claimed back to node A + nodes[1].node.process_pending_htlc_forwards(); + + if intercept_htlc { + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let (intercept_id, expected_outbound_amt_msat) = match events[0] { + Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => { + (intercept_id, expected_outbound_amount_msat) + }, + _ => panic!(), + }; + nodes[1] + .node + .forward_intercepted_htlc( + intercept_id, + &ChannelId(chan_id_bytes_b_c), + nodes[2].node.get_our_node_id(), + expected_outbound_amt_msat, + ) + .unwrap(); + nodes[1].node.process_pending_htlc_forwards(); + } + + let pay_secret = PaymentSecret(payment_secret_bytes); + let pay_hash = PaymentHash(payment_hash_bytes); + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + + check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +}