From 1ddd91c25cb71ecb02e4e353089e888355d4604c Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 2 Mar 2026 15:47:37 +0200 Subject: [PATCH 01/12] ln/refactor: add previous_hop_data helper for HTLCSource --- lightning/src/ln/channelmanager.rs | 33 +++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d042a69bf80..31fe59b6c1d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -857,6 +857,14 @@ mod fuzzy_channelmanager { }, } } + + pub(crate) fn previous_hop_data(&self) -> &[HTLCPreviousHopData] { + match self { + HTLCSource::PreviousHopData(prev_hop) => core::slice::from_ref(prev_hop), + HTLCSource::TrampolineForward { previous_hop_data, .. } => &previous_hop_data[..], + HTLCSource::OutboundRoute { .. } => &[], + } + } } /// Tracks the inbound corresponding to an outbound HTLC @@ -12532,15 +12540,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ chan.update_fulfill_htlc(&msg), chan_entry ); - let prev_hops = match &res.0 { - HTLCSource::PreviousHopData(prev_hop) => vec![prev_hop], - HTLCSource::TrampolineForward { previous_hop_data, .. } => { - previous_hop_data.iter().collect() - }, - _ => vec![], - }; let logger = WithChannelContext::from(&self.logger, &chan.context, None); - for prev_hop in prev_hops { + for prev_hop in res.0.previous_hop_data() { log_trace!(logger, "Holding the next revoke_and_ack until the preimage is durably persisted in the inbound edge's ChannelMonitor", ); @@ -19792,17 +19793,11 @@ impl< .into_iter() .filter_map(|(htlc_source, (htlc, preimage_opt))| { let payment_preimage = preimage_opt?; - let prev_htlcs = match &htlc_source { - HTLCSource::PreviousHopData(prev_hop) => vec![prev_hop], - HTLCSource::TrampolineForward { previous_hop_data, .. } => { - previous_hop_data.iter().collect() - }, - // If it was an outbound payment, we've handled it above - if a preimage - // came in and we persisted the `ChannelManager` we either handled it - // and are good to go or the channel force-closed - we don't have to - // handle the channel still live case here. - _ => vec![], - }; + // If it was an outbound payment, we've handled it above - if a preimage + // came in and we persisted the `ChannelManager` we either handled it + // and are good to go or the channel force-closed - we don't have to + // handle the channel still live case here. + let prev_htlcs = htlc_source.previous_hop_data(); let prev_htlcs_count = prev_htlcs.len(); if prev_htlcs_count == 0 { return None; From bc2d6c733b97e3d1e5abab958fa1faf90c0b90c7 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 12 Mar 2026 08:25:58 -0400 Subject: [PATCH 02/12] ln/refactor: rename shared secret and populate in HTLCPreviousHopData --- lightning/src/ln/channelmanager.rs | 12 ++++++++---- lightning/src/ln/onion_payment.rs | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 31fe59b6c1d..818941db648 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -231,11 +231,12 @@ pub enum PendingHTLCRouting { }, /// An HTLC which should be forwarded on to another Trampoline node. TrampolineForward { - /// The onion shared secret we build with the sender (or the preceding Trampoline node) used - /// to decrypt the onion. 
+ /// The onion shared secret we build with the node that forwarded us this trampoline + /// forward (either the original sender, or a preceding Trampoline node), used to decrypt + /// the inner trampoline onion. /// /// This is later used to encrypt failure packets in the event that the HTLC is failed. - incoming_shared_secret: [u8; 32], + trampoline_shared_secret: [u8; 32], /// The onion which should be included in the forwarded HTLC, telling the next hop what to /// do with the HTLC. onion_packet: msgs::TrampolineOnionPacket, @@ -474,6 +475,9 @@ impl PendingAddHTLCInfo { PendingHTLCRouting::Receive { trampoline_shared_secret, .. } => { trampoline_shared_secret }, + PendingHTLCRouting::TrampolineForward { trampoline_shared_secret, .. } => { + Some(trampoline_shared_secret) + }, _ => None, }; @@ -17572,7 +17576,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (11, invoice_request, option), }, (3, TrampolineForward) => { - (0, incoming_shared_secret, required), + (0, trampoline_shared_secret, required), (2, onion_packet, required), (4, blinded, option), (6, node_id, required), diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 5111f6982fe..bb5b8f21a48 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -249,7 +249,7 @@ pub(super) fn create_fwd_pending_htlc_info( hmac: next_hop_hmac, }; PendingHTLCRouting::TrampolineForward { - incoming_shared_secret: shared_secret.secret_bytes(), + trampoline_shared_secret: shared_secret.secret_bytes(), onion_packet: outgoing_packet, node_id: next_trampoline, incoming_cltv_expiry: msg.cltv_expiry, From a984732d9a4990dcc2b23835cd758f800d17d13c Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 30 Mar 2026 10:47:01 -0400 Subject: [PATCH 03/12] ln/refactor: move MPP information into separate struct to ClaimableHTLC Pull out all fields that are common to incoming claimable and trampoline MPP HTLCs. This will be used in future commits to accumulate MPP HTLCs that are part of trampoline forwards - we can't claim these, but need to accumulate them in the same way as receives before forwarding onwards. --- lightning/src/ln/channelmanager.rs | 236 ++++++++++++++++++----------- 1 file changed, 147 insertions(+), 89 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 818941db648..ae6db2ebbb7 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -529,9 +529,8 @@ enum OnionPayload { Spontaneous(PaymentPreimage), } -/// HTLCs that are to us and can be failed/claimed by the user #[derive(PartialEq, Eq)] -struct ClaimableHTLC { +struct MppPart { prev_hop: HTLCPreviousHopData, cltv_expiry: u32, /// The amount (in msats) of this MPP part @@ -539,23 +538,74 @@ struct ClaimableHTLC { /// The amount (in msats) that the sender intended to be sent in this MPP /// part (used for validating total MPP amount) sender_intended_value: u64, - onion_payload: OnionPayload, timer_ticks: u8, /// The total value received for a payment (sum of all MPP parts if the payment is a MPP). /// Gets set to the amount reported when pushing [`Event::PaymentClaimable`]. 
total_value_received: Option, +} + +impl MppPart { + fn new( + prev_hop: HTLCPreviousHopData, value: u64, sender_intended_value: u64, cltv_expiry: u32, + ) -> Self { + MppPart { + prev_hop, + cltv_expiry, + value, + sender_intended_value, + timer_ticks: 0, + total_value_received: None, + } + } +} + +impl PartialOrd for MppPart { + fn partial_cmp(&self, other: &MppPart) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for MppPart { + fn cmp(&self, other: &MppPart) -> cmp::Ordering { + let res = (self.prev_hop.channel_id, self.prev_hop.htlc_id) + .cmp(&(other.prev_hop.channel_id, other.prev_hop.htlc_id)); + if res.is_eq() { + debug_assert!(self == other, "MppParts from the same source should be identical"); + } + res + } +} + +/// Represents an incoming HTLC that can be claimed or failed by the user. +#[derive(PartialEq, Eq)] +struct ClaimableHTLC { + mpp_part: MppPart, + onion_payload: OnionPayload, /// The extra fee our counterparty skimmed off the top of this HTLC. counterparty_skimmed_fee_msat: Option, } +impl ClaimableHTLC { + fn new( + prev_hop: HTLCPreviousHopData, value: u64, sender_intended_value: u64, cltv_expiry: u32, + onion_payload: OnionPayload, counterparty_skimmed_fee_msat: Option, + ) -> Self { + ClaimableHTLC { + mpp_part: MppPart::new(prev_hop, value, sender_intended_value, cltv_expiry), + onion_payload, + counterparty_skimmed_fee_msat, + } + } +} + impl From<&ClaimableHTLC> for events::ClaimedHTLC { fn from(val: &ClaimableHTLC) -> Self { events::ClaimedHTLC { - counterparty_node_id: val.prev_hop.counterparty_node_id, - channel_id: val.prev_hop.channel_id, - user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0), - cltv_expiry: val.cltv_expiry, - value_msat: val.value, + counterparty_node_id: val.mpp_part.prev_hop.counterparty_node_id, + channel_id: val.mpp_part.prev_hop.channel_id, + user_channel_id: val.mpp_part.prev_hop.user_channel_id.unwrap_or(0), + cltv_expiry: val.mpp_part.cltv_expiry, + value_msat: val.mpp_part.value, counterparty_skimmed_fee_msat: val.counterparty_skimmed_fee_msat.unwrap_or(0), } } @@ -568,12 +618,7 @@ impl PartialOrd for ClaimableHTLC { } impl Ord for ClaimableHTLC { fn cmp(&self, other: &ClaimableHTLC) -> cmp::Ordering { - let res = (self.prev_hop.channel_id, self.prev_hop.htlc_id) - .cmp(&(other.prev_hop.channel_id, other.prev_hop.htlc_id)); - if res.is_eq() { - debug_assert!(self == other, "ClaimableHTLCs from the same source should be identical"); - } - res + self.mpp_part.cmp(&other.mpp_part) } } @@ -1230,7 +1275,9 @@ impl ClaimablePayment { fn inbound_payment_id(&self, secret: &[u8; 32]) -> PaymentId { PaymentId::for_inbound_from_htlcs( secret, - self.htlcs.iter().map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id)), + self.htlcs + .iter() + .map(|htlc| (htlc.mpp_part.prev_hop.channel_id, htlc.mpp_part.prev_hop.htlc_id)), ) } @@ -1240,7 +1287,7 @@ impl ClaimablePayment { fn receiving_channel_ids(&self) -> Vec<(ChannelId, Option)> { self.htlcs .iter() - .map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.user_channel_id)) + .map(|htlc| (htlc.mpp_part.prev_hop.channel_id, htlc.mpp_part.prev_hop.user_channel_id)) .collect() } } @@ -1337,7 +1384,7 @@ impl ClaimablePayments { let mut receiver_node_id = node_signer.get_node_id(Recipient::Node) .expect("Failed to get node_id for node recipient"); for htlc in payment.htlcs.iter() { - if htlc.prev_hop.phantom_shared_secret.is_some() { + if htlc.mpp_part.prev_hop.phantom_shared_secret.is_some() { let phantom_pubkey = node_signer.get_node_id(Recipient::PhantomNode) .expect("Failed 
to get node_id for phantom node recipient"); receiver_node_id = phantom_pubkey; @@ -1366,15 +1413,15 @@ impl ClaimablePayments { // Pick an "arbitrary" channel to block RAAs on until the `PaymentSent` // event is processed, specifically the last channel to get claimed. let durable_preimage_channel = payment.htlcs.last().map_or(None, |htlc| { - if let Some(node_id) = htlc.prev_hop.counterparty_node_id { - Some((htlc.prev_hop.outpoint, node_id, htlc.prev_hop.channel_id)) + if let Some(node_id) = htlc.mpp_part.prev_hop.counterparty_node_id { + Some((htlc.mpp_part.prev_hop.outpoint, node_id, htlc.mpp_part.prev_hop.channel_id)) } else { None } }); debug_assert!(durable_preimage_channel.is_some()); ClaimingPayment { - amount_msat: payment.htlcs.iter().map(|source| source.value).sum(), + amount_msat: payment.htlcs.iter().map(|source| source.mpp_part.value).sum(), payment_purpose: payment.purpose, receiver_node_id, htlcs, @@ -8306,19 +8353,17 @@ impl< panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); }, }; - let claimable_htlc = ClaimableHTLC { + let claimable_htlc = ClaimableHTLC::new( prev_hop, // We differentiate the received value from the sender intended value // if possible so that we don't prematurely mark MPP payments complete // if routing nodes overpay - value: incoming_amt_msat.unwrap_or(outgoing_amt_msat), - sender_intended_value: outgoing_amt_msat, - timer_ticks: 0, - total_value_received: None, + incoming_amt_msat.unwrap_or(outgoing_amt_msat), + outgoing_amt_msat, cltv_expiry, onion_payload, - counterparty_skimmed_fee_msat: skimmed_fee_msat, - }; + skimmed_fee_msat, + ); let mut committed_to_claimable = false; @@ -8326,21 +8371,22 @@ impl< ($htlc: expr, $payment_hash: expr) => { debug_assert!(!committed_to_claimable); let err_data = invalid_payment_err_data( - $htlc.value, + $htlc.mpp_part.value, self.best_block.read().unwrap().height, ); - let counterparty_node_id = $htlc.prev_hop.counterparty_node_id; + let counterparty_node_id = $htlc.mpp_part.prev_hop.counterparty_node_id; let incoming_packet_shared_secret = - $htlc.prev_hop.incoming_packet_shared_secret; - let prev_outbound_scid_alias = $htlc.prev_hop.prev_outbound_scid_alias; + $htlc.mpp_part.prev_hop.incoming_packet_shared_secret; + let prev_outbound_scid_alias = + $htlc.mpp_part.prev_hop.prev_outbound_scid_alias; failed_forwards.push(( HTLCSource::PreviousHopData(HTLCPreviousHopData { prev_outbound_scid_alias, - user_channel_id: $htlc.prev_hop.user_channel_id, + user_channel_id: $htlc.mpp_part.prev_hop.user_channel_id, counterparty_node_id, channel_id: prev_channel_id, outpoint: prev_funding_outpoint, - htlc_id: $htlc.prev_hop.htlc_id, + htlc_id: $htlc.mpp_part.prev_hop.htlc_id, incoming_packet_shared_secret, phantom_shared_secret, trampoline_shared_secret, @@ -8357,7 +8403,8 @@ impl< continue 'next_forwardable_htlc; }; } - let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret; + let phantom_shared_secret = + claimable_htlc.mpp_part.prev_hop.phantom_shared_secret; let mut receiver_node_id = self.our_network_pubkey; if phantom_shared_secret.is_some() { receiver_node_id = self @@ -8396,11 +8443,11 @@ impl< fail_htlc!(claimable_htlc, payment_hash); } let mut total_intended_recvd_value = - claimable_htlc.sender_intended_value; - let mut earliest_expiry = claimable_htlc.cltv_expiry; + claimable_htlc.mpp_part.sender_intended_value; + let mut earliest_expiry = claimable_htlc.mpp_part.cltv_expiry; for htlc in claimable_payment.htlcs.iter() { - total_intended_recvd_value 
+= htlc.sender_intended_value; - earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry); + total_intended_recvd_value += htlc.mpp_part.sender_intended_value; + earliest_expiry = cmp::min(earliest_expiry, htlc.mpp_part.cltv_expiry); if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { break; } } let total_mpp_value = @@ -8409,7 +8456,7 @@ impl< // match exactly the condition used in `timer_tick_occurred` if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { fail_htlc!(claimable_htlc, payment_hash); - } else if total_intended_recvd_value - claimable_htlc.sender_intended_value >= total_mpp_value { + } else if total_intended_recvd_value - claimable_htlc.mpp_part.sender_intended_value >= total_mpp_value { log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable", &payment_hash); fail_htlc!(claimable_htlc, payment_hash); @@ -8419,9 +8466,9 @@ impl< } claimable_payment.htlcs.push(claimable_htlc); let amount_msat = - claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum(); + claimable_payment.htlcs.iter().map(|htlc| htlc.mpp_part.value).sum(); claimable_payment.htlcs.iter_mut() - .for_each(|htlc| htlc.total_value_received = Some(amount_msat)); + .for_each(|htlc| htlc.mpp_part.total_value_received = Some(amount_msat)); let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter() .map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum(); debug_assert!(total_intended_recvd_value.saturating_sub(amount_msat) @@ -8888,18 +8935,18 @@ impl< // This condition determining whether the MPP is complete here must match // exactly the condition used in `process_pending_htlc_forwards`. let total_intended_recvd_value = - payment.htlcs.iter().map(|h| h.sender_intended_value).sum(); + payment.htlcs.iter().map(|h| h.mpp_part.sender_intended_value).sum(); let total_mpp_value = payment.onion_fields.total_mpp_amount_msat; if total_mpp_value <= total_intended_recvd_value { return true; } else if payment.htlcs.iter_mut().any(|htlc| { - htlc.timer_ticks += 1; - return htlc.timer_ticks >= MPP_TIMEOUT_TICKS; + htlc.mpp_part.timer_ticks += 1; + return htlc.mpp_part.timer_ticks >= MPP_TIMEOUT_TICKS; }) { let htlcs = payment .htlcs .drain(..) 
- .map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)); + .map(|htlc: ClaimableHTLC| (htlc.mpp_part.prev_hop, *payment_hash)); timed_out_mpp_htlcs.extend(htlcs); return false; } @@ -8988,7 +9035,7 @@ impl< if let Some(payment) = removed_source { for htlc in payment.htlcs { let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc); - let source = HTLCSource::PreviousHopData(htlc.prev_hop); + let source = HTLCSource::PreviousHopData(htlc.mpp_part.prev_hop); let receiver = HTLCHandlingFailureType::Receive { payment_hash: *payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None); } @@ -9007,7 +9054,7 @@ impl< HTLCFailReason::from_failure_code(failure_code.into()) }, FailureCode::IncorrectOrUnknownPaymentDetails => { - let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); + let mut htlc_msat_height_data = htlc.mpp_part.value.to_be_bytes().to_vec(); htlc_msat_height_data .extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes()); HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data) @@ -9342,7 +9389,7 @@ impl< FailureCode::InvalidOnionPayload(None), &htlc, ); - let source = HTLCSource::PreviousHopData(htlc.prev_hop); + let source = HTLCSource::PreviousHopData(htlc.mpp_part.prev_hop); let receiver = HTLCHandlingFailureType::Receive { payment_hash }; self.fail_htlc_backwards_internal( &source, @@ -9368,14 +9415,16 @@ impl< let mut errs = Vec::new(); let per_peer_state = self.per_peer_state.read().unwrap(); for htlc in sources.iter() { - if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received { + if expected_amt_msat.is_some() + && expected_amt_msat != htlc.mpp_part.total_value_received + { log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!"); debug_assert!(false); valid_mpp = false; break; } - expected_amt_msat = htlc.total_value_received; - claimable_amt_msat += htlc.value; + expected_amt_msat = htlc.mpp_part.total_value_received; + claimable_amt_msat += htlc.mpp_part.value; } mem::drop(per_peer_state); if sources.is_empty() || expected_amt_msat.is_none() { @@ -9396,12 +9445,12 @@ impl< let mpp_parts: Vec<_> = sources .iter() .filter_map(|htlc| { - if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { + if let Some(cp_id) = htlc.mpp_part.prev_hop.counterparty_node_id { Some(MPPClaimHTLCSource { counterparty_node_id: cp_id, - funding_txo: htlc.prev_hop.outpoint, - channel_id: htlc.prev_hop.channel_id, - htlc_id: htlc.prev_hop.htlc_id, + funding_txo: htlc.mpp_part.prev_hop.outpoint, + channel_id: htlc.mpp_part.prev_hop.channel_id, + htlc_id: htlc.mpp_part.prev_hop.htlc_id, }) } else { None @@ -9427,11 +9476,11 @@ impl< for htlc in sources { let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().map(|pending_mpp_claim| { - let counterparty_id = htlc.prev_hop.counterparty_node_id; + let counterparty_id = htlc.mpp_part.prev_hop.counterparty_node_id; let counterparty_id = counterparty_id .expect("Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least one claimable payment was not resolved. 
Please downgrade to LDK 0.0.125 and resolve the HTLC by claiming the payment prior to upgrading."); let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); - (counterparty_id, htlc.prev_hop.channel_id, claim_ptr) + (counterparty_id, htlc.mpp_part.prev_hop.channel_id, claim_ptr) }); let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| { RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { @@ -9443,7 +9492,7 @@ impl< // non-zero value will not make a difference in the penalty that may be applied by the sender. If there // is a phantom hop, we need to double-process. let attribution_data = - if let Some(phantom_secret) = htlc.prev_hop.phantom_shared_secret { + if let Some(phantom_secret) = htlc.mpp_part.prev_hop.phantom_shared_secret { let attribution_data = process_fulfill_attribution_data(None, &phantom_secret, 0); Some(attribution_data) @@ -9453,12 +9502,12 @@ impl< let attribution_data = process_fulfill_attribution_data( attribution_data, - &htlc.prev_hop.incoming_packet_shared_secret, + &htlc.mpp_part.prev_hop.incoming_packet_shared_secret, 0, ); self.claim_funds_from_hop( - htlc.prev_hop, + &htlc.mpp_part.prev_hop, payment_preimage, payment_info.clone(), Some(attribution_data), @@ -9479,9 +9528,11 @@ impl< } } else { for htlc in sources { - let err_data = - invalid_payment_err_data(htlc.value, self.best_block.read().unwrap().height); - let source = HTLCSource::PreviousHopData(htlc.prev_hop); + let err_data = invalid_payment_err_data( + htlc.mpp_part.value, + self.best_block.read().unwrap().height, + ); + let source = HTLCSource::PreviousHopData(htlc.mpp_part.prev_hop); let reason = HTLCFailReason::reason( LocalHTLCFailureReason::IncorrectPaymentDetails, err_data, @@ -9529,7 +9580,7 @@ impl< #[cfg(test)] let claiming_chan_funding_outpoint = hop_data.outpoint; self.claim_funds_from_hop( - hop_data, + &hop_data, payment_preimage, None, Some(attribution_data), @@ -9628,7 +9679,7 @@ impl< bool, ) -> (Option, Option), >( - &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, + &self, prev_hop: &HTLCPreviousHopData, payment_preimage: PaymentPreimage, payment_info: Option, attribution_data: Option, completion_action: ComplFunc, ) { @@ -16214,14 +16265,14 @@ impl< // our commitment transaction confirmed before the HTLC expires, plus the // number of blocks we generally consider it to take to do a commitment update, // just give up on it and fail the HTLC. 
- if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { + if height >= htlc.mpp_part.cltv_expiry - HTLC_FAIL_BACK_BUFFER { let reason = LocalHTLCFailureReason::PaymentClaimBuffer; timed_out_htlcs.push(( - HTLCSource::PreviousHopData(htlc.prev_hop.clone()), + HTLCSource::PreviousHopData(htlc.mpp_part.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::reason( reason, - invalid_payment_err_data(htlc.value, height), + invalid_payment_err_data(htlc.mpp_part.value, height), ), HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone(), @@ -17703,13 +17754,13 @@ fn write_claimable_htlc( OnionPayload::Spontaneous(preimage) => (None, Some(preimage)), }; write_tlv_fields!(writer, { - (0, htlc.prev_hop, required), + (0, htlc.mpp_part.prev_hop, required), (1, total_mpp_value_msat, required), - (2, htlc.value, required), - (3, htlc.sender_intended_value, required), + (2, htlc.mpp_part.value, required), + (3, htlc.mpp_part.sender_intended_value, required), (4, payment_data, option), - (5, htlc.total_value_received, option), - (6, htlc.cltv_expiry, required), + (5, htlc.mpp_part.total_value_received, option), + (6, htlc.mpp_part.cltv_expiry, required), (8, keysend_preimage, option), (10, htlc.counterparty_skimmed_fee_msat, option), }); @@ -17742,13 +17793,15 @@ impl Readable for (ClaimableHTLC, u64) { None => OnionPayload::Invoice { _legacy_hop_data: payment_data }, }; Ok((ClaimableHTLC { - prev_hop: prev_hop.0.unwrap(), - timer_ticks: 0, - value, - sender_intended_value: sender_intended_value.unwrap_or(value), - total_value_received, + mpp_part: MppPart { + prev_hop: prev_hop.0.unwrap(), + timer_ticks: 0, + value, + sender_intended_value: sender_intended_value.unwrap_or(value), + total_value_received, + cltv_expiry: cltv_expiry.0.unwrap(), + }, onion_payload, - cltv_expiry: cltv_expiry.0.unwrap(), counterparty_skimmed_fee_msat, }, total_msat.0.expect("required field"))) } @@ -19866,10 +19919,13 @@ impl< // panic if we attempted to claim them at this point. for (payment_hash, payment) in claimable_payments.iter() { for htlc in payment.htlcs.iter() { - if htlc.prev_hop.counterparty_node_id.is_some() { + if htlc.mpp_part.prev_hop.counterparty_node_id.is_some() { continue; } - if short_to_chan_info.get(&htlc.prev_hop.prev_outbound_scid_alias).is_some() { + if short_to_chan_info + .get(&htlc.mpp_part.prev_hop.prev_outbound_scid_alias) + .is_some() + { log_error!(args.logger, "We do not have the required information to claim a pending payment with payment hash {} reliably.\ As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\ @@ -20057,10 +20113,10 @@ impl< // See above comment on `failed_htlcs`. 
for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + for htlc in htlcs.iter() { dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs, - prev_hop_data, + &htlc.mpp_part.prev_hop, "HTLC was already decoded and marked as a claimable payment", &args.logger, ); @@ -20367,7 +20423,8 @@ impl< log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash); let mut claimable_amt_msat = 0; let mut receiver_node_id = Some(our_network_pubkey); - let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret; + let phantom_shared_secret = + payment.htlcs[0].mpp_part.prev_hop.phantom_shared_secret; if phantom_shared_secret.is_some() { let phantom_pubkey = channel_manager .node_signer @@ -20376,7 +20433,7 @@ impl< receiver_node_id = Some(phantom_pubkey) } for claimable_htlc in &payment.htlcs { - claimable_amt_msat += claimable_htlc.value; + claimable_amt_msat += claimable_htlc.mpp_part.value; // Add a holding-cell claim of the payment to the Channel, which should be // applied ~immediately on peer reconnection. Because it won't generate a @@ -20393,7 +20450,7 @@ impl< // this channel as well. On the flip side, there's no harm in restarting // without the new monitor persisted - we'll end up right back here on // restart. - let previous_channel_id = claimable_htlc.prev_hop.channel_id; + let previous_channel_id = claimable_htlc.mpp_part.prev_hop.channel_id; let peer_node_id = monitor.get_counterparty_node_id(); { let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap(); @@ -20411,14 +20468,15 @@ impl< ); channel .claim_htlc_while_disconnected_dropping_mon_update_legacy( - claimable_htlc.prev_hop.htlc_id, + claimable_htlc.mpp_part.prev_hop.htlc_id, payment_preimage, &&logger, ); } } - if let Some(previous_hop_monitor) = - args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) + if let Some(previous_hop_monitor) = args + .channel_monitors + .get(&claimable_htlc.mpp_part.prev_hop.channel_id) { // Note that this is unsafe as we no longer require the // `ChannelMonitor`s to be re-persisted prior to this From 664abb19cf0c11e5fa867c9159ee72d996d19d31 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 30 Mar 2026 11:06:50 -0400 Subject: [PATCH 04/12] ln/refactor: move mpp timeout into helper function We'll use this shared logic when we need to timeout trampoline HTLCs. Note that there's a slight behavior change in this commit. Previously, we'd do a first pass to check out total received value and return early if we'd reached it without applying a MPP tick to any HTLC. Now, we'll apply the MPP tick as we accumulate our total value received. This does not make any difference, because we never MPP-timeout fully accumulated MPP payments so it doesn't matter if we've applied the tick when we've reached our full amount. --- lightning/src/ln/channelmanager.rs | 77 ++++++++++++++++++++---------- 1 file changed, 53 insertions(+), 24 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ae6db2ebbb7..ca138b944de 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -557,6 +557,12 @@ impl MppPart { total_value_received: None, } } + + // Increments timer ticks and returns a boolean indicating whether HTLC is timed out. 
+ fn mpp_timer_tick(&mut self) -> bool { + self.timer_ticks += 1; + self.timer_ticks >= MPP_TIMEOUT_TICKS + } } impl PartialOrd for MppPart { @@ -1292,6 +1298,30 @@ impl ClaimablePayment { } } +/// Increments MPP timeout tick for all HTLCs and returns a boolean indicating whether the HTLC +/// set has hit its MPP timeout. Will return false if the set has reached the sender's intended +/// total, as the MPP has completed in this case. +fn check_mpp_timeout<'a>( + htlcs: impl Iterator, onion_fields: &RecipientOnionFields, +) -> bool { + // This condition determining whether the MPP is complete here must match exactly the condition + // used in `process_pending_htlc_forwards`. + let total_mpp_value = onion_fields.total_mpp_amount_msat; + let mut total_intended_recvd_value = 0; + let mut timed_out = false; + for htlc in htlcs { + total_intended_recvd_value += htlc.sender_intended_value; + if htlc.mpp_timer_tick() { + timed_out = true; + } + } + if total_intended_recvd_value >= total_mpp_value { + return false; + } + + timed_out +} + /// Represent the channel funding transaction type. enum FundingType { /// This variant is useful when we want LDK to validate the funding transaction and @@ -8925,42 +8955,41 @@ impl< self.claimable_payments.lock().unwrap().claimable_payments.retain( |payment_hash, payment| { if payment.htlcs.is_empty() { - // This should be unreachable debug_assert!(false); return false; } if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload { - // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat). - // In this case we're not going to handle any timeouts of the parts here. - // This condition determining whether the MPP is complete here must match - // exactly the condition used in `process_pending_htlc_forwards`. - let total_intended_recvd_value = - payment.htlcs.iter().map(|h| h.mpp_part.sender_intended_value).sum(); - let total_mpp_value = payment.onion_fields.total_mpp_amount_msat; - if total_mpp_value <= total_intended_recvd_value { - return true; - } else if payment.htlcs.iter_mut().any(|htlc| { - htlc.mpp_part.timer_ticks += 1; - return htlc.mpp_part.timer_ticks >= MPP_TIMEOUT_TICKS; - }) { - let htlcs = payment - .htlcs - .drain(..) - .map(|htlc: ClaimableHTLC| (htlc.mpp_part.prev_hop, *payment_hash)); - timed_out_mpp_htlcs.extend(htlcs); - return false; + let mpp_timeout = check_mpp_timeout( + payment.htlcs.iter_mut().map(|htlc| &mut htlc.mpp_part), + &payment.onion_fields, + ); + if mpp_timeout { + timed_out_mpp_htlcs.extend(payment.htlcs.drain(..).map(|h| { + ( + HTLCSource::PreviousHopData(h.mpp_part.prev_hop), + *payment_hash, + HTLCHandlingFailureType::Receive { + payment_hash: *payment_hash, + }, + ) + })); } + return !mpp_timeout; } true }, ); - for htlc_source in timed_out_mpp_htlcs.drain(..) { - let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); + for (htlc_source, payment_hash, failure_type) in timed_out_mpp_htlcs.drain(..) 
{ let failure_reason = LocalHTLCFailureReason::MPPTimeout; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingFailureType::Receive { payment_hash: htlc_source.1 }; - self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver, None); + self.fail_htlc_backwards_internal( + &htlc_source, + &payment_hash, + &reason, + failure_type, + None, + ); } for (err, counterparty_node_id) in handle_errors { From d025de88508a7072aa124228fb87da0fbda4b293 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 30 Mar 2026 11:15:25 -0400 Subject: [PATCH 05/12] ln/refactor: move on chain timeout check into claimable htlc We'll re-use this to check trampoline MPP timeout in future commits. --- lightning/src/ln/channelmanager.rs | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ca138b944de..195509fc8cc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -563,6 +563,12 @@ impl MppPart { self.timer_ticks += 1; self.timer_ticks >= MPP_TIMEOUT_TICKS } + + /// Returns a boolean indicating whether the HTLC has timed out on chain, accounting for a buffer + /// that gives us time to resolve it. + fn check_onchain_timeout(&self, height: u32, buffer: u32) -> bool { + height >= self.cltv_expiry - buffer + } } impl PartialOrd for MppPart { @@ -16287,14 +16293,16 @@ impl< } if let Some(height) = height_opt { + // If height is approaching the number of blocks we think it takes us to get our + // commitment transaction confirmed before the HTLC expires, plus the number of blocks + // we generally consider it to take to do a commitment update, just give up on it and + // fail the HTLC. self.claimable_payments.lock().unwrap().claimable_payments.retain( |payment_hash, payment| { payment.htlcs.retain(|htlc| { - // If height is approaching the number of blocks we think it takes us to get - // our commitment transaction confirmed before the HTLC expires, plus the - // number of blocks we generally consider it to take to do a commitment update, - // just give up on it and fail the HTLC. - if height >= htlc.mpp_part.cltv_expiry - HTLC_FAIL_BACK_BUFFER { + let htlc_timed_out = + htlc.mpp_part.check_onchain_timeout(height, HTLC_FAIL_BACK_BUFFER); + if htlc_timed_out { let reason = LocalHTLCFailureReason::PaymentClaimBuffer; timed_out_htlcs.push(( HTLCSource::PreviousHopData(htlc.mpp_part.prev_hop.clone()), @@ -16307,10 +16315,8 @@ impl< payment_hash: payment_hash.clone(), }, )); - false - } else { - true } + !htlc_timed_out }); !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. }, From 04c556552905077843c4ecb005062bac32298d69 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 30 Mar 2026 16:21:18 -0400 Subject: [PATCH 06/12] ln/refactor: remove claimable htlc from fail_htlc macro In the commit that follows we're going to need to take ownership of our htlc before this macro is used, so we pull out the information we need in advance. 
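As a rough, self-contained sketch of the ownership pattern this prepares (the types, values, and macro body below are simplified stand-ins rather than the LDK definitions): the data a failure needs is captured before the macro is declared, leaving the HTLC itself free to be moved afterwards.

    struct PrevHop { channel_id: u64, htlc_id: u64 }
    struct ClaimableHtlc { prev_hop: PrevHop, value: u64 }

    fn main() {
        let htlc = ClaimableHtlc { prev_hop: PrevHop { channel_id: 42, htlc_id: 7 }, value: 1_000 };

        // Copy out everything a failure path would need before the macro is declared.
        let htlc_value = htlc.value;
        let failure_channel_id = htlc.prev_hop.channel_id;
        let failure_htlc_id = htlc.prev_hop.htlc_id;

        macro_rules! fail_htlc {
            ($payment_hash: expr) => {
                println!(
                    "failing payment {:02x?}: {} msat, channel {}, htlc {}",
                    $payment_hash, htlc_value, failure_channel_id, failure_htlc_id
                )
            };
        }

        // Later code can now take ownership of the HTLC itself, while the macro
        // remains usable on the failure paths that no longer need it.
        let mut claimable_htlcs = Vec::new();
        claimable_htlcs.push(htlc);
        if claimable_htlcs.len() > 1 {
            fail_htlc!([0u8; 32]);
        }
    }
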
--- lightning/src/ln/channelmanager.rs | 67 ++++++++++++++---------------- 1 file changed, 32 insertions(+), 35 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 195509fc8cc..1bfcaeaa96b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -8389,12 +8389,26 @@ impl< panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); }, }; + let htlc_value = incoming_amt_msat.unwrap_or(outgoing_amt_msat); + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + prev_outbound_scid_alias: prev_hop.prev_outbound_scid_alias, + user_channel_id: prev_hop.user_channel_id, + counterparty_node_id: prev_hop.counterparty_node_id, + channel_id: prev_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_hop.htlc_id, + incoming_packet_shared_secret: prev_hop.incoming_packet_shared_secret, + phantom_shared_secret, + trampoline_shared_secret, + blinded_failure, + cltv_expiry: Some(cltv_expiry), + }); let claimable_htlc = ClaimableHTLC::new( prev_hop, // We differentiate the received value from the sender intended value // if possible so that we don't prematurely mark MPP payments complete // if routing nodes overpay - incoming_amt_msat.unwrap_or(outgoing_amt_msat), + htlc_value, outgoing_amt_msat, cltv_expiry, onion_payload, @@ -8404,31 +8418,14 @@ impl< let mut committed_to_claimable = false; macro_rules! fail_htlc { - ($htlc: expr, $payment_hash: expr) => { + ($payment_hash: expr) => { debug_assert!(!committed_to_claimable); let err_data = invalid_payment_err_data( - $htlc.mpp_part.value, + htlc_value, self.best_block.read().unwrap().height, ); - let counterparty_node_id = $htlc.mpp_part.prev_hop.counterparty_node_id; - let incoming_packet_shared_secret = - $htlc.mpp_part.prev_hop.incoming_packet_shared_secret; - let prev_outbound_scid_alias = - $htlc.mpp_part.prev_hop.prev_outbound_scid_alias; failed_forwards.push(( - HTLCSource::PreviousHopData(HTLCPreviousHopData { - prev_outbound_scid_alias, - user_channel_id: $htlc.mpp_part.prev_hop.user_channel_id, - counterparty_node_id, - channel_id: prev_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: $htlc.mpp_part.prev_hop.htlc_id, - incoming_packet_shared_secret, - phantom_shared_secret, - trampoline_shared_secret, - blinded_failure, - cltv_expiry: Some(cltv_expiry), - }), + htlc_source, payment_hash, HTLCFailReason::reason( LocalHTLCFailureReason::IncorrectPaymentDetails, @@ -8455,7 +8452,7 @@ impl< let is_keysend = $purpose.is_keysend(); let mut claimable_payments = self.claimable_payments.lock().unwrap(); if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); } let ref mut claimable_payment = claimable_payments.claimable_payments .entry(payment_hash) @@ -8471,12 +8468,12 @@ impl< if $purpose != claimable_payment.purpose { let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" }; log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend)); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); } let onions_compatible = claimable_payment.onion_fields.check_merge(&mut onion_fields); if onions_compatible.is_err() { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); } let mut total_intended_recvd_value = 
claimable_htlc.mpp_part.sender_intended_value; @@ -8491,11 +8488,11 @@ impl< // The condition determining whether an MPP is complete must // match exactly the condition used in `timer_tick_occurred` if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); } else if total_intended_recvd_value - claimable_htlc.mpp_part.sender_intended_value >= total_mpp_value { log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable", &payment_hash); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); } else if total_intended_recvd_value >= total_mpp_value { #[allow(unused_assignments)] { committed_to_claimable = true; @@ -8556,7 +8553,7 @@ impl< Ok(result) => result, Err(()) => { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); }, }; if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta { @@ -8566,12 +8563,12 @@ impl< if (cltv_expiry as u64) < expected_min_expiry_height { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", &payment_hash, cltv_expiry, expected_min_expiry_height); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); } } payment_preimage } else { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); } } else { None @@ -8587,7 +8584,7 @@ impl< let purpose = match from_parts_res { Ok(purpose) => purpose, Err(()) => { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); }, }; check_total_value!(purpose); @@ -8604,7 +8601,7 @@ impl< false, "We checked that payment_data is Some above" ); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); }, }; @@ -8623,13 +8620,13 @@ impl< verified_invreq.amount_msats() { if payment_data.total_msat < invreq_amt_msat { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); } } verified_invreq }, None => { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); }, }; let payment_purpose_context = @@ -8645,12 +8642,12 @@ impl< match from_parts_res { Ok(purpose) => purpose, Err(()) => { - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); }, } } else if payment_context.is_some() { log_trace!(self.logger, "Failing new HTLC with payment_hash {}: received a keysend payment to a non-async payments context {:#?}", payment_hash, payment_context); - fail_htlc!(claimable_htlc, payment_hash); + fail_htlc!(payment_hash); } else { events::PaymentPurpose::SpontaneousPayment(keysend_preimage) }; From cc5ea6e716cc00357f0dcf04f381a004478d2bc3 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 30 Mar 2026 16:19:17 -0400 Subject: [PATCH 07/12] ln/refactor: move checks on incoming mpp accumulation into method We're going to use the same logic for trampoline and for incoming MPP payments, so we pull this out into a separate function. 
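As a standalone sketch of the accumulation rule being centralized (names, constants, and the helper signature below are simplified stand-ins for illustration, not the real LDK API), the completeness condition sums the sender-intended values and compares them against the onion's total, with an overflow guard:

    const MAX_VALUE_MSAT: u64 = 21_000_000 * 100_000_000 * 1_000;

    struct Part { sender_intended_value: u64 }

    // Ok(true) once the parts cover the sender's intended total, Ok(false) while
    // more parts are expected, and Err(()) when the new part must be failed
    // (value overflow, or the payment was already complete before it arrived).
    fn accumulate_part(parts: &mut Vec<Part>, new_part: Part, total_mpp_msat: u64) -> Result<bool, ()> {
        let mut received = new_part.sender_intended_value;
        for part in parts.iter() {
            received += part.sender_intended_value;
            if received >= MAX_VALUE_MSAT { break; }
        }
        if received >= MAX_VALUE_MSAT {
            Err(())
        } else if received - new_part.sender_intended_value >= total_mpp_msat {
            Err(())
        } else {
            let complete = received >= total_mpp_msat;
            parts.push(new_part);
            Ok(complete)
        }
    }

    fn main() {
        let mut parts = Vec::new();
        assert_eq!(accumulate_part(&mut parts, Part { sender_intended_value: 600 }, 1_000), Ok(false));
        assert_eq!(accumulate_part(&mut parts, Part { sender_intended_value: 400 }, 1_000), Ok(true));
        assert!(accumulate_part(&mut parts, Part { sender_intended_value: 100 }, 1_000).is_err());
    }

The same condition has to be evaluated identically in timer_tick_occurred, which is why keeping it in one place matters for both the receive path and the future trampoline path.
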
--- lightning/src/ln/channelmanager.rs | 276 ++++++++++++++++++----------- 1 file changed, 172 insertions(+), 104 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1bfcaeaa96b..2d3ee03c18a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1302,6 +1302,11 @@ impl ClaimablePayment { .map(|htlc| (htlc.mpp_part.prev_hop.channel_id, htlc.mpp_part.prev_hop.user_channel_id)) .collect() } + + /// Returns the total counterparty skimmed fee across all HTLCs. + fn total_counterparty_skimmed_msat(&self) -> u64 { + self.htlcs.iter().map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum() + } } /// Increments MPP timeout tick for all HTLCs and returns a boolean indicating whether the HTLC @@ -8287,6 +8292,140 @@ impl< } } + // Checks whether an incoming htlc can be added to our [`claimable_payments`], and handles + // MPP accumulation. On successful add, returns Ok() with a boolean indicating whether all + // MPP parts have arrived. Callers *MUST NOT* fail htlcs if Ok(..) is returned. + fn check_claimable_incoming_htlc( + &self, claimable_payment: &mut ClaimablePayment, claimable_htlc: ClaimableHTLC, + mut onion_fields: RecipientOnionFields, payment_hash: PaymentHash, + ) -> Result { + let onions_compatible = claimable_payment.onion_fields.check_merge(&mut onion_fields); + if onions_compatible.is_err() { + return Err(()); + } + let mut total_intended_recvd_value = claimable_htlc.mpp_part.sender_intended_value; + for htlc in claimable_payment.htlcs.iter() { + total_intended_recvd_value += htlc.mpp_part.sender_intended_value; + if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { + break; + } + } + let total_mpp_value = claimable_payment.onion_fields.total_mpp_amount_msat; + // The condition determining whether an MPP is complete must + // match exactly the condition used in `timer_tick_occurred` + if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { + return Err(()); + } else if total_intended_recvd_value - claimable_htlc.mpp_part.sender_intended_value + >= total_mpp_value + { + log_trace!( + self.logger, + "Failing HTLC with payment_hash {} as payment is already claimable", + &payment_hash + ); + return Err(()); + } else if total_intended_recvd_value >= total_mpp_value { + claimable_payment.htlcs.push(claimable_htlc); + let amount_msat = claimable_payment.htlcs.iter().map(|htlc| htlc.mpp_part.value).sum(); + claimable_payment + .htlcs + .iter_mut() + .for_each(|htlc| htlc.mpp_part.total_value_received = Some(amount_msat)); + let counterparty_skimmed_fee_msat = claimable_payment.total_counterparty_skimmed_msat(); + debug_assert!( + total_intended_recvd_value.saturating_sub(amount_msat) + <= counterparty_skimmed_fee_msat + ); + claimable_payment.htlcs.sort(); + Ok(true) + } else { + // Nothing to do - we haven't reached the total + // payment value yet, wait until we receive more + // MPP parts. + claimable_payment.htlcs.push(claimable_htlc); + Ok(false) + } + } + + // Handles the addition of a HTLC associated with a payment we're receiving. Err(bool) indicates + // whether we have failed after committing to the HTLC - callers should assert that this + // value is false. 
+ fn handle_claimable_htlc( + &self, purpose: events::PaymentPurpose, claimable_htlc: ClaimableHTLC, + onion_fields: RecipientOnionFields, payment_hash: PaymentHash, receiver_node_id: PublicKey, + new_events: &mut VecDeque<(Event, Option)>, + ) -> Result<(), bool> { + let mut committed_to_claimable = false; + + let mut claimable_payments = self.claimable_payments.lock().unwrap(); + if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { + return Err(committed_to_claimable); + } + + let ref mut claimable_payment = claimable_payments + .claimable_payments + .entry(payment_hash) + // Note that if we insert here we MUST NOT fail_htlc!() + .or_insert_with(|| { + committed_to_claimable = true; + ClaimablePayment { + purpose: purpose.clone(), + htlcs: Vec::new(), + onion_fields: onion_fields.clone(), + } + }); + + let is_keysend = purpose.is_keysend(); + if purpose != claimable_payment.purpose { + let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" }; + log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend)); + return Err(committed_to_claimable); + } + + let htlc_expiry = claimable_htlc.mpp_part.cltv_expiry; + if self + .check_claimable_incoming_htlc( + claimable_payment, + claimable_htlc, + onion_fields, + payment_hash, + ) + .map_err(|_| committed_to_claimable)? + { + let claim_deadline = Some( + match claimable_payment.htlcs.iter().map(|h| h.mpp_part.cltv_expiry).min() { + Some(claim_deadline) => claim_deadline, + None => { + debug_assert!(false, "no htlcs in completed claimable_payment"); + htlc_expiry + }, + } - HTLC_FAIL_BACK_BUFFER, + ); + new_events.push_back(( + events::Event::PaymentClaimable { + receiver_node_id: Some(receiver_node_id), + payment_hash, + purpose, + amount_msat: claimable_payment + .htlcs + .iter() + .map(|htlc| htlc.mpp_part.value) + .sum(), + counterparty_skimmed_fee_msat: claimable_payment + .total_counterparty_skimmed_msat(), + receiving_channel_ids: claimable_payment.receiving_channel_ids(), + claim_deadline, + onion_fields: Some(claimable_payment.onion_fields.clone()), + payment_id: Some( + claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret), + ), + }, + None, + )); + } + Ok(()) + } + fn process_receive_htlcs( &self, pending_forwards: &mut Vec, new_events: &mut VecDeque<(Event, Option)>, @@ -8317,7 +8456,7 @@ impl< payment_data, payment_context, phantom_shared_secret, - mut onion_fields, + onion_fields, has_recipient_created_payment_secret, invoice_request_opt, trampoline_shared_secret, @@ -8415,11 +8554,9 @@ impl< skimmed_fee_msat, ); - let mut committed_to_claimable = false; - macro_rules! fail_htlc { - ($payment_hash: expr) => { - debug_assert!(!committed_to_claimable); + ($payment_hash: expr, $committed_to_claimable: expr) => { + debug_assert!(!$committed_to_claimable); let err_data = invalid_payment_err_data( htlc_value, self.best_block.read().unwrap().height, @@ -8446,94 +8583,6 @@ impl< .expect("Failed to get node_id for phantom node recipient"); } - macro_rules! 
check_total_value { - ($purpose: expr) => {{ - let mut payment_claimable_generated = false; - let is_keysend = $purpose.is_keysend(); - let mut claimable_payments = self.claimable_payments.lock().unwrap(); - if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { - fail_htlc!(payment_hash); - } - let ref mut claimable_payment = claimable_payments.claimable_payments - .entry(payment_hash) - // Note that if we insert here we MUST NOT fail_htlc!() - .or_insert_with(|| { - committed_to_claimable = true; - ClaimablePayment { - purpose: $purpose.clone(), - htlcs: Vec::new(), - onion_fields: onion_fields.clone(), - } - }); - if $purpose != claimable_payment.purpose { - let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" }; - log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend)); - fail_htlc!(payment_hash); - } - let onions_compatible = - claimable_payment.onion_fields.check_merge(&mut onion_fields); - if onions_compatible.is_err() { - fail_htlc!(payment_hash); - } - let mut total_intended_recvd_value = - claimable_htlc.mpp_part.sender_intended_value; - let mut earliest_expiry = claimable_htlc.mpp_part.cltv_expiry; - for htlc in claimable_payment.htlcs.iter() { - total_intended_recvd_value += htlc.mpp_part.sender_intended_value; - earliest_expiry = cmp::min(earliest_expiry, htlc.mpp_part.cltv_expiry); - if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { break; } - } - let total_mpp_value = - claimable_payment.onion_fields.total_mpp_amount_msat; - // The condition determining whether an MPP is complete must - // match exactly the condition used in `timer_tick_occurred` - if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { - fail_htlc!(payment_hash); - } else if total_intended_recvd_value - claimable_htlc.mpp_part.sender_intended_value >= total_mpp_value { - log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable", - &payment_hash); - fail_htlc!(payment_hash); - } else if total_intended_recvd_value >= total_mpp_value { - #[allow(unused_assignments)] { - committed_to_claimable = true; - } - claimable_payment.htlcs.push(claimable_htlc); - let amount_msat = - claimable_payment.htlcs.iter().map(|htlc| htlc.mpp_part.value).sum(); - claimable_payment.htlcs.iter_mut() - .for_each(|htlc| htlc.mpp_part.total_value_received = Some(amount_msat)); - let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter() - .map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum(); - debug_assert!(total_intended_recvd_value.saturating_sub(amount_msat) - <= counterparty_skimmed_fee_msat); - claimable_payment.htlcs.sort(); - let payment_id = - claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret); - new_events.push_back((events::Event::PaymentClaimable { - receiver_node_id: Some(receiver_node_id), - payment_hash, - purpose: $purpose, - amount_msat, - counterparty_skimmed_fee_msat, - receiving_channel_ids: claimable_payment.receiving_channel_ids(), - claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER), - onion_fields: Some(claimable_payment.onion_fields.clone()), - payment_id: Some(payment_id), - }, None)); - payment_claimable_generated = true; - } else { - // Nothing to do - we haven't reached the total - // payment value yet, wait until we receive more - // MPP parts. 
- claimable_payment.htlcs.push(claimable_htlc); - #[allow(unused_assignments)] { - committed_to_claimable = true; - } - } - payment_claimable_generated - }} - } - // Check that the payment hash and secret are known. Note that we // MUST take care to handle the "unknown payment hash" and // "incorrect payment secret" cases here identically or we'd expose @@ -8553,7 +8602,7 @@ impl< Ok(result) => result, Err(()) => { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash); - fail_htlc!(payment_hash); + fail_htlc!(payment_hash, false); }, }; if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta { @@ -8563,12 +8612,12 @@ impl< if (cltv_expiry as u64) < expected_min_expiry_height { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", &payment_hash, cltv_expiry, expected_min_expiry_height); - fail_htlc!(payment_hash); + fail_htlc!(payment_hash, false); } } payment_preimage } else { - fail_htlc!(payment_hash); + fail_htlc!(payment_hash, false); } } else { None @@ -8584,10 +8633,20 @@ impl< let purpose = match from_parts_res { Ok(purpose) => purpose, Err(()) => { - fail_htlc!(payment_hash); + fail_htlc!(payment_hash, false); }, }; - check_total_value!(purpose); + + if let Err(committed_to_claimable) = self.handle_claimable_htlc( + purpose, + claimable_htlc, + onion_fields, + payment_hash, + receiver_node_id, + new_events, + ) { + fail_htlc!(payment_hash, committed_to_claimable); + } }, OnionPayload::Spontaneous(keysend_preimage) => { let purpose = if let Some(PaymentContext::AsyncBolt12Offer( @@ -8601,7 +8660,7 @@ impl< false, "We checked that payment_data is Some above" ); - fail_htlc!(payment_hash); + fail_htlc!(payment_hash, false); }, }; @@ -8620,13 +8679,13 @@ impl< verified_invreq.amount_msats() { if payment_data.total_msat < invreq_amt_msat { - fail_htlc!(payment_hash); + fail_htlc!(payment_hash, false); } } verified_invreq }, None => { - fail_htlc!(payment_hash); + fail_htlc!(payment_hash, false); }, }; let payment_purpose_context = @@ -8642,16 +8701,25 @@ impl< match from_parts_res { Ok(purpose) => purpose, Err(()) => { - fail_htlc!(payment_hash); + fail_htlc!(payment_hash, false); }, } } else if payment_context.is_some() { log_trace!(self.logger, "Failing new HTLC with payment_hash {}: received a keysend payment to a non-async payments context {:#?}", payment_hash, payment_context); - fail_htlc!(payment_hash); + fail_htlc!(payment_hash, false); } else { events::PaymentPurpose::SpontaneousPayment(keysend_preimage) }; - check_total_value!(purpose); + if let Err(committed_to_claimable) = self.handle_claimable_htlc( + purpose, + claimable_htlc, + onion_fields, + payment_hash, + receiver_node_id, + new_events, + ) { + fail_htlc!(payment_hash, committed_to_claimable); + } }, } }, From 91a9066073f5415917d6347fd12b27c7ce1d13ed Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 30 Mar 2026 15:17:14 -0400 Subject: [PATCH 08/12] ln/refactor: introduce HasMppPart generic to share incoming mpp To allow re-use with trampoline payments which won't use the ClaimablePayment type, make handling generic for anything with MPP parts. 
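A minimal, self-contained sketch of the accessor-trait pattern (the TrampolinePart type is hypothetical and the MppPart fields are trimmed down for the example; only MppPart and HasMppPart themselves exist in this patch):

    struct MppPart { sender_intended_value: u64, total_value_received: Option<u64> }

    trait HasMppPart {
        fn mpp_part(&self) -> &MppPart;
        fn mpp_part_mut(&mut self) -> &mut MppPart;
    }

    // A future trampoline-forward part only needs to expose its embedded MppPart
    // for the shared accumulation and timeout helpers to operate on it.
    struct TrampolinePart { mpp_part: MppPart, outgoing_node: [u8; 33] }

    impl HasMppPart for TrampolinePart {
        fn mpp_part(&self) -> &MppPart { &self.mpp_part }
        fn mpp_part_mut(&mut self) -> &mut MppPart { &mut self.mpp_part }
    }

    // Generic over anything carrying an MppPart, mirroring the shape of the
    // shared accumulation code: sum the parts, then record the total on each.
    fn mark_received_total<H: HasMppPart>(htlcs: &mut [H]) {
        let total: u64 = htlcs.iter().map(|h| h.mpp_part().sender_intended_value).sum();
        for htlc in htlcs.iter_mut() {
            htlc.mpp_part_mut().total_value_received = Some(total);
        }
    }

    fn main() {
        let mut parts = vec![
            TrampolinePart { mpp_part: MppPart { sender_intended_value: 700, total_value_received: None }, outgoing_node: [2u8; 33] },
            TrampolinePart { mpp_part: MppPart { sender_intended_value: 300, total_value_received: None }, outgoing_node: [2u8; 33] },
        ];
        mark_received_total(&mut parts);
        assert!(parts.iter().all(|p| p.outgoing_node == [2u8; 33]));
        assert!(parts.iter().all(|p| p.mpp_part().total_value_received == Some(1_000)));
    }
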
--- lightning/src/ln/channelmanager.rs | 72 ++++++++++++++++++++---------- 1 file changed, 48 insertions(+), 24 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2d3ee03c18a..58936fd87cc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -588,6 +588,20 @@ impl Ord for MppPart { } } +trait HasMppPart { + fn mpp_part(&self) -> &MppPart; + fn mpp_part_mut(&mut self) -> &mut MppPart; +} + +impl HasMppPart for MppPart { + fn mpp_part(&self) -> &MppPart { + self + } + fn mpp_part_mut(&mut self) -> &mut MppPart { + self + } +} + /// Represents an incoming HTLC that can be claimed or failed by the user. #[derive(PartialEq, Eq)] struct ClaimableHTLC { @@ -610,6 +624,15 @@ impl ClaimableHTLC { } } +impl HasMppPart for ClaimableHTLC { + fn mpp_part(&self) -> &MppPart { + &self.mpp_part + } + fn mpp_part_mut(&mut self) -> &mut MppPart { + &mut self.mpp_part + } +} + impl From<&ClaimableHTLC> for events::ClaimedHTLC { fn from(val: &ClaimableHTLC) -> Self { events::ClaimedHTLC { @@ -8295,27 +8318,27 @@ impl< // Checks whether an incoming htlc can be added to our [`claimable_payments`], and handles // MPP accumulation. On successful add, returns Ok() with a boolean indicating whether all // MPP parts have arrived. Callers *MUST NOT* fail htlcs if Ok(..) is returned. - fn check_claimable_incoming_htlc( - &self, claimable_payment: &mut ClaimablePayment, claimable_htlc: ClaimableHTLC, + fn check_claimable_incoming_htlc( + &self, htlcs: &mut Vec, payment_onion_fields: &mut RecipientOnionFields, new_htlc: H, mut onion_fields: RecipientOnionFields, payment_hash: PaymentHash, ) -> Result { - let onions_compatible = claimable_payment.onion_fields.check_merge(&mut onion_fields); + let onions_compatible = payment_onion_fields.check_merge(&mut onion_fields); if onions_compatible.is_err() { return Err(()); } - let mut total_intended_recvd_value = claimable_htlc.mpp_part.sender_intended_value; - for htlc in claimable_payment.htlcs.iter() { - total_intended_recvd_value += htlc.mpp_part.sender_intended_value; + let mut total_intended_recvd_value = new_htlc.mpp_part().sender_intended_value; + for htlc in htlcs.iter() { + total_intended_recvd_value += htlc.mpp_part().sender_intended_value; if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { break; } } - let total_mpp_value = claimable_payment.onion_fields.total_mpp_amount_msat; + let total_mpp_value = payment_onion_fields.total_mpp_amount_msat; // The condition determining whether an MPP is complete must // match exactly the condition used in `timer_tick_occurred` if total_intended_recvd_value >= msgs::MAX_VALUE_MSAT { return Err(()); - } else if total_intended_recvd_value - claimable_htlc.mpp_part.sender_intended_value + } else if total_intended_recvd_value - new_htlc.mpp_part().sender_intended_value >= total_mpp_value { log_trace!( @@ -8325,24 +8348,17 @@ impl< ); return Err(()); } else if total_intended_recvd_value >= total_mpp_value { - claimable_payment.htlcs.push(claimable_htlc); - let amount_msat = claimable_payment.htlcs.iter().map(|htlc| htlc.mpp_part.value).sum(); - claimable_payment - .htlcs + htlcs.push(new_htlc); + let amount_msat = htlcs.iter().map(|htlc| htlc.mpp_part().value).sum(); + htlcs .iter_mut() - .for_each(|htlc| htlc.mpp_part.total_value_received = Some(amount_msat)); - let counterparty_skimmed_fee_msat = claimable_payment.total_counterparty_skimmed_msat(); - debug_assert!( - total_intended_recvd_value.saturating_sub(amount_msat) - <= 
counterparty_skimmed_fee_msat
-			);
-			claimable_payment.htlcs.sort();
+				.for_each(|htlc| htlc.mpp_part_mut().total_value_received = Some(amount_msat));
+			htlcs.sort();
 			Ok(true)
 		} else {
-			// Nothing to do - we haven't reached the total
-			// payment value yet, wait until we receive more
-			// MPP parts.
-			claimable_payment.htlcs.push(claimable_htlc);
+			// Nothing to do - we haven't reached the total payment value yet, wait until we
+			// receive more MPP parts.
+			htlcs.push(new_htlc);
 			Ok(false)
 		}
 	}
@@ -8385,13 +8401,21 @@ impl<
 		let htlc_expiry = claimable_htlc.mpp_part.cltv_expiry;
 		if self
 			.check_claimable_incoming_htlc(
-				claimable_payment,
+				&mut claimable_payment.htlcs,
+				&mut claimable_payment.onion_fields,
 				claimable_htlc,
 				onion_fields,
 				payment_hash,
 			)
 			.map_err(|_| committed_to_claimable)?
 		{
+			let counterparty_skimmed_fee_msat = claimable_payment.total_counterparty_skimmed_msat();
+			let amount_msat: u64 = claimable_payment.htlcs.iter().map(|h| h.mpp_part.value).sum();
+			let total_sender_intended: u64 =
+				claimable_payment.htlcs.iter().map(|h| h.mpp_part.sender_intended_value).sum();
+			debug_assert!(
+				total_sender_intended.saturating_sub(amount_msat) <= counterparty_skimmed_fee_msat
+			);
 			let claim_deadline = Some(
 				match claimable_payment.htlcs.iter().map(|h| h.mpp_part.cltv_expiry).min() {
 					Some(claim_deadline) => claim_deadline,

From fc95fde0d71fc93499f65039e0cacd95cc4fff4f Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Thu, 12 Feb 2026 13:31:49 +0200
Subject: [PATCH 09/12] ln/refactor: pass minimum delta into check_incoming_htlc_cltv

For trampoline payments, we don't want to enforce a minimum CLTV delta
between our incoming CLTV and the outer onion's outgoing CLTV, because we
calculate our delta from the inner trampoline onion's value. However, we
still want to check that we receive at least the CLTV that the sending
node intended for us, and we still want to validate our incoming CLTV
expiry. Refactor to allow passing a zero delta, for use with trampoline
payments.
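For illustration, the expected trampoline call site (not added in this patch)
would keep the existing check but pass a zero delta:

// Hypothetical trampoline forward path: the relative delta is taken from the
// inner trampoline onion, so we only require that the incoming CLTV covers
// the outgoing value the sender asked for (height-based checks still apply).
check_incoming_htlc_cltv(cur_height, next_hop.outgoing_cltv_value, msg.cltv_expiry, 0)?;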
--- lightning/src/ln/channelmanager.rs | 8 ++++++-- lightning/src/ln/onion_payment.rs | 6 +++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 58936fd87cc..538e132374a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5281,8 +5281,12 @@ impl< }; let cur_height = self.best_block.read().unwrap().height + 1; - check_incoming_htlc_cltv(cur_height, next_hop.outgoing_cltv_value, msg.cltv_expiry)?; - + check_incoming_htlc_cltv( + cur_height, + next_hop.outgoing_cltv_value, + msg.cltv_expiry, + MIN_CLTV_EXPIRY_DELTA, + )?; Ok(intercept) } diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index bb5b8f21a48..615c357d11b 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -515,7 +515,7 @@ pub fn peel_payment_onion }; if let Err(reason) = check_incoming_htlc_cltv( - cur_height, outgoing_cltv_value, msg.cltv_expiry, + cur_height, outgoing_cltv_value, msg.cltv_expiry, MIN_CLTV_EXPIRY_DELTA, ) { return Err(InboundHTLCErr { msg: "incoming cltv check failed", @@ -719,9 +719,9 @@ pub(super) fn decode_incoming_update_add_htlc_onion Result<(), LocalHTLCFailureReason> { - if (cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { + if (cltv_expiry as u64) < (outgoing_cltv_value) as u64 + min_cltv_expiry_delta as u64 { return Err(LocalHTLCFailureReason::IncorrectCLTVExpiry); } // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, From 1287117736d9cedf457cd4b058af5cfa75e1469b Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 17 Mar 2026 08:37:11 -0400 Subject: [PATCH 10/12] blinded_path/refactor: make construction generic over forwarding type To use helper functions for either trampoline or regular paths. --- lightning/src/blinded_path/payment.rs | 94 +++++++++++++++++++++------ 1 file changed, 73 insertions(+), 21 deletions(-) diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index 03b676adc92..60a3774f9f9 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -161,8 +161,12 @@ impl BlindedPaymentPath { ) } - fn new_inner( - intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, + fn new_inner< + F: ForwardTlvsInfo, + ES: EntropySource, + T: secp256k1::Signing + secp256k1::Verification, + >( + intermediate_nodes: &[ForwardNode], payee_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, @@ -323,18 +327,36 @@ impl BlindedPaymentPath { } } -/// An intermediate node, its outbound channel, and relay parameters. +/// Common interface for forward TLV types used in blinded payment paths. +/// +/// Both [`ForwardTlvs`] (channel-based forwarding) and [`TrampolineForwardTlvs`] (trampoline +/// node-based forwarding) implement this trait, allowing blinded path construction to be generic +/// over the forwarding mechanism. +pub trait ForwardTlvsInfo: Writeable + Clone { + /// The payment relay parameters for this hop. + fn payment_relay(&self) -> &PaymentRelay; + /// The payment constraints for this hop. + fn payment_constraints(&self) -> &PaymentConstraints; + /// The features for this hop. 
+ fn features(&self) -> &BlindedHopFeatures; +} + +/// An intermediate node, its forwarding parameters, and its [`ForwardTlvsInfo`] for use in a +/// [`BlindedPaymentPath`]. #[derive(Clone, Debug)] -pub struct PaymentForwardNode { +pub struct ForwardNode { /// The TLVs for this node's [`BlindedHop`], where the fee parameters contained within are also /// used for [`BlindedPayInfo`] construction. - pub tlvs: ForwardTlvs, + pub tlvs: F, /// This node's pubkey. pub node_id: PublicKey, /// The maximum value, in msat, that may be accepted by this node. pub htlc_maximum_msat: u64, } +/// An intermediate node for a regular (non-trampoline) [`BlindedPaymentPath`]. +pub type PaymentForwardNode = ForwardNode; + /// Data to construct a [`BlindedHop`] for forwarding a payment. #[derive(Clone, Debug)] pub struct ForwardTlvs { @@ -354,6 +376,18 @@ pub struct ForwardTlvs { pub next_blinding_override: Option, } +impl ForwardTlvsInfo for ForwardTlvs { + fn payment_relay(&self) -> &PaymentRelay { + &self.payment_relay + } + fn payment_constraints(&self) -> &PaymentConstraints { + &self.payment_constraints + } + fn features(&self) -> &BlindedHopFeatures { + &self.features + } +} + /// Data to construct a [`BlindedHop`] for forwarding a Trampoline payment. #[derive(Clone, Debug)] pub struct TrampolineForwardTlvs { @@ -373,6 +407,18 @@ pub struct TrampolineForwardTlvs { pub next_blinding_override: Option, } +impl ForwardTlvsInfo for TrampolineForwardTlvs { + fn payment_relay(&self) -> &PaymentRelay { + &self.payment_relay + } + fn payment_constraints(&self) -> &PaymentConstraints { + &self.payment_constraints + } + fn features(&self) -> &BlindedHopFeatures { + &self.features + } +} + /// TLVs carried by a dummy hop within a blinded payment path. /// /// Dummy hops do not correspond to real forwarding decisions, but are processed @@ -440,8 +486,8 @@ pub(crate) enum BlindedTrampolineTlvs { // Used to include forward and receive TLVs in the same iterator for encoding. #[derive(Clone)] -enum BlindedPaymentTlvsRef<'a> { - Forward(&'a ForwardTlvs), +enum BlindedPaymentTlvsRef<'a, F: ForwardTlvsInfo = ForwardTlvs> { + Forward(&'a F), Dummy(&'a DummyTlvs), Receive(&'a ReceiveTlvs), } @@ -619,7 +665,7 @@ impl Writeable for ReceiveTlvs { } } -impl<'a> Writeable for BlindedPaymentTlvsRef<'a> { +impl<'a, F: ForwardTlvsInfo> Writeable for BlindedPaymentTlvsRef<'a, F> { fn write(&self, w: &mut W) -> Result<(), io::Error> { match self { Self::Forward(tlvs) => tlvs.write(w)?, @@ -723,8 +769,8 @@ impl Readable for BlindedTrampolineTlvs { pub(crate) const PAYMENT_PADDING_ROUND_OFF: usize = 30; /// Construct blinded payment hops for the given `intermediate_nodes` and payee info. 
-pub(super) fn blinded_hops( - secp_ctx: &Secp256k1, intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, +pub(super) fn blinded_hops( + secp_ctx: &Secp256k1, intermediate_nodes: &[ForwardNode], payee_node_id: PublicKey, dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, ) -> Vec { @@ -823,15 +869,15 @@ where Ok((curr_base_fee, curr_prop_mil)) } -pub(super) fn compute_payinfo( - intermediate_nodes: &[PaymentForwardNode], dummy_tlvs: &[DummyTlvs], payee_tlvs: &ReceiveTlvs, +pub(super) fn compute_payinfo( + intermediate_nodes: &[ForwardNode], dummy_tlvs: &[DummyTlvs], payee_tlvs: &ReceiveTlvs, payee_htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, ) -> Result { let routing_fees = intermediate_nodes .iter() .map(|node| RoutingFees { - base_msat: node.tlvs.payment_relay.fee_base_msat, - proportional_millionths: node.tlvs.payment_relay.fee_proportional_millionths, + base_msat: node.tlvs.payment_relay().fee_base_msat, + proportional_millionths: node.tlvs.payment_relay().fee_proportional_millionths, }) .chain(dummy_tlvs.iter().map(|tlvs| RoutingFees { base_msat: tlvs.payment_relay.fee_base_msat, @@ -847,24 +893,24 @@ pub(super) fn compute_payinfo( for node in intermediate_nodes.iter() { // In the future, we'll want to take the intersection of all supported features for the // `BlindedPayInfo`, but there are no features in that context right now. - if node.tlvs.features.requires_unknown_bits_from(&BlindedHopFeatures::empty()) { + if node.tlvs.features().requires_unknown_bits_from(&BlindedHopFeatures::empty()) { return Err(()); } cltv_expiry_delta = - cltv_expiry_delta.checked_add(node.tlvs.payment_relay.cltv_expiry_delta).ok_or(())?; + cltv_expiry_delta.checked_add(node.tlvs.payment_relay().cltv_expiry_delta).ok_or(())?; // The min htlc for an intermediate node is that node's min minus the fees charged by all of the // following hops for forwarding that min, since that fee amount will automatically be included // in the amount that this node receives and contribute towards reaching its min. 
htlc_minimum_msat = amt_to_forward_msat( - core::cmp::max(node.tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat), - &node.tlvs.payment_relay, + core::cmp::max(node.tlvs.payment_constraints().htlc_minimum_msat, htlc_minimum_msat), + node.tlvs.payment_relay(), ) .unwrap_or(1); // If underflow occurs, we definitely reached this node's min htlc_maximum_msat = amt_to_forward_msat( core::cmp::min(node.htlc_maximum_msat, htlc_maximum_msat), - &node.tlvs.payment_relay, + node.tlvs.payment_relay(), ) .ok_or(())?; // If underflow occurs, we cannot send to this hop without exceeding their max } @@ -1038,8 +1084,14 @@ mod tests { payment_constraints: PaymentConstraints { max_cltv_expiry: 0, htlc_minimum_msat: 1 }, payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), }; - let blinded_payinfo = - super::compute_payinfo(&[], &[], &recv_tlvs, 4242, TEST_FINAL_CLTV as u16).unwrap(); + let blinded_payinfo = super::compute_payinfo::( + &[], + &[], + &recv_tlvs, + 4242, + TEST_FINAL_CLTV as u16, + ) + .unwrap(); assert_eq!(blinded_payinfo.fee_base_msat, 0); assert_eq!(blinded_payinfo.fee_proportional_millionths, 0); assert_eq!(blinded_payinfo.cltv_expiry_delta, TEST_FINAL_CLTV as u16); From 3360f8d99c785741a56e295d163f82e0ffcd6918 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 17 Mar 2026 13:46:45 -0400 Subject: [PATCH 11/12] blinded_path: add constructor for trampoline blinded path --- lightning/src/blinded_path/payment.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index 60a3774f9f9..e97f93146f9 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -161,6 +161,29 @@ impl BlindedPaymentPath { ) } + /// Create a blinded path for a trampoline payment, to be forwarded along `intermediate_nodes`. + #[cfg(any(test, feature = "_test_utils"))] + pub(crate) fn new_for_trampoline< + ES: EntropySource, + T: secp256k1::Signing + secp256k1::Verification, + >( + intermediate_nodes: &[ForwardNode], payee_node_id: PublicKey, + local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, htlc_maximum_msat: u64, + min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, + ) -> Result { + Self::new_inner( + intermediate_nodes, + payee_node_id, + local_node_receive_key, + &[], + payee_tlvs, + htlc_maximum_msat, + min_final_cltv_expiry_delta, + entropy_source, + secp_ctx, + ) + } + fn new_inner< F: ForwardTlvsInfo, ES: EntropySource, From 6aae0dd2c7b675de37c58f67310147f65db8c597 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 17 Mar 2026 08:39:50 -0400 Subject: [PATCH 12/12] ln/test: add multi-purpose trampoline test helper To create trampoline forwarding and single hop receiving tails. --- lightning/src/ln/blinded_payment_tests.rs | 58 +++++------------------ lightning/src/ln/functional_test_utils.rs | 51 +++++++++++++++++++- lightning/src/routing/router.rs | 2 +- 3 files changed, 61 insertions(+), 50 deletions(-) diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index e148ce2c474..6d12d2137a8 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -2420,50 +2420,6 @@ fn test_trampoline_blinded_receive() { do_test_trampoline_relay(true, TrampolineTestCase::OuterCLTVLessThanTrampoline); } -/// Creates a blinded tail where Carol receives via a blinded path. 
-fn create_blinded_tail( - secp_ctx: &Secp256k1, override_random_bytes: [u8; 32], carol_node_id: PublicKey, - carol_auth_key: ReceiveAuthKey, trampoline_cltv_expiry_delta: u32, - excess_final_cltv_delta: u32, final_value_msat: u64, payment_secret: PaymentSecret, -) -> BlindedTail { - let outer_session_priv = SecretKey::from_slice(&override_random_bytes).unwrap(); - let trampoline_session_priv = onion_utils::compute_trampoline_session_priv(&outer_session_priv); - - let carol_blinding_point = PublicKey::from_secret_key(&secp_ctx, &trampoline_session_priv); - let carol_blinded_hops = { - let payee_tlvs = ReceiveTlvs { - payment_secret, - payment_constraints: PaymentConstraints { - max_cltv_expiry: u32::max_value(), - htlc_minimum_msat: final_value_msat, - }, - payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), - } - .encode(); - - let path = [((carol_node_id, Some(carol_auth_key)), WithoutLength(&payee_tlvs))]; - - blinded_path::utils::construct_blinded_hops( - &secp_ctx, - path.into_iter(), - &trampoline_session_priv, - ) - }; - - BlindedTail { - trampoline_hops: vec![TrampolineHop { - pubkey: carol_node_id, - node_features: Features::empty(), - fee_msat: final_value_msat, - cltv_expiry_delta: trampoline_cltv_expiry_delta + excess_final_cltv_delta, - }], - hops: carol_blinded_hops, - blinding_point: carol_blinding_point, - excess_final_cltv_expiry_delta: excess_final_cltv_delta, - final_value_msat, - } -} - // Creates a replacement onion that is used to produce scenarios that we don't support, specifically // payloads that send to unblinded receives and invalid payloads. fn replacement_onion( @@ -2631,15 +2587,23 @@ fn do_test_trampoline_relay(blinded: bool, test_case: TrampolineTestCase) { // Create a blinded tail where Carol is receiving. In our unblinded test cases, we'll // override this anyway (with a tail sending to an unblinded receive, which LDK doesn't // allow). - blinded_tail: Some(create_blinded_tail( + blinded_tail: Some(create_trampoline_forward_blinded_tail( &secp_ctx, - override_random_bytes, + &nodes[2].keys_manager, + &[], carol_node_id, nodes[2].keys_manager.get_receive_auth_key(), + ReceiveTlvs { + payment_secret, + payment_constraints: PaymentConstraints { + max_cltv_expiry: u32::max_value(), + htlc_minimum_msat: original_amt_msat, + }, + payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), + }, original_trampoline_cltv, excess_final_cltv, original_amt_msat, - payment_secret, )), }], route_params: None, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 80274d180b4..cde3614f47f 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -10,7 +10,9 @@ //! A bunch of useful utilities for building networks of nodes and exchanging messages between //! nodes for functional tests. 
-use crate::blinded_path::payment::DummyTlvs; +use crate::blinded_path::payment::{ + BlindedPaymentPath, DummyTlvs, ForwardNode, ReceiveTlvs, TrampolineForwardTlvs, +}; use crate::chain::channelmonitor::{ChannelMonitor, HTLC_FAIL_BACK_BUFFER}; use crate::chain::transaction::OutPoint; use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; @@ -40,7 +42,8 @@ use crate::ln::types::ChannelId; use crate::onion_message::messenger::OnionMessenger; use crate::routing::gossip::{NetworkGraph, NetworkUpdate, P2PGossipSync}; use crate::routing::router::{self, PaymentParameters, Route, RouteParameters}; -use crate::sign::{EntropySource, RandomBytes}; +use crate::routing::router::{compute_fees, BlindedTail, TrampolineHop}; +use crate::sign::{EntropySource, RandomBytes, ReceiveAuthKey}; use crate::types::features::ChannelTypeFeatures; use crate::types::features::InitFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; @@ -5798,3 +5801,47 @@ pub fn get_scid_from_channel_id<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, channel_id: .short_channel_id .unwrap() } + +/// Creates a [`BlindedTail`] for a trampoline forward through a single intermediate node. +/// +/// The resulting tail contains blinded hops built from `intermediate_nodes` plus a dummy receive +/// TLV, with the `TrampolineHop` fee and CLTV derived from the blinded path's aggregated payinfo. +pub fn create_trampoline_forward_blinded_tail( + secp_ctx: &bitcoin::secp256k1::Secp256k1, entropy_source: ES, + intermediate_nodes: &[ForwardNode], payee_node_id: PublicKey, + payee_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, min_final_cltv_expiry_delta: u32, + excess_final_cltv_delta: u32, final_value_msat: u64, +) -> BlindedTail { + let blinded_path = BlindedPaymentPath::new_for_trampoline( + intermediate_nodes, + payee_node_id, + payee_receive_key, + payee_tlvs, + u64::max_value(), + min_final_cltv_expiry_delta as u16, + entropy_source, + secp_ctx, + ) + .unwrap(); + + BlindedTail { + trampoline_hops: vec![TrampolineHop { + pubkey: intermediate_nodes.first().map(|n| n.node_id).unwrap_or(payee_node_id), + node_features: types::features::Features::empty(), + fee_msat: compute_fees( + final_value_msat, + lightning_types::routing::RoutingFees { + base_msat: blinded_path.payinfo.fee_base_msat, + proportional_millionths: blinded_path.payinfo.fee_proportional_millionths, + }, + ) + .unwrap(), + cltv_expiry_delta: blinded_path.payinfo.cltv_expiry_delta as u32 + + excess_final_cltv_delta, + }], + hops: blinded_path.blinded_hops().to_vec(), + blinding_point: blinded_path.blinding_point(), + excess_final_cltv_expiry_delta: excess_final_cltv_delta, + final_value_msat, + } +} diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 0c0d14b43fd..edb048c8c7d 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -2464,7 +2464,7 @@ impl<'a> PaymentPath<'a> { #[inline(always)] /// Calculate the fees required to route the given amount over a channel with the given fees. #[rustfmt::skip] -fn compute_fees(amount_msat: u64, channel_fees: RoutingFees) -> Option { +pub(crate) fn compute_fees(amount_msat: u64, channel_fees: RoutingFees) -> Option { amount_msat.checked_mul(channel_fees.proportional_millionths as u64) .and_then(|part| (channel_fees.base_msat as u64).checked_add(part / 1_000_000)) }
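As a quick sanity check on the fee derivation used by
create_trampoline_forward_blinded_tail, a worked example with assumed payinfo
values (fee_base_msat = 1_000, fee_proportional_millionths = 10_000) and a
final value of 2_000_000 msat:

// 1_000 + (2_000_000 * 10_000) / 1_000_000 = 21_000 msat
let fee_msat = compute_fees(
	2_000_000,
	lightning_types::routing::RoutingFees { base_msat: 1_000, proportional_millionths: 10_000 },
);
assert_eq!(fee_msat, Some(21_000));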