diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 91ead9903cb..4baa577e0df 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -46,6 +46,55 @@ cargo test -p lightning --verbose --color always --features dnssec cargo check -p lightning --verbose --color always --features dnssec cargo doc -p lightning --document-private-items --features dnssec +echo -e "\n\nChecking and testing lightning with safe_channels" +RUSTFLAGS="${RUSTFLAGS//-D warnings/}" cargo test -p lightning --verbose --color always --features safe_channels -- \ + --skip channel_holding_cell_serialize \ + --skip test_blocked_chan_preimage_release \ + --skip test_durable_preimages_on_closed_channel \ + --skip test_inbound_reload_without_init_mon \ + --skip test_inverted_mon_completion_order \ + --skip test_outbound_reload_without_init_mon \ + --skip test_partial_claim_mon_update_compl_actions \ + --skip test_reload_mon_update_completion_actions \ + --skip test_malformed_forward_htlcs_ser \ + --skip test_multi_post_event_actions \ + --skip test_anchors_aggregated_revoked_htlc_tx \ + --skip test_anchors_monitor_fixes_counterparty_payment_script_on_reload \ + --skip test_claim_event_never_handled \ + --skip test_event_replay_causing_monitor_replay \ + --skip test_lost_timeout_monitor_events \ + --skip no_double_pay_with_stale_channelmanager \ + --skip test_onion_failure_stale_channel_update \ + --skip automatic_retries \ + --skip no_missing_sent_on_midpoint_reload \ + --skip no_missing_sent_on_reload \ + --skip retry_with_no_persist \ + --skip test_completed_payment_not_retryable_on_reload \ + --skip test_dup_htlc_onchain_doesnt_fail_on_reload \ + --skip test_fulfill_restart_failure \ + --skip test_payment_metadata_consistency \ + --skip test_priv_forwarding_rejection \ + --skip test_quiescence_termination_on_disconnect \ + --skip forwarded_payment_no_manager_persistence \ + --skip intercepted_payment_no_manager_persistence \ + --skip removed_payment_no_manager_persistence \ + --skip test_data_loss_protect \ + --skip test_htlc_localremoved_persistence \ + --skip test_manager_serialize_deserialize_events \ + --skip test_manager_serialize_deserialize_inconsistent_monitor \ + --skip test_no_txn_manager_serialize_deserialize \ + --skip test_partial_claim_before_restart \ + --skip test_reload_partial_funding_batch \ + --skip test_simple_manager_serialize_deserialize \ + --skip test_unconf_chan \ + --skip test_unconf_chan_via_funding_unconfirmed \ + --skip test_unconf_chan_via_listen \ + --skip test_propose_splice_while_disconnected \ + --skip test_splice_reestablish \ + --skip test_splice_state_reset_on_disconnect +RUSTFLAGS="${RUSTFLAGS//-D warnings/}" cargo check -p lightning --verbose --color always --features safe_channels +RUSTFLAGS="${RUSTFLAGS//-D warnings/}" cargo doc -p lightning --document-private-items --features safe_channels + echo -e "\n\nChecking and testing Block Sync Clients with features" cargo test -p lightning-block-sync --verbose --color always --features rest-client diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index 257c7596ac0..d421fdccb3a 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -23,6 +23,7 @@ _externalize_tests = ["inventory", "_test_utils"] # Allow signing of local transactions that may have been revoked or will be revoked, for functional testing (e.g. justice tx handling). # This is unsafe to use in production because it may result in the counterparty publishing taking our funds. 
unsafe_revoked_tx_signing = [] +safe_channels = [] std = [] diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 10e5049682e..7fff3b9f1ff 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -50,6 +50,8 @@ use crate::ln::chan_utils::{ self, ChannelTransactionParameters, CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCClaim, HTLCOutputInCommitment, HolderCommitmentTransaction, }; +#[cfg(feature = "safe_channels")] +use crate::ln::channel::FundedChannelState; use crate::ln::channel::INITIAL_COMMITMENT_NUMBER; use crate::ln::channel_keys::{ DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, HtlcKey, RevocationBasepoint, @@ -111,8 +113,28 @@ pub struct ChannelMonitorUpdate { /// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and /// always `Some` otherwise. pub channel_id: Option, + + /// The channel state associated with this ChannelMonitorUpdate, if any. + #[cfg(feature = "safe_channels")] + pub channel_state: Option, +} + +/// The state of a channel to be stored alongside a ChannelMonitor. For closed channels, no state is stored. +#[cfg(feature = "safe_channels")] +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum UpdateChannelState { + /// Open channel in funded state. + Funded(FundedChannelState), + /// Closed channel. + Closed, } +#[cfg(feature = "safe_channels")] +impl_writeable_tlv_based_enum!(UpdateChannelState, + (1, Closed) => {}, + {0, Funded} => (), +); + impl ChannelMonitorUpdate { pub(crate) fn internal_renegotiated_funding_data( &self, @@ -156,10 +178,17 @@ impl Writeable for ChannelMonitorUpdate { for update_step in self.updates.iter() { update_step.write(w)?; } + #[cfg(not(feature = "safe_channels"))] write_tlv_fields!(w, { // 1 was previously used to store `counterparty_node_id` (3, self.channel_id, option), }); + #[cfg(feature = "safe_channels")] + write_tlv_fields!(w, { + // 1 was previously used to store `counterparty_node_id` + (3, self.channel_id, option), + (5, self.channel_state, option) + }); Ok(()) } } @@ -176,11 +205,24 @@ impl Readable for ChannelMonitorUpdate { } } let mut channel_id = None; + #[cfg(not(feature = "safe_channels"))] read_tlv_fields!(r, { // 1 was previously used to store `counterparty_node_id` (3, channel_id, option), }); - Ok(Self { update_id, updates, channel_id }) + #[cfg(feature = "safe_channels")] + let mut channel_state = None; + #[cfg(feature = "safe_channels")] + read_tlv_fields!(r, { + // 1 was previously used to store `counterparty_node_id` + (3, channel_id, option), + (5, channel_state, option) + }); + Ok(Self { + update_id, updates, channel_id, + #[cfg(feature = "safe_channels")] + channel_state + }) } } @@ -1402,6 +1444,11 @@ pub(crate) struct ChannelMonitorImpl { /// make deciding whether to do so simple, here we track whether this monitor was last written /// prior to 0.1. written_by_0_1_or_later: bool, + + /// The channel state as provided via the last `ChannelMonitorUpdate` or via a call to + /// [`ChannelMonitor::update_channel_state`]. + #[cfg(feature = "safe_channels")] + channel_state: Option, } // Returns a `&FundingScope` for the one we are currently observing/handling commitment transactions @@ -1521,6 +1568,12 @@ const MIN_SERIALIZATION_VERSION: u8 = 1; pub(crate) fn write_chanmon_internal( channel_monitor: &ChannelMonitorImpl, _is_stub: bool, writer: &mut W, ) -> Result<(), Error> { + // Check that the encoded channel (if present) is consistent with the rest of the monitor. 
This sets an invariant + // for the safe_channels feature. + #[cfg(feature = "safe_channels")] + if let Some(UpdateChannelState::Funded(ref channel_state)) = channel_monitor.channel_state { + channel_monitor.check_channel_state_consistency(channel_state); + } write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); channel_monitor.latest_update_id.write(writer)?; @@ -1733,6 +1786,7 @@ pub(crate) fn write_chanmon_internal( _ => channel_monitor.pending_monitor_events.clone(), }; + #[cfg(not(feature = "safe_channels"))] write_tlv_fields!(writer, { (1, channel_monitor.funding_spend_confirmed, option), (3, channel_monitor.htlcs_resolved_on_chain, required_vec), @@ -1757,6 +1811,32 @@ pub(crate) fn write_chanmon_internal( (37, channel_monitor.funding_seen_onchain, required), }); + #[cfg(feature = "safe_channels")] + write_tlv_fields!(writer, { + (1, channel_monitor.funding_spend_confirmed, option), + (3, channel_monitor.htlcs_resolved_on_chain, required_vec), + (5, pending_monitor_events, required_vec), + (7, channel_monitor.funding_spend_seen, required), + (9, channel_monitor.counterparty_node_id, required), + (11, channel_monitor.confirmed_commitment_tx_counterparty_output, option), + (13, channel_monitor.spendable_txids_confirmed, required_vec), + (15, channel_monitor.counterparty_fulfilled_htlcs, required), + (17, channel_monitor.initial_counterparty_commitment_info, option), + (19, channel_monitor.channel_id, required), + (21, channel_monitor.balances_empty_height, option), + (23, channel_monitor.holder_pays_commitment_tx_fee, option), + (25, channel_monitor.payment_preimages, required), + (27, channel_monitor.first_negotiated_funding_txo, required), + (29, channel_monitor.initial_counterparty_commitment_tx, option), + (31, channel_monitor.funding.channel_parameters, required), + (32, channel_monitor.pending_funding, optional_vec), + (33, channel_monitor.htlcs_resolved_to_user, required), + (34, channel_monitor.alternative_funding_confirmed, option), + (35, channel_monitor.is_manual_broadcast, required), + (37, channel_monitor.funding_seen_onchain, required), + (39, channel_monitor.channel_state, option), + }); + Ok(()) } @@ -1994,6 +2074,8 @@ impl ChannelMonitor { alternative_funding_confirmed: None, written_by_0_1_or_later: true, + #[cfg(feature = "safe_channels")] + channel_state: None, }) } @@ -2114,6 +2196,19 @@ impl ChannelMonitor { inner.update_monitor(updates, broadcaster, fee_estimator, &logger) } + /// Gets the encoded channel data, if any, associated with this ChannelMonitor. + #[cfg(feature = "safe_channels")] + pub fn get_channel_state(&self) -> Option { + self.inner.lock().unwrap().channel_state.clone() + } + + /// Updates the encoded channel data associated with this ChannelMonitor. To clear the encoded channel data (for + /// example after shut down of a channel), pass `None`. + #[cfg(feature = "safe_channels")] + pub fn update_channel_state(&self, channel_state: UpdateChannelState) { + self.inner.lock().unwrap().update_channel_state(channel_state); + } + /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this /// ChannelMonitor. 
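
Note on the new accessors above: with `safe_channels`, the `ChannelMonitor` becomes the one persisted object that also carries the channel's own state, exposed via `get_channel_state`/`update_channel_state` and the `UpdateChannelState` enum. Both new TLV fields use odd type numbers (5 on `ChannelMonitorUpdate`, 39 on the monitor itself), so under LDK's usual odd-type handling a build without the feature should still be able to read data written with it. Below is a minimal, hypothetical sketch of reading the snapshot back out of a monitor; the helper name is invented and the paths/bounds are assumptions, not part of this diff.

```rust
// Crate-internal sketch (not part of this diff): inspect the snapshot stored with a monitor.
#[cfg(feature = "safe_channels")]
fn describe_stored_state<Signer: crate::sign::ecdsa::EcdsaChannelSigner>(
	monitor: &crate::chain::channelmonitor::ChannelMonitor<Signer>,
) -> String {
	use crate::chain::channelmonitor::UpdateChannelState;
	match monitor.get_channel_state() {
		// An open, funded channel: the snapshot exposes the commitment numbers used by
		// the consistency checks added in this patch.
		Some(UpdateChannelState::Funded(state)) => format!(
			"funded channel at holder commitment number {}",
			state.get_cur_holder_commitment_transaction_number()
		),
		// The channel was shut down; only the monitor itself remains relevant.
		Some(UpdateChannelState::Closed) => "channel closed".to_owned(),
		// Monitors written without the feature, or never updated, carry no snapshot.
		None => "no channel state stored".to_owned(),
	}
}
```
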
/// @@ -2310,14 +2405,17 @@ impl ChannelMonitor { self.inner.lock().unwrap().sign_to_local_justice_tx(justice_tx, input_idx, value, commitment_number) } + #[cfg(not(feature = "safe_channels"))] pub(crate) fn get_min_seen_secret(&self) -> u64 { self.inner.lock().unwrap().get_min_seen_secret() } + #[cfg(not(feature = "safe_channels"))] pub(crate) fn get_cur_counterparty_commitment_number(&self) -> u64 { self.inner.lock().unwrap().get_cur_counterparty_commitment_number() } + #[cfg(not(feature = "safe_channels"))] pub(crate) fn get_cur_holder_commitment_number(&self) -> u64 { self.inner.lock().unwrap().get_cur_holder_commitment_number() } @@ -2719,6 +2817,48 @@ impl ChannelMonitor { } impl ChannelMonitorImpl { + #[cfg(feature = "safe_channels")] + fn check_channel_state_consistency(&self, encoded: &FundedChannelState) { + debug_assert!( + encoded.get_cur_holder_commitment_transaction_number() + <= self.get_cur_holder_commitment_number(), + "cur_holder_commitment_transaction_number - channel: {} vs monitor: {}", + encoded.get_cur_holder_commitment_transaction_number(), + self.get_cur_holder_commitment_number() + ); + debug_assert!( + encoded.get_revoked_counterparty_commitment_transaction_number() + <= self.get_min_seen_secret(), + "revoked_counterparty_commitment_transaction_number - channel: {} vs monitor: {}", + encoded.get_revoked_counterparty_commitment_transaction_number(), + self.get_min_seen_secret() + ); + debug_assert!( + encoded.get_cur_counterparty_commitment_transaction_number() + <= self.get_cur_counterparty_commitment_number(), + "cur_counterparty_commitment_transaction_number - channel: {} vs monitor: {}", + encoded.get_cur_counterparty_commitment_transaction_number(), + self.get_cur_counterparty_commitment_number() + ); + debug_assert!( + encoded.latest_monitor_update_id >= self.get_latest_update_id(), + "latest_monitor_update_id - channel: {} vs monitor: {}", + encoded.latest_monitor_update_id, + self.get_latest_update_id() + ); + } + + #[cfg(feature = "safe_channels")] + fn update_channel_state(&mut self, encoded: UpdateChannelState) { + if let UpdateChannelState::Funded(ref channel) = encoded { + // Check that the encoded channel is consistent with the rest of the monitor. This sets an invariant for the + // safe_channels feature. + self.check_channel_state_consistency(channel); + } + + self.channel_state = Some(encoded); + } + /// Helper for get_claimable_balances which does the work for an individual HTLC, generating up /// to one `Balance` for the HTLC. #[rustfmt::skip] @@ -4405,6 +4545,13 @@ impl ChannelMonitorImpl { } } + // Assume that if the update contains no encoded channel, that the channel remained unchanged. We + // therefore do not update the monitor. 
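
For readers of `check_channel_state_consistency` above: commitment transaction numbers in LDK count down from `INITIAL_COMMITMENT_NUMBER` (2^48 - 1), so the `encoded <= monitor` asserts say the channel snapshot must be at least as far advanced as the monitor and may never lag behind it, while `latest_monitor_update_id` must be at least the monitor's, i.e. the channel has seen every update the monitor has applied. A standalone, illustrative snippet (the constant is redeclared here only for the example):

```rust
// Commitment numbers decrement as the channel advances, so "smaller" means "newer".
const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

fn main() {
	// The monitor has applied three commitment updates...
	let monitor_holder_commitment_number = INITIAL_COMMITMENT_NUMBER - 3;
	// ...while the channel snapshot is already one step further along.
	let channel_holder_commitment_number = INITIAL_COMMITMENT_NUMBER - 4;
	// Shape of the first debug_assert in check_channel_state_consistency: the snapshot
	// may be ahead of the monitor, but must never be behind it.
	assert!(channel_holder_commitment_number <= monitor_holder_commitment_number);
}
```
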
+ #[cfg(feature="safe_channels")] + if let Some(channel_state) = updates.channel_state.as_ref() { + self.update_channel_state(channel_state.clone()); + } + if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update { log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); Err(()) @@ -6644,6 +6791,33 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut alternative_funding_confirmed = None; let mut is_manual_broadcast = RequiredWrapper(None); let mut funding_seen_onchain = RequiredWrapper(None); + #[cfg(not(feature="safe_channels"))] + read_tlv_fields!(reader, { + (1, funding_spend_confirmed, option), + (3, htlcs_resolved_on_chain, optional_vec), + (5, pending_monitor_events, optional_vec), + (7, funding_spend_seen, option), + (9, counterparty_node_id, option), + (11, confirmed_commitment_tx_counterparty_output, option), + (13, spendable_txids_confirmed, optional_vec), + (15, counterparty_fulfilled_htlcs, option), + (17, initial_counterparty_commitment_info, option), + (19, channel_id, option), + (21, balances_empty_height, option), + (23, holder_pays_commitment_tx_fee, option), + (25, payment_preimages_with_info, option), + (27, first_negotiated_funding_txo, (default_value, outpoint)), + (29, initial_counterparty_commitment_tx, option), + (31, channel_parameters, (option: ReadableArgs, None)), + (32, pending_funding, optional_vec), + (33, htlcs_resolved_to_user, option), + (34, alternative_funding_confirmed, option), + (35, is_manual_broadcast, (default_value, false)), + (37, funding_seen_onchain, (default_value, true)), + }); + #[cfg(feature="safe_channels")] + let mut channel_state = None; + #[cfg(feature="safe_channels")] read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, optional_vec), @@ -6666,6 +6840,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (34, alternative_funding_confirmed, option), (35, is_manual_broadcast, (default_value, false)), (37, funding_seen_onchain, (default_value, true)), + (39, channel_state, option), }); // Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so // we can use it to determine if this monitor was last written by LDK 0.1 or later. @@ -6843,6 +7018,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP alternative_funding_confirmed, written_by_0_1_or_later, + #[cfg(feature="safe_channels")] + channel_state, }); if counterparty_node_id.is_none() { diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 431fdd2859c..3a39145d320 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -400,8 +400,7 @@ pub fn build_closing_transaction(to_holder_value_sat: Amount, to_counterparty_va /// /// Allows us to keep track of all of the revocation secrets of our counterparty in just 50*32 bytes /// or so. 
-#[derive(Clone)] -#[cfg_attr(test, derive(Debug))] +#[derive(Clone, Debug)] pub struct CounterpartyCommitmentSecrets { old_secrets: [([u8; 32], u64); 49], } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5b4ac4c0aa5..e3b15d550f1 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -30,6 +30,8 @@ use crate::blinded_path::message::BlindedMessagePath; use crate::chain::chaininterface::{ fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, }; +#[cfg(feature = "safe_channels")] +use crate::chain::channelmonitor::UpdateChannelState; use crate::chain::channelmonitor::{ ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, CommitmentHTLCData, LATENCY_GRACE_PERIOD_BLOCKS, @@ -123,7 +125,7 @@ pub struct AvailableBalances { pub next_outbound_htlc_minimum_msat: u64, } -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] enum FeeUpdateState { // Inbound states mirroring InboundHTLCState RemoteAnnounced, @@ -138,16 +140,33 @@ enum FeeUpdateState { Outbound, } -#[derive(Debug)] +impl_writeable_tlv_based_enum!(FeeUpdateState, + (0, RemoteAnnounced) => {}, + (1, AwaitingRemoteRevokeToAnnounce) => {}, + (2, Outbound) => {}, +); + +#[derive(Debug, Clone, PartialEq, Eq)] enum InboundHTLCRemovalReason { FailRelay(msgs::OnionErrorPacket), FailMalformed { sha256_of_onion: [u8; 32], failure_code: u16 }, Fulfill { preimage: PaymentPreimage, attribution_data: Option }, } +impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason, + (1, FailMalformed) => { + (0, sha256_of_onion, required), + (1, failure_code, required), + }, + (2, Fulfill) => { + (0, preimage, required), + (1, attribution_data, required), + }, + {0, FailRelay} => (), +); + /// Represents the resolution status of an inbound HTLC. -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Eq)] enum InboundHTLCResolution { /// Resolved implies the action we must take with the inbound HTLC has already been determined, /// i.e., we already know whether it must be failed back or forwarded. @@ -170,7 +189,7 @@ impl_writeable_tlv_based_enum!(InboundHTLCResolution, }, ); -#[cfg_attr(test, derive(Debug))] +#[derive(Clone, Debug, PartialEq, Eq)] enum InboundHTLCState { /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an /// update_add_htlc message for this HTLC. @@ -225,6 +244,14 @@ enum InboundHTLCState { LocalRemoved(InboundHTLCRemovalReason), } +impl_writeable_tlv_based_enum!(InboundHTLCState, + (3, Committed) => {}, // Strangely this one needs to come first?!? 
+ {0, RemoteAnnounced} => (), + {1, AwaitingRemoteRevokeToAnnounce} => (), + {2, AwaitingAnnouncedRemoteRevoke} => (), + {4, LocalRemoved} => (), +); + impl From<&InboundHTLCState> for Option { fn from(state: &InboundHTLCState) -> Option { match state { @@ -301,7 +328,7 @@ impl InboundHTLCState { } } -#[cfg_attr(test, derive(Debug))] +#[derive(Clone, Debug, PartialEq, Eq)] struct InboundHTLCOutput { htlc_id: u64, amount_msat: u64, @@ -310,8 +337,15 @@ struct InboundHTLCOutput { state: InboundHTLCState, } -#[derive(Debug)] -#[cfg_attr(test, derive(Clone, PartialEq))] +impl_writeable_tlv_based!(InboundHTLCOutput, { + (0, htlc_id, required), + (1, amount_msat, required), + (2, cltv_expiry, required), + (3, payment_hash, required), + (4, state, required), +}); + +#[derive(Debug, Clone, PartialEq, Eq)] enum OutboundHTLCState { /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack @@ -344,6 +378,14 @@ enum OutboundHTLCState { AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome), } +impl_writeable_tlv_based_enum!(OutboundHTLCState, + (3, Committed) => {}, // Strangely this one needs to come first?!? + {0, LocalAnnounced} => (), + {1, RemoteRemoved} => (), + {2, AwaitingRemoteRevokeToRemove} => (), + {4, AwaitingRemovedRemoteRevoke} => (), +); + impl From<&OutboundHTLCState> for OutboundHTLCStateDetails { fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails { match state { @@ -410,8 +452,7 @@ impl OutboundHTLCState { } } -#[derive(Clone, Debug)] -#[cfg_attr(test, derive(PartialEq))] +#[derive(Clone, Debug, PartialEq, Eq)] enum OutboundHTLCOutcome { /// We started always filling in the preimages here in 0.0.105, and the requirement /// that the preimages always be filled in was added in 0.2. 
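
On the two `// Strangely this one needs to come first?!?` comments above: as far as I can tell from the other `impl_writeable_tlv_based_enum!` invocations in this patch (e.g. `UpdateChannelState` and `InboundHTLCRemovalReason`), the macro expects all variants written with an explicit field list, `(tag, Variant) => { ... }` (including empty ones), before the tuple-style `{tag, Variant} => ()` entries, and the numeric tag, not source order, determines the wire encoding. That is why `Committed` (tag 3) is listed first. A hypothetical enum following the same grouping:

```rust
// Hypothetical crate-internal enum, mirroring the grouping used in the invocations above.
enum ExampleState {
	Pending(u64),
	Active,
	Done { height: u32 },
}

impl_writeable_tlv_based_enum!(ExampleState,
	// Unit and struct-style variants come first, regardless of their tags...
	(1, Active) => {},
	(2, Done) => {
		(0, height, required),
	},
	// ...and tuple-style variants come last, even though this one has tag 0.
	{0, Pending} => (),
);
```
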
@@ -422,6 +463,14 @@ enum OutboundHTLCOutcome { Failure(HTLCFailReason), } +impl_writeable_tlv_based_enum!(OutboundHTLCOutcome, + (0, Success) => { + (0, preimage, required), + (1, attribution_data, required), + }, + {1, Failure} => (), +); + impl<'a> Into> for &'a OutboundHTLCOutcome { fn into(self) -> Option<&'a HTLCFailReason> { match self { @@ -431,8 +480,7 @@ impl<'a> Into> for &'a OutboundHTLCOutcome { } } -#[derive(Debug)] -#[cfg_attr(test, derive(Clone, PartialEq))] +#[derive(Debug, Clone, PartialEq, Eq)] struct OutboundHTLCOutput { htlc_id: u64, amount_msat: u64, @@ -446,9 +494,21 @@ struct OutboundHTLCOutput { hold_htlc: Option<()>, } +impl_writeable_tlv_based!(OutboundHTLCOutput, { + (0, htlc_id, required), + (1, amount_msat, required), + (2, cltv_expiry, required), + (3, payment_hash, required), + (4, state, required), + (5, source, required), + (6, blinding_point, required), + (7, skimmed_fee_msat, required), + (8, send_timestamp, required), + (9, hold_htlc, required), +}); + /// See AwaitingRemoteRevoke ChannelState for more info -#[derive(Debug)] -#[cfg_attr(test, derive(Clone, PartialEq))] +#[derive(Debug, Clone, PartialEq, Eq)] enum HTLCUpdateAwaitingACK { AddHTLC { // TODO: Time out if we're getting close to cltv_expiry @@ -479,6 +539,33 @@ enum HTLCUpdateAwaitingACK { }, } +impl_writeable_tlv_based_enum!(HTLCUpdateAwaitingACK, + (0, AddHTLC) => { + (0, amount_msat, required), + (1, cltv_expiry, required), + (2, payment_hash, required), + (3, source, required), + (4, onion_routing_packet, required), + (5, skimmed_fee_msat, required), + (6, blinding_point, required), + (7, hold_htlc, required), + }, + (1, ClaimHTLC) => { + (0, payment_preimage, required), + (1, attribution_data, required), + (2, htlc_id, required), + }, + (2, FailHTLC) => { + (0, htlc_id, required), + (1, err_packet, required), + }, + (3, FailMalformedHTLC) => { + (0, htlc_id, required), + (1, failure_code, required), + (2, sha256_of_onion, required), + } +); + macro_rules! define_state_flags { ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),*], $extra_flags: expr) => { #[doc = $flag_type_doc] @@ -718,6 +805,19 @@ enum ChannelState { ShutdownComplete, } +impl Writeable for ChannelState { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + self.to_u32().write(w) + } +} + +impl Readable for ChannelState { + fn read(r: &mut R) -> Result { + let state_u32 = u32::read(r)?; + ChannelState::from_u32(state_u32).map_err(|_| DecodeError::InvalidValue) + } +} + macro_rules! impl_state_flag { ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => { #[allow(unused)] @@ -1031,7 +1131,7 @@ macro_rules! secp_check { /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to /// our channel_update message and track the current state here. /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`]. -#[derive(Clone, Copy, PartialEq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub(super) enum ChannelUpdateStatus { /// We've announced the channel as enabled and are connected to our peer. Enabled, @@ -1044,8 +1144,7 @@ pub(super) enum ChannelUpdateStatus { } /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here. 
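
The manual `Writeable`/`Readable` impls for `ChannelState` above simply delegate to the pre-existing `to_u32`/`from_u32` helpers in this file, so the packed discriminant-plus-flags value round-trips as-is and an unrecognized bit pattern surfaces as `DecodeError::InvalidValue` on read. A hypothetical in-module test sketch (the ser trait imports are assumptions):

```rust
#[cfg(test)]
fn channel_state_roundtrip(state: ChannelState) {
	use crate::util::ser::{Readable, Writeable};
	// Serialize via the new Writeable impl (to_u32 under the hood)...
	let encoded = state.encode();
	// ...and read it back via the new Readable impl (from_u32 under the hood).
	let decoded = ChannelState::read(&mut &encoded[..]).unwrap();
	assert_eq!(state.to_u32(), decoded.to_u32());
}
```
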
-#[cfg_attr(test, derive(Debug))] -#[derive(PartialEq)] +#[derive(PartialEq, Clone, Debug, Eq)] pub enum AnnouncementSigsState { /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since /// we sent the last `AnnouncementSignatures`. @@ -1225,7 +1324,7 @@ pub(crate) struct DisconnectResult { /// Tracks the transaction number, along with current and next commitment points. /// This consolidates the logic to advance our commitment number and request new /// commitment points from our signer. -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] struct HolderCommitmentPoint { next_transaction_number: u64, current_point: Option, @@ -1240,6 +1339,15 @@ struct HolderCommitmentPoint { last_revoked_point: Option, } +impl_writeable_tlv_based!(HolderCommitmentPoint, { + (0, next_transaction_number, required), + (1, current_point, required), + (2, next_point, required), + (3, pending_next_point, required), + (4, previous_revoked_point, required), + (5, last_revoked_point, required), +}); + impl HolderCommitmentPoint { #[rustfmt::skip] pub fn new(signer: &ChannelSignerType, secp_ctx: &Secp256k1) -> Option @@ -1433,7 +1541,7 @@ pub(crate) const CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY: u32 = 14 * 24 * 6 * 4; #[cfg(test)] pub(crate) const CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY: u32 = 144; -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] struct PendingChannelMonitorUpdate { update: ChannelMonitorUpdate, } @@ -2384,6 +2492,53 @@ pub(super) struct FundingScope { minimum_depth_override: Option, } +impl Eq for FundingScope {} + +impl PartialEq for FundingScope { + fn eq(&self, other: &Self) -> bool { + self.value_to_self_msat == other.value_to_self_msat + && self.counterparty_selected_channel_reserve_satoshis + == other.counterparty_selected_channel_reserve_satoshis + && self.holder_selected_channel_reserve_satoshis + == other.holder_selected_channel_reserve_satoshis + && self.channel_transaction_parameters == other.channel_transaction_parameters + && self.funding_transaction == other.funding_transaction + && self.funding_tx_confirmed_in == other.funding_tx_confirmed_in + && self.funding_tx_confirmation_height == other.funding_tx_confirmation_height + && self.short_channel_id == other.short_channel_id + && self.minimum_depth_override == other.minimum_depth_override + } +} + +impl Clone for FundingScope { + fn clone(&self) -> Self { + FundingScope { + value_to_self_msat: self.value_to_self_msat, + counterparty_selected_channel_reserve_satoshis: self + .counterparty_selected_channel_reserve_satoshis, + holder_selected_channel_reserve_satoshis: self.holder_selected_channel_reserve_satoshis, + #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new( + *self.holder_max_commitment_tx_output.lock().unwrap(), + ), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new( + *self.counterparty_max_commitment_tx_output.lock().unwrap(), + ), + #[cfg(any(test, fuzzing))] + next_local_fee: Mutex::new(*self.next_local_fee.lock().unwrap()), + #[cfg(any(test, fuzzing))] + next_remote_fee: Mutex::new(*self.next_remote_fee.lock().unwrap()), + channel_transaction_parameters: self.channel_transaction_parameters.clone(), + funding_transaction: self.funding_transaction.clone(), + funding_tx_confirmed_in: self.funding_tx_confirmed_in, + funding_tx_confirmation_height: self.funding_tx_confirmation_height, + short_channel_id: self.short_channel_id, + minimum_depth_override: self.minimum_depth_override, + } + } +} + impl Writeable 
for FundingScope { fn write(&self, writer: &mut W) -> Result<(), io::Error> { write_tlv_fields!(writer, { @@ -2453,6 +2608,7 @@ impl FundingScope { self.channel_transaction_parameters.channel_value_satoshis } + #[cfg(not(feature = "safe_channels"))] pub(crate) fn get_value_to_self_msat(&self) -> u64 { self.value_to_self_msat } @@ -2670,7 +2826,7 @@ impl FundingScope { /// Information about pending attempts at funding a channel. This includes funding currently under /// negotiation and any negotiated attempts waiting enough on-chain confirmations. More than one /// such attempt indicates use of RBF to increase the chances of confirmation. -#[derive(Debug)] +#[derive(Debug, Clone, Eq, PartialEq)] struct PendingFunding { funding_negotiation: Option, @@ -2692,7 +2848,7 @@ impl_writeable_tlv_based!(PendingFunding, { (7, received_funding_txid, option), }); -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] enum FundingNegotiation { AwaitingAck { context: FundingNegotiationContext, @@ -2772,7 +2928,7 @@ impl PendingFunding { } } -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct SpliceInstructions { adjusted_funding_contribution: SignedAmount, our_funding_inputs: Vec, @@ -2800,7 +2956,7 @@ impl_writeable_tlv_based!(SpliceInstructions, { (11, locktime, required), }); -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(crate) enum QuiescentAction { Splice(SpliceInstructions), #[cfg(any(test, fuzzing))] @@ -6109,6 +6265,8 @@ where should_broadcast: broadcast, }], channel_id: Some(self.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Closed), }; Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), update)) } else { @@ -6617,7 +6775,7 @@ fn check_v2_funding_inputs_sufficient( } /// Context for negotiating channels (dual-funded V2 open, splicing) -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub(super) struct FundingNegotiationContext { /// Whether we initiated the funding negotiation. 
pub is_initiator: bool, @@ -6776,6 +6934,313 @@ where quiescent_action: Option, } +#[cfg(feature = "safe_channels")] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct FundedChannelState { + funding: FundingScope, + config: LegacyChannelConfig, + user_id: u128, + channel_id: ChannelId, + temporary_channel_id: Option, + channel_state: ChannelState, + announcement_sigs_state_sent: bool, + pub(crate) latest_monitor_update_id: u64, + shutdown_scriptpubkey: Option, + destination_script: ScriptBuf, + counterparty_next_commitment_transaction_number: u64, + pending_inbound_htlcs: Vec, + pending_outbound_htlcs: Vec, + holding_cell_htlc_updates: Vec, + resend_order: RAACommitmentOrder, + monitor_pending_channel_ready: bool, + monitor_pending_revoke_and_ack: bool, + monitor_pending_commitment_signed: bool, + monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, + monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + monitor_pending_finalized_fulfills: Vec<(HTLCSource, Option)>, + monitor_pending_update_adds: Vec, + pending_update_fee: Option<(u32, FeeUpdateState)>, + holding_cell_update_fee: Option, + next_holder_htlc_id: u64, + next_counterparty_htlc_id: u64, + feerate_per_kw: u32, + update_time_counter: u32, + target_closing_feerate_sats_per_kw: Option, + channel_creation_height: u32, + counterparty_dust_limit_satoshis: u64, + holder_dust_limit_satoshis: u64, + counterparty_max_htlc_value_in_flight_msat: u64, + holder_max_htlc_value_in_flight_msat: u64, + counterparty_htlc_minimum_msat: u64, + holder_htlc_minimum_msat: u64, + counterparty_max_accepted_htlcs: u16, + holder_max_accepted_htlcs: u16, + minimum_depth: Option, + counterparty_forwarding_info: Option, + is_manual_broadcast: bool, + is_batch_funding: Option<()>, + counterparty_next_commitment_point: Option, + counterparty_current_commitment_point: Option, + counterparty_node_id: PublicKey, + counterparty_shutdown_scriptpubkey: Option, + commitment_secrets: CounterpartyCommitmentSecrets, + channel_update_status_enabled: bool, + announcement_sigs: Option<(Signature, Signature)>, + latest_inbound_scid_alias: Option, + outbound_scid_alias: u64, + historical_scids: Vec, + channel_pending_event_emitted: bool, + funding_tx_broadcast_safe_event_emitted: bool, + initial_channel_ready_event_emitted: bool, + local_initiated_shutdown: Option<()>, + channel_keys_id: [u8; 32], + blocked_monitor_updates: Vec>, + interactive_tx_signing_session: Option, + holder_commitment_point: HolderCommitmentPoint, + pending_splice: Option, + quiescent_action: Option, +} + +#[cfg(feature = "safe_channels")] +impl FundedChannelState { + pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { + self.holder_commitment_point.current_transaction_number() + } + + pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 { + self.counterparty_next_commitment_transaction_number + 1 + - if self.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 } + } + + pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 { + let ret = self.counterparty_next_commitment_transaction_number + 2; + debug_assert_eq!(self.commitment_secrets.get_min_seen_secret(), ret); + ret + } +} + +#[cfg(feature = "safe_channels")] +impl From<&mut FundedChannel> for FundedChannelState +where + SP::Target: SignerProvider, +{ + fn from(channel: &mut FundedChannel) -> Self { + // Replicate pre-serialization channel state change from original write method. 
+ let mut channel_state = channel.context.channel_state; + match channel_state { + ChannelState::AwaitingChannelReady(_) => {}, + ChannelState::ChannelReady(_) => { + if channel.quiescent_action.is_some() { + // If we're trying to get quiescent to do something, try again when we + // reconnect to the peer. + channel_state.set_awaiting_quiescence(); + } + channel_state.clear_local_stfu_sent(); + channel_state.clear_remote_stfu_sent(); + if channel.should_reset_pending_splice_state(false) + || !channel.has_pending_splice_awaiting_signatures() + { + // We shouldn't be quiescent anymore upon reconnecting if: + // - We were in quiescence but a splice/RBF was never negotiated or + // - We were in quiescence but the splice negotiation failed due to + // disconnecting + channel_state.clear_quiescent(); + } + }, + ChannelState::FundingNegotiated(_) + if channel.context.interactive_tx_signing_session.is_some() => {}, + _ => debug_assert!(false, "Pre-funded/shutdown channels should not be written"), + } + channel_state.set_peer_disconnected(); + + // Replicate pre-serialization pending splice state change from original write method. Serialize the result + // already now because PendingFunding is difficult to clone. + // + // We don't have to worry about resetting the pending `FundingNegotiation` because we can only read + // `FundingNegotiation::AwaitingSignatures` variants anyway. + let pending_splice = channel + .pending_splice + .as_ref() + .filter(|_| !channel.should_reset_pending_splice_state(false)) + .cloned(); + + // Prevent recursive serialization compiler errors by storing the serialized updates. + let serialized_blocked_monitor_updates = + channel.context.blocked_monitor_updates.iter().map(|update| update.encode()).collect(); + + // We only care about writing out the current state as if we had just disconnected, at + // which point we always set anything but AnnouncementSigsReceived to NotSent. + let announcement_sigs_state_sent = match channel.context.announcement_sigs_state { + AnnouncementSigsState::NotSent => false, + AnnouncementSigsState::MessageSent => false, + AnnouncementSigsState::Committed => false, + AnnouncementSigsState::PeerReceived => true, + }; + + // We only care about writing out the current state as it was announced, ie only either Enabled or Disabled. In + // the case of DisabledStaged, we most recently announced the channel as enabled. + let channel_update_status_enabled = match channel.context.channel_update_status { + ChannelUpdateStatus::Enabled => true, + ChannelUpdateStatus::DisabledStaged(_) => true, + ChannelUpdateStatus::EnabledStaged(_) => false, + ChannelUpdateStatus::Disabled => false, + }; + + // Mirror existing [`LegacyChannelConfig`] behavior by resetting this flag. 
+ let mut config = channel.context.config; + config.options.accept_underpaying_htlcs = false; + + FundedChannelState { + funding: channel.funding.clone(), + user_id: channel.context.user_id, + channel_id: channel.context.channel_id, + channel_state, + latest_monitor_update_id: channel.context.latest_monitor_update_id, + shutdown_scriptpubkey: channel.context.shutdown_scriptpubkey.clone(), + destination_script: channel.context.destination_script.clone(), + counterparty_next_commitment_transaction_number: channel + .context + .counterparty_next_commitment_transaction_number, + interactive_tx_signing_session: channel.context.interactive_tx_signing_session.clone(), + pending_inbound_htlcs: channel.context.pending_inbound_htlcs.clone(), + config, + temporary_channel_id: channel.context.temporary_channel_id, + announcement_sigs_state_sent, + pending_outbound_htlcs: channel.context.pending_outbound_htlcs.clone(), + holding_cell_htlc_updates: channel.context.holding_cell_htlc_updates.clone(), + resend_order: channel.context.resend_order.clone(), + monitor_pending_channel_ready: channel.context.monitor_pending_channel_ready, + monitor_pending_revoke_and_ack: channel.context.monitor_pending_revoke_and_ack, + monitor_pending_commitment_signed: channel.context.monitor_pending_commitment_signed, + monitor_pending_forwards: channel.context.monitor_pending_forwards.clone(), + monitor_pending_failures: channel.context.monitor_pending_failures.clone(), + monitor_pending_finalized_fulfills: channel + .context + .monitor_pending_finalized_fulfills + .clone(), + monitor_pending_update_adds: channel.context.monitor_pending_update_adds.clone(), + pending_update_fee: channel.context.pending_update_fee, + holding_cell_update_fee: channel.context.holding_cell_update_fee, + next_holder_htlc_id: channel.context.next_holder_htlc_id, + next_counterparty_htlc_id: channel.context.next_counterparty_htlc_id, + feerate_per_kw: channel.context.feerate_per_kw, + update_time_counter: channel.context.update_time_counter, + target_closing_feerate_sats_per_kw: channel.context.target_closing_feerate_sats_per_kw, + channel_creation_height: channel.context.channel_creation_height, + counterparty_dust_limit_satoshis: channel.context.counterparty_dust_limit_satoshis, + holder_dust_limit_satoshis: channel.context.holder_dust_limit_satoshis, + counterparty_max_htlc_value_in_flight_msat: channel + .context + .counterparty_max_htlc_value_in_flight_msat, + holder_max_htlc_value_in_flight_msat: channel + .context + .holder_max_htlc_value_in_flight_msat, + counterparty_htlc_minimum_msat: channel.context.counterparty_htlc_minimum_msat, + holder_htlc_minimum_msat: channel.context.holder_htlc_minimum_msat, + counterparty_max_accepted_htlcs: channel.context.counterparty_max_accepted_htlcs, + holder_max_accepted_htlcs: channel.context.holder_max_accepted_htlcs, + minimum_depth: channel.context.minimum_depth, + counterparty_forwarding_info: channel.context.counterparty_forwarding_info.clone(), + is_manual_broadcast: channel.context.is_manual_broadcast, + is_batch_funding: channel.context.is_batch_funding, + counterparty_next_commitment_point: channel.context.counterparty_next_commitment_point, + counterparty_current_commitment_point: channel + .context + .counterparty_current_commitment_point, + counterparty_node_id: channel.context.counterparty_node_id, + counterparty_shutdown_scriptpubkey: channel + .context + .counterparty_shutdown_scriptpubkey + .clone(), + commitment_secrets: channel.context.commitment_secrets.clone(), + 
channel_update_status_enabled, + announcement_sigs: channel.context.announcement_sigs, + latest_inbound_scid_alias: channel.context.latest_inbound_scid_alias, + outbound_scid_alias: channel.context.outbound_scid_alias, + historical_scids: channel.context.historical_scids.clone(), + channel_pending_event_emitted: channel.context.channel_pending_event_emitted, + funding_tx_broadcast_safe_event_emitted: channel + .context + .funding_tx_broadcast_safe_event_emitted, + initial_channel_ready_event_emitted: channel + .context + .initial_channel_ready_event_emitted, + local_initiated_shutdown: channel.context.local_initiated_shutdown, + channel_keys_id: channel.context.channel_keys_id, + blocked_monitor_updates: serialized_blocked_monitor_updates, + holder_commitment_point: channel.holder_commitment_point, + pending_splice, + quiescent_action: channel.quiescent_action.clone(), + } + } +} + +#[cfg(feature = "safe_channels")] +impl_writeable_tlv_based!(FundedChannelState, { + (0, funding, required), + (1, config, required), + (2, user_id, required), + (3, channel_id, required), + (4, temporary_channel_id, required), + (5, channel_state, required), + (6, announcement_sigs_state_sent, required), + (7, latest_monitor_update_id, required), + (8, shutdown_scriptpubkey, required), + (9, destination_script, required), + (10, counterparty_next_commitment_transaction_number, required), + (11, pending_inbound_htlcs, required_vec), + (12, pending_outbound_htlcs, required_vec), + (13, holding_cell_htlc_updates, required_vec), + (14, resend_order, required), + (15, monitor_pending_channel_ready, required), + (16, monitor_pending_revoke_and_ack, required), + (17, monitor_pending_commitment_signed, required), + (18, monitor_pending_forwards, required), + (19, monitor_pending_failures, required_vec), + (20, monitor_pending_finalized_fulfills, required), + (21, monitor_pending_update_adds, required), + (22, pending_update_fee, required), + (23, holding_cell_update_fee, required), + (24, next_holder_htlc_id, required), + (25, next_counterparty_htlc_id, required), + (26, feerate_per_kw, required), + (27, update_time_counter, required), + (28, target_closing_feerate_sats_per_kw, required), + (29, channel_creation_height, required), + (30, counterparty_dust_limit_satoshis, required), + (31, holder_dust_limit_satoshis, required), + (32, counterparty_max_htlc_value_in_flight_msat, required), + (33, holder_max_htlc_value_in_flight_msat, required), + (34, counterparty_htlc_minimum_msat, required), + (35, holder_htlc_minimum_msat, required), + (36, counterparty_max_accepted_htlcs, required), + (37, holder_max_accepted_htlcs, required), + (38, minimum_depth, required), + (39, counterparty_forwarding_info, required), + (40, is_manual_broadcast, required), + (41, is_batch_funding, required), + (42, counterparty_next_commitment_point, required), + (43, counterparty_current_commitment_point, required), + (44, counterparty_node_id, required), + (45, counterparty_shutdown_scriptpubkey, required), + (46, commitment_secrets, required), + (47, channel_update_status_enabled, required), + (48, announcement_sigs, required), + (49, latest_inbound_scid_alias, required), + (50, outbound_scid_alias, required), + (51, historical_scids, required), + (52, channel_pending_event_emitted, required), + (53, funding_tx_broadcast_safe_event_emitted, required), + (54, initial_channel_ready_event_emitted, required), + (55, local_initiated_shutdown, required), + (56, channel_keys_id, required), + (57, blocked_monitor_updates, required_vec), + (58, 
interactive_tx_signing_session, required), + (59, holder_commitment_point, required), + (60, pending_splice, required), + (61, quiescent_action, upgradable_option), +}); + #[cfg(any(test, fuzzing))] #[derive(Clone, Copy, Default, Debug)] struct PredictedNextFee { @@ -6949,6 +7414,161 @@ where SP::Target: SignerProvider, ::EcdsaSigner: EcdsaChannelSigner, { + #[cfg(feature = "safe_channels")] + pub(crate) fn new_from_state( + channel: FundedChannelState, entropy_source: &ES, signer_provider: &SP, + _our_supported_features: &ChannelTypeFeatures, + ) -> Result + where + ES::Target: EntropySource, + { + let holder_signer = signer_provider.derive_channel_signer(channel.channel_keys_id); + + let mut secp_ctx = Secp256k1::new(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); + + if let Some(funding_negotiation) = channel + .pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) + { + if !matches!(funding_negotiation, FundingNegotiation::AwaitingSignatures { .. }) { + return Err(DecodeError::InvalidValue); + } + } + + let announcement_sigs_state = if channel.announcement_sigs_state_sent { + AnnouncementSigsState::PeerReceived + } else { + AnnouncementSigsState::NotSent + }; + + let blocked_monitor_updates = channel + .blocked_monitor_updates + .iter() + .map(|update| { + let mut reader = &update[..]; + PendingChannelMonitorUpdate::read(&mut reader) + }) + .collect::, _>>() + .map_err(|_| DecodeError::InvalidValue)?; + + let channel_update_status = if channel.channel_update_status_enabled { + ChannelUpdateStatus::Enabled + } else { + ChannelUpdateStatus::Disabled + }; + + Ok(FundedChannel { + funding: channel.funding, + context: ChannelContext { + user_id: channel.user_id, + config: channel.config, + prev_config: None, + inbound_handshake_limits_override: None, + channel_id: channel.channel_id, + temporary_channel_id: channel.temporary_channel_id, + channel_state: channel.channel_state, + announcement_sigs_state, + secp_ctx, + latest_monitor_update_id: channel.latest_monitor_update_id, + holder_signer: ChannelSignerType::Ecdsa(holder_signer), + shutdown_scriptpubkey: channel.shutdown_scriptpubkey, + destination_script: channel.destination_script, + counterparty_next_commitment_transaction_number: channel + .counterparty_next_commitment_transaction_number, + holder_max_accepted_htlcs: channel.holder_max_accepted_htlcs, + pending_inbound_htlcs: channel.pending_inbound_htlcs, + pending_outbound_htlcs: channel.pending_outbound_htlcs, + holding_cell_htlc_updates: channel.holding_cell_htlc_updates, + resend_order: channel.resend_order, + + monitor_pending_channel_ready: channel.monitor_pending_channel_ready, + monitor_pending_revoke_and_ack: channel.monitor_pending_revoke_and_ack, + monitor_pending_commitment_signed: channel.monitor_pending_commitment_signed, + monitor_pending_forwards: channel.monitor_pending_forwards, + monitor_pending_failures: channel.monitor_pending_failures, + monitor_pending_finalized_fulfills: channel.monitor_pending_finalized_fulfills, + monitor_pending_update_adds: channel.monitor_pending_update_adds, + + signer_pending_revoke_and_ack: false, + signer_pending_commitment_update: false, + signer_pending_funding: false, + signer_pending_closing: false, + signer_pending_channel_ready: false, + signer_pending_stale_state_verification: None, + + pending_update_fee: channel.pending_update_fee, + holding_cell_update_fee: channel.holding_cell_update_fee, + next_holder_htlc_id: channel.next_holder_htlc_id, + 
next_counterparty_htlc_id: channel.next_counterparty_htlc_id, + update_time_counter: channel.update_time_counter, + feerate_per_kw: channel.feerate_per_kw, + + last_sent_closing_fee: None, + last_received_closing_sig: None, + pending_counterparty_closing_signed: None, + expecting_peer_commitment_signed: false, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: channel.target_closing_feerate_sats_per_kw, + + channel_creation_height: channel.channel_creation_height, + + counterparty_dust_limit_satoshis: channel.counterparty_dust_limit_satoshis, + holder_dust_limit_satoshis: channel.holder_dust_limit_satoshis, + counterparty_max_htlc_value_in_flight_msat: channel + .counterparty_max_htlc_value_in_flight_msat, + holder_max_htlc_value_in_flight_msat: channel.holder_max_htlc_value_in_flight_msat, + counterparty_htlc_minimum_msat: channel.counterparty_htlc_minimum_msat, + holder_htlc_minimum_msat: channel.holder_htlc_minimum_msat, + counterparty_max_accepted_htlcs: channel.counterparty_max_accepted_htlcs, + minimum_depth: channel.minimum_depth, + + counterparty_forwarding_info: channel.counterparty_forwarding_info, + + is_batch_funding: channel.is_batch_funding, + + counterparty_next_commitment_point: channel.counterparty_next_commitment_point, + counterparty_current_commitment_point: channel + .counterparty_current_commitment_point, + counterparty_node_id: channel.counterparty_node_id, + + counterparty_shutdown_scriptpubkey: channel.counterparty_shutdown_scriptpubkey, + + commitment_secrets: channel.commitment_secrets, + + channel_update_status, + closing_signed_in_flight: false, + + announcement_sigs: channel.announcement_sigs, + + workaround_lnd_bug_4006: None, + sent_message_awaiting_response: None, + + latest_inbound_scid_alias: channel.latest_inbound_scid_alias, + outbound_scid_alias: channel.outbound_scid_alias, + historical_scids: channel.historical_scids, + + funding_tx_broadcast_safe_event_emitted: channel + .funding_tx_broadcast_safe_event_emitted, + channel_pending_event_emitted: channel.channel_pending_event_emitted, + initial_channel_ready_event_emitted: channel.initial_channel_ready_event_emitted, + + channel_keys_id: channel.channel_keys_id, + + local_initiated_shutdown: channel.local_initiated_shutdown, + + blocked_monitor_updates, + is_manual_broadcast: channel.is_manual_broadcast, + + interactive_tx_signing_session: channel.interactive_tx_signing_session, + }, + holder_commitment_point: channel.holder_commitment_point, + pending_splice: channel.pending_splice, + quiescent_action: channel.quiescent_action, + }) + } + pub fn context(&self) -> &ChannelContext { &self.context } @@ -7358,6 +7978,8 @@ where payment_info, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; if !self.context.channel_state.can_generate_new_commitment() { @@ -7471,6 +8093,11 @@ where // to be strictly increasing by one, so decrement it here. 
self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = + Some(UpdateChannelState::Funded(self.into())); + } } else { let blocked_upd = self.context.blocked_monitor_updates.get(0); let new_mon_id = blocked_upd @@ -7980,7 +8607,8 @@ where ); self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { + #[allow(unused_mut)] + let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::RenegotiatedFunding { channel_parameters: pending_splice_funding.channel_transaction_parameters.clone(), @@ -7988,6 +8616,8 @@ where counterparty_commitment_tx, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: None, }; self.context @@ -7997,6 +8627,10 @@ where .received_commitment_signed(); self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -8256,6 +8890,8 @@ where update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: None, }; self.context.expecting_peer_commitment_signed = false; @@ -8280,6 +8916,10 @@ where } log_debug!(logger, "Received valid commitment_signed from peer, updated HTLC state but awaiting a monitor update resolution to reply.", ); + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8308,6 +8948,10 @@ where Vec::new(), Vec::new(), ); + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8360,6 +9004,8 @@ where update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! updates: Vec::new(), channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: None, }; let mut htlc_updates = Vec::new(); @@ -8455,6 +9101,12 @@ where unreachable!() }; update_fulfill_count += 1; + + #[cfg(feature = "safe_channels")] + { + additional_monitor_update.channel_state = + Some(UpdateChannelState::Funded(self.into())); + } monitor_update.updates.append(&mut additional_monitor_update.updates); None }, @@ -8513,6 +9165,11 @@ where update_add_count, update_fulfill_count, update_fail_count); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8629,6 +9286,8 @@ where secret: msg.per_commitment_secret, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: None, }; // Update state now that we've passed all the can-fail calls... @@ -8853,6 +9512,10 @@ where }; macro_rules! 
return_with_htlcs_to_fail { ($htlcs_to_fail: expr) => { + #[cfg(feature = "safe_channels")] + { + monitor_update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } if !release_monitor { self.context .blocked_monitor_updates @@ -8931,8 +9594,8 @@ where ); return_with_htlcs_to_fail!(htlcs_to_fail); } else { - log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update.", - release_state_str); + log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update {}.", + release_state_str, monitor_update.update_id); self.monitor_updating_paused( false, @@ -10506,6 +11169,8 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) @@ -10897,10 +11562,12 @@ where .try_for_each(|funding| self.context.can_accept_incoming_htlc(funding, dust_exposure_limiting_feerate, &logger)) } + #[cfg(not(feature = "safe_channels"))] pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { self.holder_commitment_point.current_transaction_number() } + #[cfg(not(feature = "safe_channels"))] pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 { self.context.counterparty_next_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 } @@ -10960,10 +11627,13 @@ where if self.context.blocked_monitor_updates.is_empty() { return None; } - Some(( - self.context.blocked_monitor_updates.remove(0).update, - !self.context.blocked_monitor_updates.is_empty(), - )) + #[allow(unused_mut)] + let mut update = self.context.blocked_monitor_updates.remove(0).update; + #[cfg(feature = "safe_channels")] + { + update.channel_state = Some(UpdateChannelState::Funded(self.into())); + } + Some((update, !self.context.blocked_monitor_updates.is_empty())) } /// Pushes a new monitor update into our monitor update queue, returning it if it should be @@ -11011,6 +11681,7 @@ where /// transaction. If the channel is inbound, this implies simply that the channel has not /// advanced state. 
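
Worth calling out in the hunks above: whenever a `ChannelMonitorUpdate` is actually handed out (the `return_with_htlcs_to_fail` macro, and the blocked-update release in `get_and_clear_pending_monitor_updates`), its `channel_state` is stamped with a fresh `FundedChannelState` as the final step, after all context mutations, so the snapshot stored with the monitor reflects the channel as of the moment the update is released rather than when it was queued. An illustrative, simplified shape of the release path (hypothetical helper, not code from this diff):

```rust
// Simplified sketch: release the oldest blocked update and stamp it with the current
// channel snapshot so the monitor never stores a snapshot older than the update itself.
fn release_first_blocked(
	blocked: &mut Vec<ChannelMonitorUpdate>, current_snapshot: FundedChannelState,
) -> Option<ChannelMonitorUpdate> {
	if blocked.is_empty() {
		return None;
	}
	let mut update = blocked.remove(0);
	update.channel_state = Some(UpdateChannelState::Funded(current_snapshot));
	Some(update)
}
```
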
#[rustfmt::skip] + #[cfg(not(feature = "safe_channels"))] pub fn is_awaiting_initial_mon_persist(&self) -> bool { if !self.is_awaiting_monitor_update() { return false; } if matches!( @@ -11263,6 +11934,8 @@ where funding_txid: funding_txo.txid, }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); @@ -12835,12 +13508,14 @@ where } self.context.latest_monitor_update_id += 1; + self.context.channel_state.set_awaiting_remote_revoke(); let monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; - self.context.channel_state.set_awaiting_remote_revoke(); monitor_update } @@ -13085,6 +13760,8 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Funded(self.into())), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) @@ -14443,6 +15120,7 @@ impl Readable for AnnouncementSigsState { } } +#[cfg(not(feature = "safe_channels"))] impl Writeable for FundedChannel where SP::Target: SignerProvider, @@ -14920,6 +15598,7 @@ where } } +#[cfg(not(feature = "safe_channels"))] impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> for FundedChannel where @@ -15809,6 +16488,8 @@ mod tests { use crate::chain::transaction::OutPoint; use crate::chain::BestBlock; use crate::ln::chan_utils::{self, commit_tx_fee_sat, ChannelTransactionParameters}; + #[cfg(feature = "safe_channels")] + use crate::ln::channel::FundedChannelState; use crate::ln::channel::{ AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundV1Channel, @@ -15836,6 +16517,8 @@ mod tests { use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::config::UserConfig; use crate::util::errors::APIError; + #[cfg(feature = "safe_channels")] + use crate::util::ser::Readable; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_utils::{ self, OnGetShutdownScriptpubkey, TestFeeEstimator, TestKeysInterface, TestLogger, @@ -16620,14 +17303,34 @@ mod tests { chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone(); // Encode and decode the channel and ensure that the HTLCs within are the same. 
- let encoded_chan = chan.encode(); - let mut s = crate::io::Cursor::new(&encoded_chan); - let mut reader = - crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); let features = channelmanager::provided_channel_type_features(&config); - let decoded_chan = - FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)) - .unwrap(); + let decoded_chan; + #[cfg(not(feature = "safe_channels"))] + { + let encoded_chan = chan.encode(); + let mut s = crate::io::Cursor::new(&encoded_chan); + let mut reader = + crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); + decoded_chan = + FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)) + .unwrap(); + } + #[cfg(feature = "safe_channels")] + { + let channel_state: FundedChannelState = (&mut chan).into(); + let encoded_chan = channel_state.encode(); + let mut s = crate::io::Cursor::new(&encoded_chan); + let mut reader = + crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); + let decoded_state = FundedChannelState::read(&mut reader).unwrap(); + decoded_chan = FundedChannel::new_from_state( + decoded_state, + &&keys_provider, + &&keys_provider, + &features, + ) + .unwrap(); + } assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs); assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates); } diff --git a/lightning/src/ln/channel_state.rs b/lightning/src/ln/channel_state.rs index 81a7cb4755e..4ac868d19b2 100644 --- a/lightning/src/ln/channel_state.rs +++ b/lightning/src/ln/channel_state.rs @@ -213,7 +213,7 @@ impl_writeable_tlv_based!(OutboundHTLCDetails, { }); /// Information needed for constructing an invoice route hint for this channel. -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct CounterpartyForwardingInfo { /// Base routing fee in millisatoshis. pub fee_base_msat: u32, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 399c51b9d9a..dbc11f173cd 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -41,6 +41,8 @@ use crate::chain; use crate::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, }; +#[cfg(feature = "safe_channels")] +use crate::chain::channelmonitor::UpdateChannelState; use crate::chain::channelmonitor::{ Balance, ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, WithChannelMonitor, ANTI_REORG_DELAY, CLTV_CLAIM_BUFFER, HTLC_FAIL_BACK_BUFFER, @@ -207,8 +209,7 @@ use crate::ln::script::ShutdownScript; // our payment, which we can use to decode errors or inform the user that the payment was sent. /// Information about where a received HTLC('s onion) has indicated the HTLC should go. -#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug -#[cfg_attr(test, derive(Debug, PartialEq))] +#[derive(Clone, Debug, PartialEq, Eq)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub enum PendingHTLCRouting { /// An HTLC which should be forwarded on to another node. Forward { @@ -386,8 +387,7 @@ impl PendingHTLCRouting { /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it /// should go next. 
-#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug -#[cfg_attr(test, derive(Debug, PartialEq))] +#[derive(Clone, Debug, PartialEq, Eq)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub struct PendingHTLCInfo { /// Further routing details based on whether the HTLC is being forwarded or received. pub routing: PendingHTLCRouting, @@ -429,15 +429,14 @@ pub struct PendingHTLCInfo { pub skimmed_fee_msat: Option, } -#[derive(Clone, Debug)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug +#[derive(Clone, Debug, PartialEq, Eq)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub(super) enum HTLCFailureMsg { Relay(msgs::UpdateFailHTLC), Malformed(msgs::UpdateFailMalformedHTLC), } /// Stores whether we can't forward an HTLC or relevant forwarding info -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug +#[derive(Clone, Debug, PartialEq, Eq)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub(super) enum PendingHTLCStatus { Forward(PendingHTLCInfo), Fail(HTLCFailureMsg), @@ -1009,7 +1008,7 @@ impl MsgHandleErrInternal { /// be sent in the order they appear in the return value, however sometimes the order needs to be /// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order /// they were originally sent). In those cases, this enum is also returned. -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub(super) enum RAACommitmentOrder { /// Send the CommitmentUpdate messages first CommitmentFirst, @@ -1017,6 +1016,11 @@ pub(super) enum RAACommitmentOrder { RevokeAndACKFirst, } +impl_writeable_tlv_based_enum!(RAACommitmentOrder, + (0, CommitmentFirst) => {}, + (1, RevokeAndACKFirst) => {}, +); + /// Similar to scenarios used by [`RAACommitmentOrder`], this determines whether a `channel_ready` /// message should be sent first (i.e., prior to a `commitment_update`) or after the initial /// `commitment_update` and `tx_signatures` for channel funding. @@ -9082,6 +9086,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ payment_info, }], channel_id: Some(prev_hop.channel_id), + #[cfg(feature = "safe_channels")] + channel_state: None, }; // We don't have any idea if this is a duplicate claim without interrogating the @@ -9451,6 +9457,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ for action in actions.into_iter() { match action { MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { + let logger = WithContext::from(&self.logger, None, None, Some(payment_hash)); + log_trace!(logger, "Handling PaymentClaimed monitor update completion action"); + if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { @@ -9526,6 +9535,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // `payment_id` should suffice to ensure we never spuriously drop a second // event for a duplicate payment. if !pending_events.contains(&event_action) { + log_trace!(logger, "Queuing PaymentClaimed event with event completion action {:?}", event_action.1); pending_events.push_back(event_action); } } @@ -10414,6 +10424,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ fail_chan!("Already had channel with the new channel_id"); }, hash_map::Entry::Vacant(e) => { + #[cfg(feature = "safe_channels")] + { + monitor.update_channel_state(UpdateChannelState::Funded((&mut chan).into())); + } let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { // There's no problem signing a counterparty's funding transaction if our monitor @@ -10578,6 +10592,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match chan .funding_signed(&msg, best_block, &self.signer_provider, &self.logger) .and_then(|(funded_chan, monitor)| { + #[cfg(feature = "safe_channels")] + { + monitor.update_channel_state(UpdateChannelState::Funded(funded_chan.into())); + } self.chain_monitor .watch_channel(funded_chan.context.channel_id(), monitor) .map_err(|()| { @@ -11296,6 +11314,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = chan.as_funded_mut() { if let Some(monitor) = monitor_opt { + #[cfg(feature = "safe_channels")] + { + monitor.update_channel_state(UpdateChannelState::Funded(chan.into())); + } let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, @@ -13720,6 +13742,8 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + #[cfg(feature = "safe_channels")] + channel_state: None, }; let during_startup = @@ -16310,6 +16334,7 @@ where MR::Target: MessageRouter, L::Target: Logger, { + #[cfg(not(feature = "safe_channels"))] #[rustfmt::skip] fn write(&self, writer: &mut W) -> Result<(), io::Error> { let _consistency_lock = self.total_consistency_lock.write().unwrap(); @@ -16568,6 +16593,48 @@ where Ok(()) } + + #[cfg(feature = "safe_channels")] + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + let _consistency_lock = self.total_consistency_lock.write().unwrap(); + let best_block = self.best_block.read().unwrap().clone(); + + let per_peer_state = self.per_peer_state.write().unwrap(); + + let mut peer_states = Vec::new(); + for (_, peer_state_mutex) in per_peer_state.iter() { + // Because we're holding the owning `per_peer_state` write lock here there's no chance + // of a lockorder violation deadlock - no other thread can be holding any + // per_peer_state lock at all. + peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self()); + } + + let mut peer_storage_dir: Vec<(&PublicKey, &Vec)> = Vec::new(); + + for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) { + // Peers which we have no channels to should be dropped once disconnected. As we + // disconnect all peers when shutting down and serializing the ChannelManager, we + // consider all peers as disconnected here. There's therefore no need write peers with + // no channels. 
+ if !peer_state.ok_to_remove(false) { + peer_storage_dir.push((peer_pubkey, &peer_state.peer_storage)); + } + } + + write_tlv_fields!(writer, { + (5, self.our_network_pubkey, required), + (7, self.fake_scid_rand_bytes, required), + (11, self.probing_cookie_secret, required), + (15, self.inbound_payment_id_secret, required), + (19, peer_storage_dir, required_vec), + (21, WithoutLength(&self.flow.writeable_async_receive_offer_cache()), required), + (22, self.chain_hash, required), + (23, best_block.height, required), + (24, best_block.block_hash, required), + }); + + Ok(()) + } } impl Writeable for VecDeque<(Event, Option)> { @@ -16843,6 +16910,7 @@ where MR::Target: MessageRouter, L::Target: Logger, { + #[cfg(not(feature = "safe_channels"))] fn read( reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, ) -> Result { @@ -17109,17 +17177,20 @@ where let logger = WithChannelMonitor::from(&args.logger, monitor, None); let channel_id = monitor.channel_id(); - log_info!( - logger, - "Queueing monitor update to ensure missing channel is force closed", - ); let monitor_update = ChannelMonitorUpdate { update_id: monitor.get_latest_update_id().saturating_add(1), updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true, }], channel_id: Some(monitor.channel_id()), + #[cfg(feature = "safe_channels")] + channel_state: Some(UpdateChannelState::Closed), }; + log_info!( + logger, + "Queueing monitor update {} to ensure missing channel is force closed", + monitor_update.update_id + ); let funding_txo = monitor.get_funding_txo(); let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, @@ -17752,6 +17823,8 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + #[cfg(feature = "safe_channels")] + channel_state: None, }, }); } @@ -18507,6 +18580,1029 @@ where Ok((best_block_hash.clone(), channel_manager)) } + + #[cfg(feature = "safe_channels")] + fn read( + reader_unused: &mut Reader, + mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, + ) -> Result { + let mut received_network_pubkey = PublicKey::from_slice(&[2; 33]).unwrap(); + let mut fake_scid_rand_bytes = [0u8; 32]; + let mut probing_cookie_secret = [0u8; 32]; + let mut inbound_payment_id_secret = [0u8; 32]; + let mut peer_storage_dir: Vec<(PublicKey, Vec)> = Vec::new(); + let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); + let mut chain_hash = ChainHash::from([0; 32]); + let mut best_block_height = 0; + let mut best_block_hash = BlockHash::all_zeros(); + + read_tlv_fields!(reader_unused, { + (5, received_network_pubkey, required), + (7, fake_scid_rand_bytes, required), + (11, probing_cookie_secret, required), + (15, inbound_payment_id_secret, required), + (19, peer_storage_dir, required_vec), + (21, async_receive_offer_cache, required), + (22, chain_hash, required), + (23, best_block_height, required), + (24, best_block_hash, required), + }); + + let empty_peer_state = || PeerState { + channel_by_id: new_hash_map(), + inbound_channel_request_by_id: new_hash_map(), + latest_features: InitFeatures::empty(), + pending_msg_events: Vec::new(), + in_flight_monitor_updates: BTreeMap::new(), + monitor_update_blocked_actions: BTreeMap::new(), + actions_blocking_raa_monitor_updates: BTreeMap::new(), + closed_channel_monitor_update_ids: BTreeMap::new(), + peer_storage: Vec::new(), + is_connected: false, + }; + + let mut failed_htlcs = Vec::new(); + let mut channel_id_set = 
new_hash_set(); + let mut per_peer_state = new_hash_map(); + let mut short_to_chan_info = new_hash_map(); + let mut channel_closures = VecDeque::new(); + let mut close_background_events = Vec::new(); + + // Decode channels from monitors. + log_trace!( + args.logger, + "Deserializing ChannelManager with {} channel monitors", + args.channel_monitors.len() + ); + for (_, monitor) in args.channel_monitors.iter() { + let opt_encoded_channel = monitor.get_channel_state(); + let encoded_channel = match opt_encoded_channel { + // Monitor exists, but there is no channel state. This can happen initially or after channel shut down. + None => { + log_trace!( + args.logger, + "ChannelMonitor for channel {} has no channel state to load.", + monitor.channel_id() + ); + continue; + }, + Some(UpdateChannelState::Closed) => { + log_trace!( + args.logger, + "ChannelMonitor for channel {} indicates channel is closed. No channel state to load.", + monitor.channel_id() + ); + continue; + }, + Some(UpdateChannelState::Funded(channel)) => channel, + }; + let mut channel: FundedChannel = FundedChannel::new_from_state( + encoded_channel, + &args.entropy_source, + &args.signer_provider, + &provided_channel_type_features(&args.config), + )?; + let logger = WithChannelContext::from(&args.logger, &channel.context, None); + let channel_id = channel.context.channel_id(); + channel_id_set.insert(channel_id); + + channel.on_startup_drop_completed_blocked_mon_updates_through( + &logger, + monitor.get_latest_update_id(), + ); + log_info!(logger, "Successfully loaded at update_id {} against monitor at update id {} with {} blocked updates", + channel.context.get_latest_monitor_update_id(), + monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); + if let Some(short_channel_id) = channel.funding.get_short_channel_id() { + short_to_chan_info.insert( + short_channel_id, + (channel.context.get_counterparty_node_id(), channel.context.channel_id()), + ); + } + + for short_channel_id in channel.context.historical_scids() { + let cp_id = channel.context.get_counterparty_node_id(); + let chan_id = channel.context.channel_id(); + short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id)); + } + + per_peer_state + .entry(channel.context.get_counterparty_node_id()) + .or_insert_with(|| Mutex::new(empty_peer_state())) + .get_mut() + .unwrap() + .channel_by_id + .insert(channel.context.channel_id(), Channel::from(channel)); + } + + for (channel_id, monitor) in args.channel_monitors.iter() { + if !channel_id_set.contains(channel_id) { + let mut should_queue_fc_update = false; + let counterparty_node_id = monitor.get_counterparty_node_id(); + + // If the ChannelMonitor had any updates, we may need to update it further and + // thus track it in `closed_channel_monitor_update_ids`. If the channel never + // had any updates at all, there can't be any HTLCs pending which we need to + // claim. + // Note that a `ChannelMonitor` is created with `update_id` 0 and after we + // provide it with a closure update its `update_id` will be at 1. + if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 { + should_queue_fc_update = !monitor.no_further_updates_allowed(); + let mut latest_update_id = monitor.get_latest_update_id(); + if should_queue_fc_update { + // Note that for channels closed pre-0.1, the latest update_id is + // `u64::MAX`. 
+ latest_update_id = latest_update_id.saturating_add(1); + } + per_peer_state + .entry(counterparty_node_id) + .or_insert_with(|| Mutex::new(empty_peer_state())) + .lock() + .unwrap() + .closed_channel_monitor_update_ids + .entry(monitor.channel_id()) + .and_modify(|v| *v = cmp::max(latest_update_id, *v)) + .or_insert(latest_update_id); + } + + if !should_queue_fc_update { + continue; + } + + let logger = WithChannelMonitor::from(&args.logger, monitor, None); + let channel_id = monitor.channel_id(); + let monitor_update = ChannelMonitorUpdate { + update_id: monitor.get_latest_update_id().saturating_add(1), + updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { + should_broadcast: true, + }], + channel_id: Some(monitor.channel_id()), + channel_state: Some(UpdateChannelState::Closed), + }; + log_info!( + logger, + "Queueing monitor update {} to ensure missing channel is force closed", + monitor_update.update_id + ); + let funding_txo = monitor.get_funding_txo(); + let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update: monitor_update, + }; + close_background_events.push(update); + } + } + + let mut pending_events_read: VecDeque<(events::Event, Option)> = + VecDeque::new(); + + let highest_seen_timestamp: u32 = 0; + + if !channel_closures.is_empty() { + pending_events_read.append(&mut channel_closures); + } + + let pending_outbounds = OutboundPayments::new(new_hash_map(), args.logger.clone()); + + for (peer_pubkey, peer_storage) in peer_storage_dir { + if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { + peer_state.get_mut().unwrap().peer_storage = peer_storage; + } + } + + // We have to replay (or skip, if they were completed after we wrote the `ChannelManager`) + // each `ChannelMonitorUpdate` in `in_flight_monitor_updates`. After doing so, we have to + // check that each channel we have isn't newer than the latest `ChannelMonitorUpdate`(s) we + // replayed, and for each monitor update we have to replay we have to ensure there's a + // `ChannelMonitor` for it. + // + // In order to do so we first walk all of our live channels (so that we can check their + // state immediately after doing the update replays, when we have the `update_id`s + // available) and then walk any remaining in-flight updates. + // + // Because the actual handling of the in-flight updates is the same, it's macro'ized here: + let mut pending_background_events = Vec::new(); + for (_counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() { + let mut peer_state_lock = peer_state_mtx.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for (chan_id, chan) in peer_state.channel_by_id.iter() { + if let Some(funded_chan) = chan.as_funded() { + let logger = WithChannelContext::from(&args.logger, &funded_chan.context, None); + + // Channels that were persisted have to be funded, otherwise they should have been + // discarded. + let monitor = args + .channel_monitors + .get(chan_id) + .expect("We already checked for monitor presence when loading channels"); + let max_in_flight_update_id = monitor.get_latest_update_id(); + if funded_chan.get_latest_unblocked_monitor_update_id() + > max_in_flight_update_id + { + // If the channel is ahead of the monitor, return DangerousValue: + log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! 
This indicates a potentially-critical violation of the chain::Watch API!"); + log_error!(logger, " The ChannelMonitor is at update_id {} with update_id through {} in-flight", + monitor.get_latest_update_id(), max_in_flight_update_id); + log_error!( + logger, + " but the ChannelManager is at update_id {}.", + funded_chan.get_latest_unblocked_monitor_update_id() + ); + log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); + log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); + log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); + log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); + return Err(DecodeError::DangerousValue); + } + } else { + // We shouldn't have persisted (or read) any unfunded channel types so none should have been + // created in this `channel_by_id` map. + debug_assert!(false); + return Err(DecodeError::InvalidValue); + } + } + } + + // The newly generated `close_background_events` have to be added after any updates that + // were already in-flight on shutdown, so we append them here. + pending_background_events.reserve(close_background_events.len()); + 'each_bg_event: for mut new_event in close_background_events { + if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update, + } = &mut new_event + { + debug_assert_eq!(update.updates.len(), 1); + debug_assert!(matches!( + update.updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. } + )); + let mut updated_id = false; + for pending_event in pending_background_events.iter() { + if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: pending_cp, + funding_txo: pending_funding, + channel_id: pending_chan_id, + update: pending_update, + } = pending_event + { + let for_same_channel = counterparty_node_id == pending_cp + && funding_txo == pending_funding + && channel_id == pending_chan_id; + if for_same_channel { + debug_assert!(update.update_id >= pending_update.update_id); + if pending_update.updates.iter().any(|upd| { + matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. }) + }) { + // If the background event we're looking at is just + // force-closing the channel which already has a pending + // force-close update, no need to duplicate it. + continue 'each_bg_event; + } + update.update_id = pending_update.update_id.saturating_add(1); + updated_id = true; + } + } + } + let mut per_peer_state = per_peer_state + .get(counterparty_node_id) + .expect("If we have pending updates for a channel it must have an entry") + .lock() + .unwrap(); + if updated_id { + per_peer_state + .closed_channel_monitor_update_ids + .entry(*channel_id) + .and_modify(|v| *v = cmp::max(update.update_id, *v)) + .or_insert(update.update_id); + } + let in_flight_updates = &mut per_peer_state + .in_flight_monitor_updates + .entry(*channel_id) + .or_insert_with(|| (*funding_txo, Vec::new())) + .1; + debug_assert!(!in_flight_updates.iter().any(|upd| upd == update)); + in_flight_updates.push(update.clone()); + } + pending_background_events.push(new_event); + } + + // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we + // should ensure we try them again on the inbound edge. 
We put them here and do so after we + // have a fully-constructed `ChannelManager` at the end. + let mut pending_claims_to_replay = Vec::new(); + + { + // If we're tracking pending payments, ensure we haven't lost any by looking at the + // ChannelMonitor data for any channels for which we do not have authorative state + // (i.e. those for which we just force-closed above or we otherwise don't have a + // corresponding `Channel` at all). + // This avoids several edge-cases where we would otherwise "forget" about pending + // payments which are still in-flight via their on-chain state. + // We only rebuild the pending payments map if we were most recently serialized by + // 0.0.102+ + // + // First we rebuild all pending payments, then separately re-claim and re-fail pending + // payments. This avoids edge-cases around MPP payments resulting in redundant actions. + for (channel_id, monitor) in args.channel_monitors.iter() { + let mut is_channel_closed = true; + let counterparty_node_id = monitor.get_counterparty_node_id(); + if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mtx.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + } + + if is_channel_closed { + for (htlc_source, (htlc, _)) in monitor.get_all_current_outbound_htlcs() { + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc.payment_hash), + ); + if let HTLCSource::OutboundRoute { + payment_id, session_priv, path, .. + } = htlc_source + { + if path.hops.is_empty() { + log_error!(logger, "Got an empty path for a pending payment"); + return Err(DecodeError::InvalidValue); + } + + let mut session_priv_bytes = [0; 32]; + session_priv_bytes[..].copy_from_slice(&session_priv[..]); + pending_outbounds.insert_from_monitor_on_startup( + payment_id, + htlc.payment_hash, + session_priv_bytes, + &path, + best_block_height, + ); + } + } + } + } + for (channel_id, monitor) in args.channel_monitors.iter() { + let mut is_channel_closed = true; + let counterparty_node_id = monitor.get_counterparty_node_id(); + if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mtx.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + } + + if is_channel_closed { + for (htlc_source, (htlc, preimage_opt)) in + monitor.get_all_current_outbound_htlcs() + { + let htlc_id = SentHTLCId::from_source(&htlc_source); + match htlc_source { + HTLCSource::PreviousHopData(..) => {}, + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + .. 
+ } => { + if let Some(preimage) = preimage_opt { + let pending_events = Mutex::new(pending_events_read); + let update = PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id, + }; + let mut compl_action = Some( + EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) + ); + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + session_priv, + path, + true, + &mut compl_action, + &pending_events, + ); + // If the completion action was not consumed, then there was no + // payment to claim, and we need to tell the `ChannelMonitor` + // we don't need to hear about the HTLC again, at least as long + // as the PaymentSent event isn't still sitting around in our + // event queue. + let have_action = if compl_action.is_some() { + let pending_events = pending_events.lock().unwrap(); + pending_events.iter().any(|(_, act)| *act == compl_action) + } else { + false + }; + if !have_action && compl_action.is_some() { + let mut peer_state = per_peer_state + .get(&counterparty_node_id) + .map(|state| state.lock().unwrap()) + .expect("Channels originating a preimage must have peer state"); + let update_id = peer_state + .closed_channel_monitor_update_ids + .get_mut(channel_id) + .expect("Channels originating a preimage must have a monitor"); + // Note that for channels closed pre-0.1, the latest + // update_id is `u64::MAX`. + *update_id = update_id.saturating_add(1); + + pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: monitor.get_counterparty_node_id(), + funding_txo: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + update: ChannelMonitorUpdate { + update_id: *update_id, + channel_id: Some(monitor.channel_id()), + updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { + htlc: htlc_id, + }], + #[cfg(feature = "safe_channels")] + channel_state: None, + }, + }); + } + pending_events_read = pending_events.into_inner().unwrap(); + } + }, + } + } + for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + log_info!( + args.logger, + "Failing HTLC with payment hash {} as it was resolved on-chain.", + payment_hash + ); + let completion_action = Some(PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id: SentHTLCId::from_source(&htlc_source), + }); + + failed_htlcs.push(( + htlc_source, + payment_hash, + monitor.get_counterparty_node_id(), + monitor.channel_id(), + LocalHTLCFailureReason::OnChainTimeout, + completion_action, + )); + } + } + + // Whether the downstream channel was closed or not, try to re-apply any payment + // preimages from it which may be needed in upstream channels for forwarded + // payments. + let mut fail_read = false; + let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs() + .into_iter() + .filter_map(|(htlc_source, (htlc, preimage_opt))| { + if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source { + if let Some(payment_preimage) = preimage_opt { + let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.channel_id); + // Note that for channels which have gone to chain, + // `get_all_current_outbound_htlcs` is never pruned and always returns + // a constant set until the monitor is removed/archived. 
Thus, we + // want to skip replaying claims that have definitely been resolved + // on-chain. + + // If the inbound monitor is not present, we assume it was fully + // resolved and properly archived, implying this payment had plenty + // of time to get claimed and we can safely skip any further + // attempts to claim it (they wouldn't succeed anyway as we don't + // have a monitor against which to do so). + let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor { + monitor + } else { + return None; + }; + // Second, if the inbound edge of the payment's monitor has been + // fully claimed we've had at least `ANTI_REORG_DELAY` blocks to + // get any PaymentForwarded event(s) to the user and assume that + // there's no need to try to replay the claim just for that. + let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances(); + if inbound_edge_balances.is_empty() { + return None; + } + + if prev_hop.counterparty_node_id.is_none() { + // We no longer support claiming an HTLC where we don't have + // the counterparty_node_id available if the claim has to go to + // a closed channel. Its possible we can get away with it if + // the channel is not yet closed, but its by no means a + // guarantee. + + // Thus, in this case we are a bit more aggressive with our + // pruning - if we have no use for the claim (because the + // inbound edge of the payment's monitor has already claimed + // the HTLC) we skip trying to replay the claim. + let htlc_payment_hash: PaymentHash = payment_preimage.into(); + let balance_could_incl_htlc = |bal| match bal { + &Balance::ClaimableOnChannelClose { .. } => { + // The channel is still open, assume we can still + // claim against it + true + }, + &Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => { + payment_hash == htlc_payment_hash + }, + _ => false, + }; + let htlc_may_be_in_balances = + inbound_edge_balances.iter().any(balance_could_incl_htlc); + if !htlc_may_be_in_balances { + return None; + } + + // First check if we're absolutely going to fail - if we need + // to replay this claim to get the preimage into the inbound + // edge monitor but the channel is closed (and thus we'll + // immediately panic if we call claim_funds_from_hop). + if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() { + log_error!(args.logger, + "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\ + All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1", + htlc_payment_hash, + payment_preimage, + ); + fail_read = true; + } + + // At this point we're confident we need the claim, but the + // inbound edge channel is still live. As long as this remains + // the case, we can conceivably proceed, but we run some risk + // of panicking at runtime. The user ideally should have read + // the release notes and we wouldn't be here, but we go ahead + // and let things run in the hope that it'll all just work out. 
+ log_error!(args.logger, + "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\ + As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\ + All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\ + Continuing anyway, though panics may occur!", + htlc_payment_hash, + payment_preimage, + ); + } + + Some((htlc_source, payment_preimage, htlc.amount_msat, + is_channel_closed, monitor.get_counterparty_node_id(), + monitor.get_funding_txo(), monitor.channel_id())) + } else { None } + } else { + // If it was an outbound payment, we've handled it above - if a preimage + // came in and we persisted the `ChannelManager` we either handled it and + // are good to go or the channel force-closed - we don't have to handle the + // channel still live case here. + None + } + }); + for tuple in outbound_claimed_htlcs_iter { + pending_claims_to_replay.push(tuple); + } + if fail_read { + return Err(DecodeError::InvalidValue); + } + } + } + + let expanded_inbound_key = args.node_signer.get_expanded_key(); + + let mut secp_ctx = Secp256k1::new(); + secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes()); + + let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) { + Ok(key) => key, + Err(()) => return Err(DecodeError::InvalidValue), + }; + if received_network_pubkey != our_network_pubkey { + log_error!(args.logger, "Key that was generated does not match the existing key."); + return Err(DecodeError::InvalidValue); + } + + let mut outbound_scid_aliases = new_hash_set(); + for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for (chan_id, chan) in peer_state.channel_by_id.iter_mut() { + if let Some(funded_chan) = chan.as_funded_mut() { + let logger = WithChannelContext::from(&args.logger, &funded_chan.context, None); + if funded_chan.context.outbound_scid_alias() == 0 { + let mut outbound_scid_alias; + loop { + outbound_scid_alias = fake_scid::Namespace::OutboundAlias + .get_fake_scid( + best_block_height, + &chain_hash, + &fake_scid_rand_bytes, + &args.entropy_source, + ); + if outbound_scid_aliases.insert(outbound_scid_alias) { + break; + } + } + funded_chan.context.set_outbound_scid_alias(outbound_scid_alias); + } else if !outbound_scid_aliases + .insert(funded_chan.context.outbound_scid_alias()) + { + // Note that in rare cases its possible to hit this while reading an older + // channel if we just happened to pick a colliding outbound alias above. + log_error!( + logger, + "Got duplicate outbound SCID alias; {}", + funded_chan.context.outbound_scid_alias() + ); + return Err(DecodeError::InvalidValue); + } + if funded_chan.context.is_usable() { + let alias = funded_chan.context.outbound_scid_alias(); + let cp_id = funded_chan.context.get_counterparty_node_id(); + if short_to_chan_info.insert(alias, (cp_id, *chan_id)).is_some() { + // Note that in rare cases its possible to hit this while reading an older + // channel if we just happened to pick a colliding outbound alias above. 
+ log_error!( + logger, + "Got duplicate outbound SCID alias; {}", + funded_chan.context.outbound_scid_alias() + ); + return Err(DecodeError::InvalidValue); + } + } + } else { + // We shouldn't have persisted (or read) any unfunded channel types so none should have been + // created in this `channel_by_id` map. + debug_assert!(false); + return Err(DecodeError::InvalidValue); + } + } + } + + let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator); + + let best_block = BestBlock::new(best_block_hash, best_block_height); + let flow = OffersMessageFlow::new( + chain_hash, + best_block, + our_network_pubkey, + highest_seen_timestamp, + expanded_inbound_key, + args.node_signer.get_receive_auth_key(), + secp_ctx.clone(), + args.message_router, + args.logger.clone(), + ) + .with_async_payments_offers_cache(async_receive_offer_cache); + + let channel_manager = ChannelManager { + chain_hash, + fee_estimator: bounded_fee_estimator, + chain_monitor: args.chain_monitor, + tx_broadcaster: args.tx_broadcaster, + router: args.router, + flow, + + best_block: RwLock::new(best_block), + + inbound_payment_key: expanded_inbound_key, + pending_outbound_payments: pending_outbounds, + pending_intercepted_htlcs: Mutex::new(new_hash_map()), + + forward_htlcs: Mutex::new(new_hash_map()), + decode_update_add_htlcs: Mutex::new(new_hash_map()), + claimable_payments: Mutex::new(ClaimablePayments { + claimable_payments: new_hash_map(), + pending_claiming_payments: new_hash_map(), + }), + outbound_scid_aliases: Mutex::new(outbound_scid_aliases), + short_to_chan_info: FairRwLock::new(short_to_chan_info), + fake_scid_rand_bytes, + + probing_cookie_secret, + inbound_payment_id_secret, + + our_network_pubkey, + secp_ctx, + + highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize), + + per_peer_state: FairRwLock::new(per_peer_state), + + #[cfg(not(any(test, feature = "_externalize_tests")))] + monitor_update_type: AtomicUsize::new(0), + + pending_events: Mutex::new(pending_events_read), + pending_events_processor: AtomicBool::new(false), + pending_htlc_forwards_processor: AtomicBool::new(false), + pending_background_events: Mutex::new(pending_background_events), + total_consistency_lock: RwLock::new(()), + background_events_processed_since_startup: AtomicBool::new(false), + + event_persist_notifier: Notifier::new(), + needs_persist_flag: AtomicBool::new(false), + + funding_batch_states: Mutex::new(BTreeMap::new()), + + pending_broadcast_messages: Mutex::new(Vec::new()), + + entropy_source: args.entropy_source, + node_signer: args.node_signer, + signer_provider: args.signer_provider, + + last_days_feerates: Mutex::new(VecDeque::new()), + + logger: args.logger, + config: RwLock::new(args.config), + + #[cfg(feature = "_test_utils")] + testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()), + }; + + let mut processed_claims: HashSet> = new_hash_set(); + for (_, monitor) in args.channel_monitors.iter() { + for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() + { + if !payment_claims.is_empty() { + for payment_claim in payment_claims { + if processed_claims.contains(&payment_claim.mpp_parts) { + // We might get the same payment a few times from different channels + // that the MPP payment was received using. There's no point in trying + // to claim the same payment again and again, so we check if the HTLCs + // are the same and skip the payment here. 
+ continue; + } + if payment_claim.mpp_parts.is_empty() { + return Err(DecodeError::InvalidValue); + } + { + let payments = channel_manager.claimable_payments.lock().unwrap(); + if !payments.claimable_payments.contains_key(&payment_hash) { + if let Some(payment) = + payments.pending_claiming_payments.get(&payment_hash) + { + if payment.payment_id + == payment_claim.claiming_payment.payment_id + { + // If this payment already exists and was marked as + // being-claimed then the serialized state must contain all + // of the pending `ChannelMonitorUpdate`s required to get + // the preimage on disk in all MPP parts. Thus we can skip + // the replay below. + continue; + } + } + } + } + + let mut channels_without_preimage = payment_claim + .mpp_parts + .iter() + .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.channel_id)) + .collect::>(); + // If we have multiple MPP parts which were received over the same channel, + // we only track it once as once we get a preimage durably in the + // `ChannelMonitor` it will be used for all HTLCs with a matching hash. + channels_without_preimage.sort_unstable(); + channels_without_preimage.dedup(); + let pending_claims = PendingMPPClaim { + channels_without_preimage, + channels_with_preimage: Vec::new(), + }; + let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims))); + + // While it may be duplicative to generate a PaymentClaimed here, trying to + // figure out if the user definitely saw it before shutdown would require some + // nontrivial logic and may break as we move away from regularly persisting + // ChannelManager. Instead, we rely on the users' event handler being + // idempotent and just blindly generate one no matter what, letting the + // preimages eventually timing out from ChannelMonitors to prevent us from + // doing so forever. + + let claim_found = channel_manager + .claimable_payments + .lock() + .unwrap() + .begin_claiming_payment( + payment_hash, + &channel_manager.node_signer, + &channel_manager.logger, + &channel_manager.inbound_payment_id_secret, + true, + ); + if claim_found.is_err() { + let mut claimable_payments = + channel_manager.claimable_payments.lock().unwrap(); + match claimable_payments.pending_claiming_payments.entry(payment_hash) { + hash_map::Entry::Occupied(_) => { + debug_assert!( + false, + "Entry was added in begin_claiming_payment" + ); + return Err(DecodeError::InvalidValue); + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(payment_claim.claiming_payment); + }, + } + } + + for part in payment_claim.mpp_parts.iter() { + let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| { + ( + part.counterparty_node_id, + part.channel_id, + PendingMPPClaimPointer(Arc::clone(&ptr)), + ) + }); + let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| { + RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { + pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)), + } + }); + // Note that we don't need to pass the `payment_info` here - its + // already (clearly) durably on disk in the `ChannelMonitor` so there's + // no need to worry about getting it into others. + // + // We don't encode any attribution data, because the required onion shared secret isn't + // available here. 
+ channel_manager.claim_mpp_part( + part.into(), + payment_preimage, + None, + None, + |_, _| { + ( + Some(MonitorUpdateCompletionAction::PaymentClaimed { + payment_hash, + pending_mpp_claim, + }), + pending_claim_ptr, + ) + }, + ); + } + processed_claims.insert(payment_claim.mpp_parts); + } + } else { + let per_peer_state = channel_manager.per_peer_state.read().unwrap(); + let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap(); + let payment = claimable_payments.claimable_payments.remove(&payment_hash); + mem::drop(claimable_payments); + if let Some(payment) = payment { + log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash); + let mut claimable_amt_msat = 0; + let mut receiver_node_id = Some(our_network_pubkey); + let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret; + if phantom_shared_secret.is_some() { + let phantom_pubkey = channel_manager + .node_signer + .get_node_id(Recipient::PhantomNode) + .expect("Failed to get node_id for phantom node recipient"); + receiver_node_id = Some(phantom_pubkey) + } + for claimable_htlc in &payment.htlcs { + claimable_amt_msat += claimable_htlc.value; + + // Add a holding-cell claim of the payment to the Channel, which should be + // applied ~immediately on peer reconnection. Because it won't generate a + // new commitment transaction we can just provide the payment preimage to + // the corresponding ChannelMonitor and nothing else. + // + // We do so directly instead of via the normal ChannelMonitor update + // procedure as the ChainMonitor hasn't yet been initialized, implying + // we're not allowed to call it directly yet. Further, we do the update + // without incrementing the ChannelMonitor update ID as there isn't any + // reason to. + // If we were to generate a new ChannelMonitor update ID here and then + // crash before the user finishes block connect we'd end up force-closing + // this channel as well. On the flip side, there's no harm in restarting + // without the new monitor persisted - we'll end up right back here on + // restart. + let previous_channel_id = claimable_htlc.prev_hop.channel_id; + let peer_node_id = monitor.get_counterparty_node_id(); + { + let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap(); + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let Some(channel) = peer_state + .channel_by_id + .get_mut(&previous_channel_id) + .and_then(Channel::as_funded_mut) + { + let logger = WithChannelContext::from( + &channel_manager.logger, + &channel.context, + Some(payment_hash), + ); + channel + .claim_htlc_while_disconnected_dropping_mon_update_legacy( + claimable_htlc.prev_hop.htlc_id, + payment_preimage, + &&logger, + ); + } + } + if let Some(previous_hop_monitor) = + args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) + { + // Note that this is unsafe as we no longer require the + // `ChannelMonitor`s to be re-persisted prior to this + // `ChannelManager` being persisted after we get started running. + // If this `ChannelManager` gets persisted first then we crash, we + // won't have the `claimable_payments` entry we need to re-enter + // this code block, causing us to not re-apply the preimage to this + // `ChannelMonitor`. + // + // We should never be here with modern payment claims, however, as + // they should always include the HTLC list. 
Instead, this is only + // for nodes during upgrade, and we explicitly require the old + // persistence semantics on upgrade in the release notes. + previous_hop_monitor.provide_payment_preimage_unsafe_legacy( + &payment_hash, + &payment_preimage, + &channel_manager.tx_broadcaster, + &channel_manager.fee_estimator, + &channel_manager.logger, + ); + } + } + let mut pending_events = channel_manager.pending_events.lock().unwrap(); + let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret); + let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(); + let sender_intended_total_msat = + payment.htlcs.first().map(|htlc| htlc.total_msat); + pending_events.push_back(( + events::Event::PaymentClaimed { + receiver_node_id, + payment_hash, + purpose: payment.purpose, + amount_msat: claimable_amt_msat, + htlcs, + sender_intended_total_msat, + onion_fields: payment.onion_fields, + payment_id: Some(payment_id), + }, + // Note that we don't bother adding a EventCompletionAction here to + // ensure the `PaymentClaimed` event is durable processed as this + // should only be hit for particularly old channels and we don't have + // enough information to generate such an action. + None, + )); + } + } + } + } + + for htlc_source in failed_htlcs { + let (source, hash, counterparty_id, channel_id, failure_reason, ev_action) = + htlc_source; + let receiver = + HTLCHandlingFailureType::Forward { node_id: Some(counterparty_id), channel_id }; + let reason = HTLCFailReason::from_failure_code(failure_reason); + channel_manager + .fail_htlc_backwards_internal(&source, &hash, &reason, receiver, ev_action); + } + + for ( + source, + preimage, + downstream_value, + downstream_closed, + downstream_node_id, + downstream_funding, + downstream_channel_id, + ) in pending_claims_to_replay + { + // We use `downstream_closed` in place of `from_onchain` here just as a guess - we + // don't remember in the `ChannelMonitor` where we got a preimage from, but if the + // channel is closed we just assume that it probably came from an on-chain claim. + // The same holds for attribution data. We don't have any, so we pass an empty one. + channel_manager.claim_funds_internal( + source, + preimage, + Some(downstream_value), + None, + downstream_closed, + downstream_node_id, + downstream_funding, + downstream_channel_id, + None, + None, + None, + ); + } + + //TODO: Broadcast channel update for closed channels, but only after we've made a + //connection or two. + + Ok((best_block_hash.clone(), channel_manager)) + } } #[cfg(test)] diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index f80b2b6daea..d9ff9f67fdd 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -88,7 +88,7 @@ impl SpliceContribution { /// An input to contribute to a channel's funding transaction either when using the v2 channel /// establishment protocol or when splicing. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct FundingTxInput { /// The unspent [`TxOut`] that the input spends. 
///
diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs
index 4340aad420a..4ead5fb3037 100644
--- a/lightning/src/ln/interactivetxs.rs
+++ b/lightning/src/ln/interactivetxs.rs
@@ -98,7 +98,7 @@ pub(crate) struct NegotiationError {
	pub contributed_outputs: Vec,
}
-#[derive(Debug, Clone, Copy, PartialEq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum AbortReason {
	InvalidStateTransition,
	UnexpectedCounterpartyMessage,
@@ -550,7 +550,7 @@ impl ConstructedTransaction {
	}
}
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct SharedInputSignature {
	holder_signature_first: bool,
	witness_script: ScriptBuf,
@@ -568,7 +568,7 @@ impl_writeable_tlv_based!(SharedInputSignature, {
/// See the specification for more details:
/// https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#the-commitment_signed-message
/// https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#sharing-funding-signatures-tx_signatures
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct InteractiveTxSigningSession {
	unsigned_tx: ConstructedTransaction,
	holder_sends_tx_signatures_first: bool,
@@ -919,7 +919,7 @@ impl_writeable_tlv_based!(InteractiveTxSigningSession, {
	(11, shared_input_signature, required),
});
-#[derive(Debug)]
+#[derive(Debug, Clone, PartialEq, Eq)]
struct NegotiationContext {
	holder_node_id: PublicKey,
	counterparty_node_id: PublicKey,
@@ -1398,7 +1398,7 @@ macro_rules! define_state {
	};
	($state: ident, $inner: ident, $doc: expr) => {
		#[doc = $doc]
-		#[derive(Debug)]
+		#[derive(Debug, Clone, PartialEq, Eq)]
		struct $state($inner);
		impl State for $state {}
	};
@@ -1527,7 +1527,7 @@ define_state_transitions!(RECEIVED_MSG_STATE, [
define_state_transitions!(TX_COMPLETE, SentChangeMsg, ReceivedTxComplete);
define_state_transitions!(TX_COMPLETE, ReceivedChangeMsg, SentTxComplete);
-#[derive(Debug)]
+#[derive(Debug, Clone, PartialEq, Eq)]
enum StateMachine {
	Indeterminate,
	SentChangeMsg(SentChangeMsg),
@@ -1941,7 +1941,7 @@ impl InteractiveTxInput {
	}
}
-#[derive(Debug)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) struct InteractiveTxConstructor {
	state_machine: StateMachine,
	is_initiator: bool,
@@ -1954,7 +1954,7 @@ pub(super) struct InteractiveTxConstructor {
}
#[allow(clippy::enum_variant_names)] // Clippy doesn't like the repeated `Tx` prefix here
-#[derive(Debug)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum InteractiveTxMessageSend {
	TxAddInput(msgs::TxAddInput),
	TxAddOutput(msgs::TxAddOutput),
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index 8e230fab1d9..750ff88fe3c 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -2486,6 +2486,11 @@ mod fuzzy_internal_msgs {
		pub data: Vec,
		pub attribution_data: Option,
	}
+
+	impl_writeable_tlv_based!(OnionErrorPacket, {
+		(0, data, required),
+		(1, attribution_data, required),
+	});
}
#[cfg(fuzzing)]
pub use self::fuzzy_internal_msgs::*;
diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs
index e32b39775fe..48fb216ec7e 100644
--- a/lightning/src/ln/onion_utils.rs
+++ b/lightning/src/ln/onion_utils.rs
@@ -1877,12 +1877,10 @@ impl From<&HTLCFailReason> for HTLCHandlingFailureReason {
	}
}
-#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-#[cfg_attr(test, derive(PartialEq))]
+#[derive(Clone, PartialEq, Eq)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) struct HTLCFailReason(HTLCFailReasonRepr);
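// The PartialEq/Eq additions in this file and the files above follow from how Rust
// derives propagate: a type can only #[derive(PartialEq, Eq)] when every field's type
// already implements those traits, so a comparable channel-state snapshot (as
// `UpdateChannelState` derives `PartialEq, Eq` in this patch) appears to pull the
// derives onto everything it embeds: interactive-tx state, HTLC failure reasons,
// configs, and so on. A minimal, self-contained sketch of that propagation
// (hypothetical names, not LDK types):
#[derive(Debug, Clone, PartialEq, Eq)]
struct NegotiationDetails {
	serial_id: u64,
	value_sats: u64,
}

#[derive(Debug, Clone, PartialEq, Eq)]
enum PendingInput {
	Proposed(NegotiationDetails),
	Confirmed(NegotiationDetails),
}

// This derive compiles only because `PendingInput` (and everything nested inside it)
// also implements PartialEq/Eq.
#[derive(Debug, Clone, PartialEq, Eq)]
struct ChannelSnapshotExample {
	inputs: Vec<PendingInput>,
}

fn main() {
	let snapshot = ChannelSnapshotExample {
		inputs: vec![
			PendingInput::Proposed(NegotiationDetails { serial_id: 1, value_sats: 5_000 }),
			PendingInput::Confirmed(NegotiationDetails { serial_id: 2, value_sats: 7_000 }),
		],
	};
	// Structural equality is what makes "has the persisted state changed?" checks possible.
	assert_eq!(snapshot.clone(), snapshot);
}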
-#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-#[cfg_attr(test, derive(PartialEq))]
+#[derive(Clone, PartialEq, Eq)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
enum HTLCFailReasonRepr {
	LightningError { err: msgs::OnionErrorPacket, hold_time: Option },
	Reason { data: Vec, failure_reason: LocalHTLCFailureReason },
diff --git a/lightning/src/ln/script.rs b/lightning/src/ln/script.rs
index 5258b8f3283..3daebc1a265 100644
--- a/lightning/src/ln/script.rs
+++ b/lightning/src/ln/script.rs
@@ -21,8 +21,7 @@ use crate::prelude::*;
/// A script pubkey for shutting down a channel as defined by [BOLT #2].
///
/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
-#[cfg_attr(test, derive(Debug))]
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, PartialEq, Eq, Debug)]
pub struct ShutdownScript(ShutdownScriptImpl);
/// An error occurring when converting from [`ScriptBuf`] to [`ShutdownScript`].
diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs
index dd1aaa40424..1ec212da5e3 100644
--- a/lightning/src/util/config.rs
+++ b/lightning/src/util/config.rs
@@ -772,7 +772,7 @@ impl From for ChannelConfigUpdate {
/// Legacy version of [`ChannelConfig`] that stored the static
/// [`ChannelHandshakeConfig::announce_for_forwarding`] and
/// [`ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`] fields.
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(crate) struct LegacyChannelConfig {
	pub(crate) options: ChannelConfig,
	/// Deprecated but may still be read from. See [`ChannelHandshakeConfig::announce_for_forwarding`] to
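Taken as a whole, the changes above sketch a persistence model in which the authoritative channel state rides along with each monitor update and the manager is reconstructed from the monitors at startup. The following is a deliberately simplified, self-contained illustration of that flow; the types and helpers are hypothetical stand-ins rather than LDK APIs, and the assumption that an update carrying no snapshot leaves the stored snapshot untouched is mine, not the patch's.

use std::collections::HashMap;

// Simplified stand-in for the per-channel state snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
struct ChannelSnapshot {
	channel_id: [u8; 32],
	commitment_number: u64,
}

// Mirrors the idea of `UpdateChannelState`: either a funded snapshot or "closed".
#[derive(Clone, Debug, PartialEq, Eq)]
enum StoredChannelState {
	Funded(ChannelSnapshot),
	Closed,
}

// Mirrors a monitor update that optionally carries the latest channel state.
#[derive(Clone, Debug)]
struct MonitorUpdate {
	update_id: u64,
	channel_state: Option<StoredChannelState>,
}

#[derive(Default)]
struct Monitor {
	latest_update_id: u64,
	channel_state: Option<StoredChannelState>,
}

impl Monitor {
	// Each applied update advances the update id; if it carries a snapshot, that
	// snapshot replaces whatever was stored before (assumption: no snapshot means
	// "no change to the stored state").
	fn apply(&mut self, update: &MonitorUpdate) {
		self.latest_update_id = update.update_id;
		if update.channel_state.is_some() {
			self.channel_state = update.channel_state.clone();
		}
	}
}

// At startup the "manager" no longer deserializes channels from its own blob; it
// rebuilds them from whatever snapshots the monitors carry, skipping closed or
// state-less monitors.
fn rebuild_channels(monitors: &HashMap<[u8; 32], Monitor>) -> Vec<ChannelSnapshot> {
	monitors
		.values()
		.filter_map(|monitor| match &monitor.channel_state {
			Some(StoredChannelState::Funded(snapshot)) => Some(snapshot.clone()),
			Some(StoredChannelState::Closed) | None => None,
		})
		.collect()
}

fn main() {
	let channel_id = [7u8; 32];
	let mut monitor = Monitor::default();
	monitor.apply(&MonitorUpdate {
		update_id: 1,
		channel_state: Some(StoredChannelState::Funded(ChannelSnapshot {
			channel_id,
			commitment_number: 42,
		})),
	});

	let mut monitors = HashMap::new();
	monitors.insert(channel_id, monitor);
	assert_eq!(monitors[&channel_id].latest_update_id, 1);

	// "Restart": the set of open channels is recovered purely from the monitors.
	let restored = rebuild_channels(&monitors);
	assert_eq!(restored, vec![ChannelSnapshot { channel_id, commitment_number: 42 }]);
}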