diff --git a/lightning-macros/src/lib.rs b/lightning-macros/src/lib.rs index e784acf72fb..d6e8678861f 100644 --- a/lightning-macros/src/lib.rs +++ b/lightning-macros/src/lib.rs @@ -11,7 +11,6 @@ //! Proc macros used by LDK -#![cfg_attr(not(test), no_std)] #![deny(missing_docs)] #![forbid(unsafe_code)] #![deny(rustdoc::broken_intra_doc_links)] @@ -20,13 +19,19 @@ extern crate alloc; -use alloc::string::ToString; +use std::collections::HashMap; +use std::string::ToString; +use std::vec::Vec; + use proc_macro::{Delimiter, Group, TokenStream, TokenTree}; use proc_macro2::TokenStream as TokenStream2; -use quote::quote; + +use quote::{quote, ToTokens}; + use syn::spanned::Spanned; use syn::{parse, ImplItemFn, Token}; use syn::{parse_macro_input, Item}; +use syn::parse::Parser; fn add_async_method(mut parsed: ImplItemFn) -> TokenStream { let output = quote! { @@ -400,3 +405,531 @@ pub fn xtest_inventory(_input: TokenStream) -> TokenStream { TokenStream::from(expanded) } + +struct AddLogsCtx<'a, 'b> { + methods_with_param: &'a [syn::Ident], + substructs_logged: &'a [syn::Ident], + otherstructs_logged: &'a [syn::Ident], + parent: Option<&'b AddLogsCtx<'a, 'b>>, + var_types: HashMap, +} + +impl<'a, 'b> AddLogsCtx<'a, 'b> { + fn push_var_ctx(&'b self) -> AddLogsCtx<'a, 'b> { + AddLogsCtx { + methods_with_param: self.methods_with_param, + substructs_logged: self.substructs_logged, + otherstructs_logged: self.otherstructs_logged, + parent: Some(self), + var_types: HashMap::new(), + } + } + + fn resolve_var(&self, var: &syn::Ident) -> Option<&syn::Ident> { + let mut ctx = self; + loop { + if let Some(res) = ctx.var_types.get(var) { + return Some(res); + } + if let Some(parent) = ctx.parent { + ctx = parent; + } else { + return None; + } + } + } +} + +fn process_var_decl(pat: &syn::Pat, ty: &syn::Type, ctx: &mut AddLogsCtx) { + if let syn::Pat::Ident(name) = pat { + if let syn::Type::Path(p) = ty { + if p.path.segments.len() == 1 { + ctx.var_types.insert(name.ident.clone(), p.path.segments[0].ident.clone()); + } + } else if let syn::Type::Reference(r) = ty { + if let syn::Type::Path(p) = &*r.elem { + if p.path.segments.len() == 1 { + ctx.var_types.insert(name.ident.clone(), p.path.segments[0].ident.clone()); + } + } else if let syn::Type::Reference(r) = &*r.elem { + if let syn::Type::Path(p) = &*r.elem { + if p.path.segments.len() == 1 { + ctx.var_types.insert(name.ident.clone(), p.path.segments[0].ident.clone()); + } + } + } + } + } else if let syn::Pat::Tuple(names) = pat { + if let syn::Type::Tuple(types) = ty { + assert_eq!(names.elems.len(), types.elems.len()); + for (name, ty) in names.elems.iter().zip(types.elems.iter()) { + process_var_decl(name, ty, ctx); + } + } else if let syn::Type::Reference(r) = ty { + if let syn::Type::Tuple(types) = &*r.elem { + assert_eq!(names.elems.len(), types.elems.len()); + for (name, ty) in names.elems.iter().zip(types.elems.iter()) { + process_var_decl(name, ty, ctx); + } + } else if let syn::Type::Reference(r) = &*r.elem { + if let syn::Type::Tuple(types) = &*r.elem { + assert_eq!(names.elems.len(), types.elems.len()); + for (name, ty) in names.elems.iter().zip(types.elems.iter()) { + process_var_decl(name, ty, ctx); + } + } + } + } + } +} + +fn add_logs_to_macro_call(m: &mut syn::Macro, ctx: &mut AddLogsCtx) { + // Many macros just take a bunch of exprs as arguments, separated by commas. + // In that case, we optimistically edit the args as if they're exprs. 
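+	// If the tokens don't parse as a plain comma-separated list of expressions (e.g. for
+	// macros with their own grammar), we leave the invocation untouched rather than risk
+	// mangling it.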
+	let parser = syn::punctuated::Punctuated::<syn::Expr, Token![,]>::parse_terminated;
+	if let Ok(mut parsed) = parser.parse(m.tokens.clone().into()) {
+		for expr in parsed.iter_mut() {
+			add_logs_to_self_exprs(expr, ctx);
+		}
+		m.tokens = quote! { #parsed }.into();
+	}
+}
+
+fn add_logs_to_stmt_list(s: &mut Vec<syn::Stmt>, ctx: &mut AddLogsCtx) {
+	for stmt in s.iter_mut() {
+		match stmt {
+			syn::Stmt::Expr(ref mut expr, _) => add_logs_to_self_exprs(expr, ctx),
+			syn::Stmt::Local(syn::Local { pat, init, .. }) => {
+				if let syn::Pat::Type(t) = pat {
+					process_var_decl(&*t.pat, &*t.ty, ctx);
+				}
+				if let Some(l) = init {
+					add_logs_to_self_exprs(&mut *l.expr, ctx);
+					if let Some((_, e)) = &mut l.diverge {
+						add_logs_to_self_exprs(&mut *e, ctx);
+					}
+				}
+			},
+			syn::Stmt::Macro(m) => add_logs_to_macro_call(&mut m.mac, ctx),
+			syn::Stmt::Item(syn::Item::Fn(f)) => {
+				add_logs_to_stmt_list(&mut f.block.stmts, ctx);
+			},
+			syn::Stmt::Item(_) => {},
+		}
+	}
+}
+
+fn add_logs_to_self_exprs(e: &mut syn::Expr, ctx: &mut AddLogsCtx) {
+	match e {
+		syn::Expr::Array(e) => {
+			for elem in e.elems.iter_mut() {
+				add_logs_to_self_exprs(elem, ctx);
+			}
+		},
+		syn::Expr::Assign(e) => {
+			add_logs_to_self_exprs(&mut *e.left, ctx);
+			add_logs_to_self_exprs(&mut *e.right, ctx);
+		},
+		syn::Expr::Async(e) => {
+			add_logs_to_stmt_list(&mut e.block.stmts, ctx);
+		},
+		syn::Expr::Await(e) => {
+			add_logs_to_self_exprs(&mut *e.base, ctx);
+		},
+		syn::Expr::Binary(e) => {
+			add_logs_to_self_exprs(&mut *e.left, ctx);
+			add_logs_to_self_exprs(&mut *e.right, ctx);
+		},
+		syn::Expr::Block(e) => {
+			let mut ctx = ctx.push_var_ctx();
+			add_logs_to_stmt_list(&mut e.block.stmts, &mut ctx);
+		},
+		syn::Expr::Break(e) => {
+			if let Some(e) = e.expr.as_mut() {
+				add_logs_to_self_exprs(&mut *e, ctx);
+			}
+		},
+		syn::Expr::Call(e) => {
+			let mut needs_log_param = false;
+			if let syn::Expr::Path(p) = &*e.func {
+				if p.path.segments.len() == 2 && ctx.otherstructs_logged.iter().any(|m| *m == p.path.segments[0].ident) {
+					needs_log_param = true;
+				}
+			} else {
+				add_logs_to_self_exprs(&mut *e.func, ctx);
+			}
+			for a in e.args.iter_mut() {
+				add_logs_to_self_exprs(a, ctx);
+			}
+			if needs_log_param {
+				e.args.push(parse(quote!(logger).into()).unwrap());
+			}
+		},
+		syn::Expr::Cast(e) => {
+			add_logs_to_self_exprs(&mut *e.expr, ctx);
+		},
+		syn::Expr::Closure(e) => {
+			let mut ctx = ctx.push_var_ctx();
+			for pat in e.inputs.iter() {
+				if let syn::Pat::Type(t) = pat {
+					process_var_decl(&*t.pat, &*t.ty, &mut ctx);
+				}
+			}
+			add_logs_to_self_exprs(&mut *e.body, &mut ctx);
+		},
+		syn::Expr::Const(_) => {},
+		syn::Expr::Continue(_) => {},
+		syn::Expr::Field(e) => {
+			add_logs_to_self_exprs(&mut *e.base, ctx);
+		},
+		syn::Expr::ForLoop(e) => {
+			add_logs_to_self_exprs(&mut *e.expr, ctx);
+			let mut ctx = ctx.push_var_ctx();
+			add_logs_to_stmt_list(&mut e.body.stmts, &mut ctx);
+		},
+		syn::Expr::Group(e) => {
+
+		},
+		syn::Expr::If(e) => {
+			add_logs_to_self_exprs(&mut *e.cond, ctx);
+			let mut if_ctx = ctx.push_var_ctx();
+			add_logs_to_stmt_list(&mut e.then_branch.stmts, &mut if_ctx);
+			if let Some((_, branch)) = e.else_branch.as_mut() {
+				let mut ctx = ctx.push_var_ctx();
+				add_logs_to_self_exprs(&mut *branch, &mut ctx);
+			}
+		},
+		syn::Expr::Index(e) => {
+			add_logs_to_self_exprs(&mut *e.expr, ctx);
+			add_logs_to_self_exprs(&mut *e.index, ctx);
+		},
+		syn::Expr::Infer(e) => {
+
+		},
+		syn::Expr::Let(e) => {
+			if let syn::Pat::Type(t) = &*e.pat {
+				process_var_decl(&*t.pat, &*t.ty, ctx);
+			}
+			add_logs_to_self_exprs(&mut *e.expr, ctx);
+		},
+		syn::Expr::Lit(_) => {},
+		
syn::Expr::Loop(e) => { + let mut ctx = ctx.push_var_ctx(); + add_logs_to_stmt_list(&mut e.body.stmts, &mut ctx); + }, + syn::Expr::Macro(e) => { + add_logs_to_macro_call(&mut e.mac, ctx); + }, + syn::Expr::Match(e) => { + add_logs_to_self_exprs(&mut *e.expr, ctx); + for arm in e.arms.iter_mut() { + if let Some((_, e)) = arm.guard.as_mut() { + add_logs_to_self_exprs(&mut *e, ctx); + } + add_logs_to_self_exprs(&mut *arm.body, ctx); + } + }, + syn::Expr::MethodCall(e) => { + match &*e.receiver { + syn::Expr::Path(path) => { + assert_eq!(path.path.segments.len(), 1, "Multiple segments should instead be parsed as a Field, below"); + let is_self_call = + path.qself.is_none() + && path.path.segments.len() == 1 + && path.path.segments[0].ident.to_string() == "self"; + if is_self_call && ctx.methods_with_param.iter().any(|m| *m == e.method) { + e.args.push(parse(quote!(logger).into()).unwrap()); + } + if let Some(varty) = ctx.resolve_var(&path.path.segments[0].ident) { + if ctx.otherstructs_logged.iter().any(|m| m == varty) { + if e.method != "clone" { + e.args.push(parse(quote!(logger).into()).unwrap()); + } + } + } + }, + syn::Expr::Field(field) => { + if let syn::Expr::Path(p) = &*field.base { + let is_self_call = + p.qself.is_none() + && p.path.segments.len() == 1 + && p.path.segments[0].ident.to_string() == "self"; + if let syn::Member::Named(field) = &field.member { + if is_self_call && ctx.substructs_logged.iter().any(|m| m == field) { + e.args.push(parse(quote!(logger).into()).unwrap()); + } + } else { + add_logs_to_self_exprs(&mut *e.receiver, ctx); + } + } else { + add_logs_to_self_exprs(&mut *e.receiver, ctx); + } + }, + _ => add_logs_to_self_exprs(&mut *e.receiver, ctx), + } + for a in e.args.iter_mut() { + add_logs_to_self_exprs(a, ctx); + } + }, + syn::Expr::Paren(e) => { + add_logs_to_self_exprs(&mut *e.expr, ctx); + }, + syn::Expr::Path(_) => {}, + syn::Expr::Range(e) => { + if let Some(start) = e.start.as_mut() { + add_logs_to_self_exprs(start, ctx); + } + if let Some(end) = e.end.as_mut() { + add_logs_to_self_exprs(end, ctx); + } + }, + syn::Expr::RawAddr(e) => { + + }, + syn::Expr::Reference(e) => { + add_logs_to_self_exprs(&mut *e.expr, ctx); + }, + syn::Expr::Repeat(e) => { + add_logs_to_self_exprs(&mut *e.expr, ctx); + add_logs_to_self_exprs(&mut *e.len, ctx); + }, + syn::Expr::Return(e) => { + if let Some(e) = e.expr.as_mut() { + add_logs_to_self_exprs(&mut *e, ctx); + } + }, + syn::Expr::Struct(e) => { + for field in e.fields.iter_mut() { + add_logs_to_self_exprs(&mut field.expr, ctx); + } + if let Some(rest) = e.rest.as_mut() { + add_logs_to_self_exprs(rest, ctx); + } + }, + syn::Expr::Try(e) => { + add_logs_to_self_exprs(&mut *e.expr, ctx); + }, + syn::Expr::TryBlock(e) => { + let mut ctx = ctx.push_var_ctx(); + add_logs_to_stmt_list(&mut e.block.stmts, &mut ctx); + }, + syn::Expr::Tuple(e) => { + for elem in e.elems.iter_mut() { + add_logs_to_self_exprs(elem, ctx); + } + }, + syn::Expr::Unary(e) => { + add_logs_to_self_exprs(&mut *e.expr, ctx); + }, + syn::Expr::Unsafe(e) => { + + }, + syn::Expr::Verbatim(e) => { + + }, + syn::Expr::While(e) => { + + }, + syn::Expr::Yield(e) => { + + }, + _ => {}, + } +} + +/// This attribute, on an `impl` block, will add logging parameters transparently to every method +/// in the `impl` block. It will also pass through the current logger to any calls to modified +/// methods. 
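+///
+/// Methods which already take an explicit `logger` argument are left unchanged, allowing
+/// individual methods to opt out (e.g. where specific lifetimes are required).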
+///
+/// Provided attributes should be in the form `<LoggerType $(, substruct = subfield)*>` where
+/// `LoggerType` is the type of the logger object (required) and `subfield` is any number of
+/// fields (accessible through `self`) which have had their `impl` block(s) similarly modified.
+///
+/// For example, this translates:
+/// ```rust
+/// struct B;
+/// struct A { field_b: B }
+///
+/// #[add_logging(<LogType, substruct = field_b>)]
+/// impl A {
+/// 	fn f_a(&self) {
+/// 		logger.log();
+/// 	}
+/// 	fn f(&self) {
+/// 		self.f_a();
+/// 		self.field_b.f();
+/// 	}
+/// }
+///
+/// #[add_logging(<LogType>)]
+/// impl B {
+/// 	fn f(&self) {
+/// 		logger.log();
+/// 	}
+/// }
+/// ```
+///
+/// to this:
+///
+/// ```rust
+/// struct B;
+/// struct A { field_b: B }
+///
+/// impl A {
+/// 	fn f_a(&self, logger: &LogType) {
+/// 		logger.log();
+/// 	}
+/// 	fn f(&self, logger: &LogType) {
+/// 		self.f_a(logger);
+/// 		self.field_b.f(logger);
+/// 	}
+/// }
+///
+/// impl B {
+/// 	fn f(&self, logger: &LogType) {
+/// 		logger.log();
+/// 	}
+/// }
+/// ```
+#[proc_macro_attribute]
+pub fn add_logging(attrs: TokenStream, expr: TokenStream) -> TokenStream {
+	let mut im = if let Ok(parsed) = parse::<syn::Item>(expr) {
+		if let syn::Item::Impl(im) = parsed {
+			im
+		} else {
+			return (quote! {
+				compile_error!("add_logging can only be used on impl items")
+			})
+			.into();
+		}
+	} else {
+		return (quote! {
+			compile_error!("add_logging can only be used on impl items")
+		})
+		.into();
+	};
+
+	let parsed_attrs = parse::<syn::AngleBracketedGenericArguments>(attrs);
+	let (logger_type, substructs_logged, otherstructs_logged) = if let Ok(attrs) = &parsed_attrs {
+		if attrs.args.len() < 1 {
+			return (quote! {
+				compile_error!("add_logging must have at least the `logger: LoggerType` attribute")
+			})
+			.into();
+		}
+		let logger_ty = if let syn::GenericArgument::Type(ty) = &attrs.args[0] {
+			ty
+		} else {
+			return (quote! {
+				compile_error!("add_logging's attributes must start with `logger:`")
+			})
+			.into();
+		};
+		let mut substructs_logged = Vec::new();
+		let mut otherstructs_logged = Vec::new();
+		for arg in attrs.args.iter().skip(1) {
+			if let syn::GenericArgument::AssocType(syn::AssocType { ident, ty: syn::Type::Path(p), .. }) = arg {
+				match ident.to_string().as_str() {
+					// XXX: fix all these error strings
+					"substruct" => {
+						if p.path.leading_colon.is_some() || p.path.segments.len() != 1 {
+							return (quote! {
+								compile_error!("add_logging's attributes must be in the form `logger: Logger $(, substruct: field)*")
+							})
+							.into();
+						}
+						substructs_logged.push(p.path.segments[0].ident.clone());
+					},
+					"otherstruct" => {
+						if p.path.leading_colon.is_some() || p.path.segments.len() != 1 {
+							return (quote! {
+								compile_error!("add_logging's attributes must be in the form `logger: Logger $(, substruct: field)*")
+							})
+							.into();
+						}
+						otherstructs_logged.push(p.path.segments[0].ident.clone());
+					},
+					_ => return quote! {
+						compile_error!("add_logging's attributes must be in the form `logger: Logger $(, substruct: field)*")
+					}.into(),
+				}
+			} else {
+				return (quote! {
+					compile_error!("add_logging's attributes must be in the form `logger: Logger $(, substruct: field)*")
+				})
+				.into();
+			}
+		}
+
+		if let syn::Type::Path(p) = &*im.self_ty {
+			if p.path.segments.len() == 1 {
+				otherstructs_logged.push(p.path.segments[0].ident.clone());
+			}
+		}
+
+		(logger_ty, substructs_logged, otherstructs_logged)
+	} else {
+		return (quote!
{ + compile_error!("add_logging's attributes must be in the form `logger: Logger $(, substruct: field)*") + }) + .into(); + }; + + let mut methods_added = Vec::new(); + for item in im.items.iter_mut() { + if let syn::ImplItem::Fn(f) = item { + // In some rare cases, manually taking a logger may be required to specify appropriate + // lifetimes. Detect such cases and avoid adding a redundant logger. + let have_logger = f.sig.inputs.iter().any(|inp| { + if let syn::FnArg::Typed(syn::PatType { pat, .. }) = inp { + if let syn::Pat::Ident(syn::PatIdent { ident, .. }) = &**pat { + ident == "logger" + } else { + false + } + } else { + false + } + }); + if !have_logger { + if f.sig.generics.lt_token.is_none() { + f.sig.generics.lt_token = Some(Default::default()); + f.sig.generics.gt_token = Some(Default::default()); + } + f.sig.generics.params.push(parse(quote!(L: Deref).into()).unwrap()); + if f.sig.generics.where_clause.is_none() { + f.sig.generics.where_clause = Some(parse(quote!(where).into()).unwrap()); + } + let log_bound = parse(quote!(L::Target: Logger).into()).unwrap(); + f.sig.generics.where_clause.as_mut().unwrap().predicates.push(log_bound); + f.sig.inputs.push(parse(quote!(logger: &#logger_type).into()).unwrap()); + methods_added.push(f.sig.ident.clone()); + } + } + } + + let ctx = AddLogsCtx { + methods_with_param: &methods_added[..], + substructs_logged: &substructs_logged, + otherstructs_logged: &otherstructs_logged, + parent: None, + var_types: HashMap::new(), + }; + + for item in im.items.iter_mut() { + if let syn::ImplItem::Fn(f) = item { + if ctx.methods_with_param.contains(&f.sig.ident) { + let mut ctx = ctx.push_var_ctx(); + for input in f.sig.inputs.iter() { + if let syn::FnArg::Typed(ty) = input { + process_var_decl(&*ty.pat, &*ty.ty, &mut ctx); + } + } + add_logs_to_stmt_list(&mut f.block.stmts, &mut ctx); + } + } + } + + quote! { #im }.into() +} diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 515a3dc5f1d..3dc033cdf46 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -2018,8 +2018,15 @@ impl ChannelMonitor { } #[cfg(test)] - fn provide_secret(&self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> { - self.inner.lock().unwrap().provide_secret(idx, secret) + fn provide_secret( + &self, idx: u64, secret: [u8; 32], logger: &L, + ) -> Result<(), &'static str> + where + L::Target: Logger, + { + let mut inner = self.inner.lock().unwrap(); + let logger = WithChannelMonitor::from_impl(logger, &inner, None); + inner.provide_secret(idx, secret, &logger) } /// A variant of `Self::provide_latest_counterparty_commitment_tx` used to provide @@ -2029,11 +2036,15 @@ impl ChannelMonitor { /// /// This is used to provide the counterparty commitment transaction directly to the monitor /// before the initial persistence of a new channel. - pub(crate) fn provide_initial_counterparty_commitment_tx( - &self, commitment_tx: CommitmentTransaction, - ) { + pub(crate) fn provide_initial_counterparty_commitment_tx( + &self, commitment_tx: CommitmentTransaction, logger: &L, + ) + where + L::Target: Logger, + { let mut inner = self.inner.lock().unwrap(); - inner.provide_initial_counterparty_commitment_tx(commitment_tx); + let logger = WithChannelMonitor::from_impl(logger, &inner, None); + inner.provide_initial_counterparty_commitment_tx(commitment_tx, &logger); } /// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction. 
@@ -2041,28 +2052,41 @@ impl ChannelMonitor { /// possibly future revocation/preimage information) to claim outputs where possible. /// We cache also the mapping hash:commitment number to lighten pruning of old preimages by watchtowers. #[cfg(test)] - fn provide_latest_counterparty_commitment_tx( + fn provide_latest_counterparty_commitment_tx( &self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)>, - commitment_number: u64, their_per_commitment_point: PublicKey, - ) { + commitment_number: u64, their_per_commitment_point: PublicKey, logger: &L, + ) + where + L::Target: Logger, + { let mut inner = self.inner.lock().unwrap(); + let logger = WithChannelMonitor::from_impl(logger, &inner, None); inner.provide_latest_counterparty_commitment_tx( txid, htlc_outputs, commitment_number, their_per_commitment_point, + &logger, ) } #[cfg(test)] - #[rustfmt::skip] - fn provide_latest_holder_commitment_tx( + fn provide_latest_holder_commitment_tx( &self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: &[(HTLCOutputInCommitment, Option, Option)], - ) { - self.inner.lock().unwrap().provide_latest_holder_commitment_tx( - holder_commitment_tx, htlc_outputs, &Vec::new(), Vec::new(), - ).unwrap() + logger: &L, + ) + where + L::Target: Logger, + { + let mut inner = self.inner.lock().unwrap(); + let logger = WithChannelMonitor::from_impl(logger, &inner, None); + inner.provide_latest_holder_commitment_tx( + holder_commitment_tx, htlc_outputs, + &Vec::new(), + Vec::new(), + &logger, + ).unwrap(); } /// This is used to provide payment preimage(s) out-of-band during startup without updating the @@ -2370,8 +2394,8 @@ impl ChannelMonitor { inner.queue_latest_holder_commitment_txn_for_broadcast( broadcaster, &fee_estimator, - &logger, false, + &logger, ); } @@ -3466,153 +3490,473 @@ impl ChannelMonitorImpl { ConfirmationTarget::OutputSpendingFee } - /// Inserts a revocation secret into this channel monitor. Prunes old preimages if neither - /// needed by holder commitment transactions HTCLs nor by counterparty ones. Unless we haven't already seen - /// counterparty commitment transaction's secret, they are de facto pruned (we can use revocation key). - #[rustfmt::skip] - fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> { - if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) { - return Err("Previous secret did not match new one"); - } - - // Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill - // events for now-revoked/fulfilled HTLCs. - let mut removed_fulfilled_htlcs = false; - let prune_htlc_sources = |funding: &mut FundingScope| { - if let Some(txid) = funding.prev_counterparty_commitment_txid.take() { - if funding.current_counterparty_commitment_txid.unwrap() != txid { - let cur_claimables = funding.counterparty_claimable_outpoints.get( - &funding.current_counterparty_commitment_txid.unwrap()).unwrap(); - // We only need to remove fulfilled HTLCs once for the first `FundingScope` we - // come across since all `FundingScope`s share the same set of HTLC sources. 
- if !removed_fulfilled_htlcs { - for (_, ref source_opt) in funding.counterparty_claimable_outpoints.get(&txid).unwrap() { - if let Some(source) = source_opt { - if !cur_claimables.iter() - .any(|(_, cur_source_opt)| cur_source_opt == source_opt) - { - self.counterparty_fulfilled_htlcs.remove(&SentHTLCId::from_source(source)); - } - } - } - removed_fulfilled_htlcs = true; - } - for &mut (_, ref mut source_opt) in funding.counterparty_claimable_outpoints.get_mut(&txid).unwrap() { - *source_opt = None; - } - } else { - assert!(cfg!(fuzzing), "Commitment txids are unique outside of fuzzing, where hashes can collide"); - } - } - }; - core::iter::once(&mut self.funding).chain(&mut self.pending_funding).for_each(prune_htlc_sources); - - if !self.payment_preimages.is_empty() { - let min_idx = self.get_min_seen_secret(); - let counterparty_hash_commitment_number = &mut self.counterparty_hash_commitment_number; + /// Returns true if the channel has been closed (i.e. no further updates are allowed) and no + /// commitment state updates ever happened. + fn is_closed_without_updates(&self) -> bool { + let mut commitment_not_advanced = + self.current_counterparty_commitment_number == INITIAL_COMMITMENT_NUMBER; + commitment_not_advanced &= + self.current_holder_commitment_number == INITIAL_COMMITMENT_NUMBER; + (self.holder_tx_signed || self.lockdown_from_offchain) && commitment_not_advanced + } - self.payment_preimages.retain(|&k, _| { - for htlc in holder_commitment_htlcs!(self, CURRENT) { - if k == htlc.payment_hash { - return true - } - } - if let Some(htlcs) = holder_commitment_htlcs!(self, PREV) { - for htlc in htlcs { - if k == htlc.payment_hash { - return true - } - } - } - let contains = if let Some(cn) = counterparty_hash_commitment_number.get(&k) { - if *cn < min_idx { - return true - } - true - } else { false }; - if contains { - counterparty_hash_commitment_number.remove(&k); - } - false - }); - } + fn no_further_updates_allowed(&self) -> bool { + self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed + } - Ok(()) + fn get_latest_update_id(&self) -> u64 { + self.latest_update_id } + /// Returns the outpoint we are currently monitoring the chain for spends. This will change for + /// every splice that has reached its intended confirmation depth. #[rustfmt::skip] - fn provide_initial_counterparty_commitment_tx( - &mut self, commitment_tx: CommitmentTransaction, - ) { - // We populate this field for downgrades - self.initial_counterparty_commitment_info = Some((commitment_tx.per_commitment_point(), - commitment_tx.negotiated_feerate_per_kw(), commitment_tx.to_broadcaster_value_sat(), commitment_tx.to_countersignatory_value_sat())); + fn get_funding_txo(&self) -> OutPoint { + self.funding.channel_parameters.funding_outpoint + .expect("Funding outpoint must be set for active monitor") + } - #[cfg(debug_assertions)] { - let rebuilt_commitment_tx = self.initial_counterparty_commitment_tx().unwrap(); - debug_assert_eq!(rebuilt_commitment_tx.trust().txid(), commitment_tx.trust().txid()); - } + /// Returns the P2WSH script we are currently monitoring the chain for spends. This will change + /// for every splice that has reached its intended confirmation depth. 
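+	/// This is the P2WSH of the funding redeemscript for the currently-active `FundingScope`.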
+ fn get_funding_script(&self) -> ScriptBuf { + self.funding.channel_parameters.make_funding_redeemscript().to_p2wsh() + } - self.provide_latest_counterparty_commitment_tx(commitment_tx.trust().txid(), Vec::new(), commitment_tx.commitment_number(), - commitment_tx.per_commitment_point()); - // Soon, we will only populate this field - self.initial_counterparty_commitment_tx = Some(commitment_tx); + pub fn channel_id(&self) -> ChannelId { + self.channel_id } - #[rustfmt::skip] - fn provide_latest_counterparty_commitment_tx( - &mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)>, - commitment_number: u64, their_per_commitment_point: PublicKey, - ) { - // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction - // so that a remote monitor doesn't learn anything unless there is a malicious close. - // (only maybe, sadly we cant do the same for local info, as we need to be aware of - // timeouts) - for &(ref htlc, _) in &htlc_outputs { - self.counterparty_hash_commitment_number.insert(htlc.payment_hash, commitment_number); + fn get_outputs_to_watch(&self) -> &HashMap> { + // If we've detected a counterparty commitment tx on chain, we must include it in the set + // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because + // its trivial to do, double-check that here. + for txid in self.counterparty_commitment_txn_on_chain.keys() { + self.outputs_to_watch.get(txid).expect("Counterparty commitment txn which have been broadcast should have outputs registered"); } + &self.outputs_to_watch + } - self.funding.prev_counterparty_commitment_txid = self.funding.current_counterparty_commitment_txid.take(); - self.funding.current_counterparty_commitment_txid = Some(txid); - self.funding.counterparty_claimable_outpoints.insert(txid, htlc_outputs); - self.current_counterparty_commitment_number = commitment_number; + fn get_and_clear_pending_monitor_events(&mut self) -> Vec { + let mut ret = Vec::new(); + mem::swap(&mut ret, &mut self.pending_monitor_events); + ret + } - //TODO: Merge this into the other per-counterparty-transaction output storage stuff - match self.their_cur_per_commitment_points { - Some(old_points) => { - if old_points.0 == commitment_number + 1 { - self.their_cur_per_commitment_points = Some((old_points.0, old_points.1, Some(their_per_commitment_point))); - } else if old_points.0 == commitment_number + 2 { - if let Some(old_second_point) = old_points.2 { - self.their_cur_per_commitment_points = Some((old_points.0 - 1, old_second_point, Some(their_per_commitment_point))); - } else { - self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None)); - } - } else { - self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None)); + /// Gets the set of events that are repeated regularly (e.g. those which RBF bump + /// transactions). We're okay if we lose these on restart as they'll be regenerated for us at + /// some regular interval via [`ChannelMonitor::rebroadcast_pending_claims`]. 
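+	///
+	/// Concretely, this drains any pending claim events from the `OnchainTxHandler` and maps
+	/// them to `Event::BumpTransaction` events for the user to handle.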
+ #[rustfmt::skip] + pub(super) fn get_repeated_events(&mut self) -> Vec { + let pending_claim_events = self.onchain_tx_handler.get_and_clear_pending_claim_events(); + let mut ret = Vec::with_capacity(pending_claim_events.len()); + for (claim_id, claim_event) in pending_claim_events { + match claim_event { + ClaimEvent::BumpCommitment { + package_target_feerate_sat_per_1000_weight, commitment_tx, + commitment_tx_fee_satoshis, pending_nondust_htlcs, anchor_output_idx, + channel_parameters, + } => { + let channel_id = self.channel_id; + let counterparty_node_id = self.counterparty_node_id; + let commitment_txid = commitment_tx.compute_txid(); + ret.push(Event::BumpTransaction(BumpTransactionEvent::ChannelClose { + channel_id, + counterparty_node_id, + claim_id, + package_target_feerate_sat_per_1000_weight, + anchor_descriptor: AnchorDescriptor { + channel_derivation_parameters: ChannelDerivationParameters { + keys_id: self.channel_keys_id, + value_satoshis: channel_parameters.channel_value_satoshis, + transaction_parameters: channel_parameters, + }, + outpoint: BitcoinOutPoint { + txid: commitment_txid, + vout: anchor_output_idx, + }, + value: commitment_tx.output[anchor_output_idx as usize].value, + }, + pending_htlcs: pending_nondust_htlcs, + commitment_tx, + commitment_tx_fee_satoshis, + })); + }, + ClaimEvent::BumpHTLC { + target_feerate_sat_per_1000_weight, htlcs, tx_lock_time, + } => { + let channel_id = self.channel_id; + let counterparty_node_id = self.counterparty_node_id; + ret.push(Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { + channel_id, + counterparty_node_id, + claim_id, + target_feerate_sat_per_1000_weight, + htlc_descriptors: htlcs, + tx_lock_time, + })); } - }, - None => { - self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None)); } } + ret } - fn update_counterparty_commitment_data( - &mut self, commitment_txs: &[CommitmentTransaction], htlc_data: &CommitmentHTLCData, - ) -> Result<(), &'static str> { - self.verify_matching_commitment_transactions(commitment_txs.iter())?; + /// Can only fail if idx is < get_min_seen_secret + fn get_secret(&self, idx: u64) -> Option<[u8; 32]> { + self.commitment_secrets.get_secret(idx) + } - let htlcs_for_commitment = |commitment: &CommitmentTransaction| { - debug_assert!(htlc_data.nondust_htlc_sources.len() <= commitment.nondust_htlcs().len()); - let mut nondust_htlcs = commitment.nondust_htlcs().iter(); - let mut sources = htlc_data.nondust_htlc_sources.iter(); - let nondust_htlcs = core::iter::from_fn(move || { - let htlc = nondust_htlcs.next()?.clone(); - let source = (!htlc.offered).then(|| { - let source = sources - .next() - .expect("Every inbound non-dust HTLC should have a corresponding source") + fn get_min_seen_secret(&self) -> u64 { + self.commitment_secrets.get_min_seen_secret() + } + + fn get_cur_counterparty_commitment_number(&self) -> u64 { + self.current_counterparty_commitment_number + } + + fn get_cur_holder_commitment_number(&self) -> u64 { + self.current_holder_commitment_number + } + + fn channel_type_features(&self) -> &ChannelTypeFeatures { + &self.funding.channel_parameters.channel_type_features + } + + fn initial_counterparty_commitment_tx(&mut self) -> Option { + self.initial_counterparty_commitment_tx.clone().or_else(|| { + // This provides forward compatibility; an old monitor will not contain the full + // transaction; only enough information to rebuild it + self.initial_counterparty_commitment_info.map( + |( + their_per_commitment_point, + 
feerate_per_kw, + to_broadcaster_value, + to_countersignatory_value, + )| { + let nondust_htlcs = vec![]; + // Since we're expected to only reach here during the initial persistence of a + // monitor (i.e., via [`Persist::persist_new_channel`]), we expect to only have + // one `FundingScope` present. + debug_assert!(self.pending_funding.is_empty()); + let channel_parameters = &self.funding.channel_parameters; + + let commitment_tx = self.build_counterparty_commitment_tx( + channel_parameters, + INITIAL_COMMITMENT_NUMBER, + &their_per_commitment_point, + to_broadcaster_value, + to_countersignatory_value, + feerate_per_kw, + nondust_htlcs, + ); + // Take the opportunity to populate this recently introduced field + self.initial_counterparty_commitment_tx = Some(commitment_tx.clone()); + commitment_tx + }, + ) + }) + } + + #[rustfmt::skip] + fn build_counterparty_commitment_tx( + &self, channel_parameters: &ChannelTransactionParameters, commitment_number: u64, + their_per_commitment_point: &PublicKey, to_broadcaster_value: u64, + to_countersignatory_value: u64, feerate_per_kw: u32, + nondust_htlcs: Vec + ) -> CommitmentTransaction { + let channel_parameters = &channel_parameters.as_counterparty_broadcastable(); + CommitmentTransaction::new(commitment_number, their_per_commitment_point, + to_broadcaster_value, to_countersignatory_value, feerate_per_kw, nondust_htlcs, channel_parameters, &self.onchain_tx_handler.secp_ctx) + } + + #[rustfmt::skip] + fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec { + update.updates.iter().filter_map(|update| { + // Soon we will drop the first branch here in favor of the second. + // In preparation, we just add the second branch without deleting the first. + // Next step: in channel, switch channel monitor updates to use the `LatestCounterpartyCommitment` variant. + match update { + &ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, + ref htlc_outputs, commitment_number, their_per_commitment_point, + feerate_per_kw: Some(feerate_per_kw), + to_broadcaster_value_sat: Some(to_broadcaster_value), + to_countersignatory_value_sat: Some(to_countersignatory_value) } => { + + let nondust_htlcs = htlc_outputs.iter().filter_map(|(htlc, _)| { + htlc.transaction_output_index.map(|_| htlc).cloned() + }).collect::>(); + + // This monitor update variant is only applicable while there's a single + // `FundingScope` active, otherwise we expect to see + // `LatestCounterpartyCommitment` instead. + debug_assert!(self.pending_funding.is_empty()); + let channel_parameters = &self.funding.channel_parameters; + let commitment_tx = self.build_counterparty_commitment_tx( + channel_parameters, + commitment_number, + &their_per_commitment_point, + to_broadcaster_value, + to_countersignatory_value, + feerate_per_kw, + nondust_htlcs, + ); + + debug_assert_eq!(commitment_tx.trust().txid(), commitment_txid); + + Some(vec![commitment_tx]) + }, + &ChannelMonitorUpdateStep::LatestCounterpartyCommitment { ref commitment_txs, .. } => { + Some(commitment_txs.clone()) + }, + &ChannelMonitorUpdateStep::RenegotiatedFunding { ref counterparty_commitment_tx, .. 
} => { + Some(vec![counterparty_commitment_tx.clone()]) + }, + _ => None, + } + }).flatten().collect() + } + + #[rustfmt::skip] + fn sign_to_local_justice_tx( + &self, mut justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64 + ) -> Result { + let secret = self.get_secret(commitment_number).ok_or(())?; + let per_commitment_key = SecretKey::from_slice(&secret).map_err(|_| ())?; + let their_per_commitment_point = PublicKey::from_secret_key( + &self.onchain_tx_handler.secp_ctx, &per_commitment_key); + + let revocation_pubkey = RevocationKey::from_basepoint(&self.onchain_tx_handler.secp_ctx, + &self.holder_revocation_basepoint, &their_per_commitment_point); + let delayed_key = DelayedPaymentKey::from_basepoint(&self.onchain_tx_handler.secp_ctx, + &self.counterparty_commitment_params.counterparty_delayed_payment_base_key, &their_per_commitment_point); + let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, + self.counterparty_commitment_params.on_counterparty_tx_csv, &delayed_key); + + let commitment_txid = &justice_tx.input[input_idx].previous_output.txid; + // Since there may be multiple counterparty commitment transactions for the same commitment + // number due to splicing, we have to locate the matching `FundingScope::channel_parameters` + // to provide the signer. Since this is intended to be called during + // `Persist::update_persisted_channel`, the monitor should have already had the update + // applied. + let channel_parameters = core::iter::once(&self.funding) + .chain(&self.pending_funding) + .find(|funding| funding.counterparty_claimable_outpoints.contains_key(commitment_txid)) + .map(|funding| &funding.channel_parameters) + .ok_or(())?; + let sig = self.onchain_tx_handler.signer.sign_justice_revoked_output( + &channel_parameters, &justice_tx, input_idx, value, &per_commitment_key, + &self.onchain_tx_handler.secp_ctx, + )?; + justice_tx.input[input_idx].witness.push_ecdsa_signature(&BitcoinSignature::sighash_all(sig)); + justice_tx.input[input_idx].witness.push(&[1u8]); + justice_tx.input[input_idx].witness.push(revokeable_redeemscript.as_bytes()); + Ok(justice_tx) + } + + #[rustfmt::skip] + fn get_spendable_outputs(&self, funding_spent: &FundingScope, tx: &Transaction) -> Vec { + let mut spendable_outputs = Vec::new(); + for (i, outp) in tx.output.iter().enumerate() { + if outp.script_pubkey == self.destination_script { + spendable_outputs.push(SpendableOutputDescriptor::StaticOutput { + outpoint: OutPoint { txid: tx.compute_txid(), index: i as u16 }, + output: outp.clone(), + channel_keys_id: Some(self.channel_keys_id), + }); + } + if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script { + if broadcasted_holder_revokable_script.0 == outp.script_pubkey { + spendable_outputs.push(SpendableOutputDescriptor::DelayedPaymentOutput(DelayedPaymentOutputDescriptor { + outpoint: OutPoint { txid: tx.compute_txid(), index: i as u16 }, + per_commitment_point: broadcasted_holder_revokable_script.1, + to_self_delay: self.on_holder_tx_csv, + output: outp.clone(), + revocation_pubkey: broadcasted_holder_revokable_script.2, + channel_keys_id: self.channel_keys_id, + channel_value_satoshis: funding_spent.channel_parameters.channel_value_satoshis, + channel_transaction_parameters: Some(funding_spent.channel_parameters.clone()), + })); + } + } + if self.counterparty_payment_script == outp.script_pubkey { + 
spendable_outputs.push(SpendableOutputDescriptor::StaticPaymentOutput(StaticPaymentOutputDescriptor { + outpoint: OutPoint { txid: tx.compute_txid(), index: i as u16 }, + output: outp.clone(), + channel_keys_id: self.channel_keys_id, + channel_value_satoshis: funding_spent.channel_parameters.channel_value_satoshis, + channel_transaction_parameters: Some(funding_spent.channel_parameters.clone()), + })); + } + if self.shutdown_script.as_ref() == Some(&outp.script_pubkey) { + spendable_outputs.push(SpendableOutputDescriptor::StaticOutput { + outpoint: OutPoint { txid: tx.compute_txid(), index: i as u16 }, + output: outp.clone(), + channel_keys_id: Some(self.channel_keys_id), + }); + } + } + spendable_outputs + } +} + +#[lightning_macros::add_logging( + , + substruct = onchain_tx_handler, + otherstruct = CounterpartyOfferedHTLCOutput, + otherstruct = CounterpartyReceivedHTLCOutput, + otherstruct = RevokedHTLCOutput, + otherstruct = RevokedOutput, + otherstruct = HolderHTLCOutput, + otherstruct = HolderFundingOutput, + otherstruct = PackageTemplate + >)] +impl ChannelMonitorImpl { + /// Inserts a revocation secret into this channel monitor. Prunes old preimages if neither + /// needed by holder commitment transactions HTCLs nor by counterparty ones. Unless we haven't already seen + /// counterparty commitment transaction's secret, they are de facto pruned (we can use revocation key). + #[rustfmt::skip] + fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> { + if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) { + return Err("Previous secret did not match new one"); + } + + // Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill + // events for now-revoked/fulfilled HTLCs. + let mut removed_fulfilled_htlcs = false; + let prune_htlc_sources = |funding: &mut FundingScope| { + if let Some(txid) = funding.prev_counterparty_commitment_txid.take() { + if funding.current_counterparty_commitment_txid.unwrap() != txid { + let cur_claimables = funding.counterparty_claimable_outpoints.get( + &funding.current_counterparty_commitment_txid.unwrap()).unwrap(); + // We only need to remove fulfilled HTLCs once for the first `FundingScope` we + // come across since all `FundingScope`s share the same set of HTLC sources. 
+ if !removed_fulfilled_htlcs { + for (_, ref source_opt) in funding.counterparty_claimable_outpoints.get(&txid).unwrap() { + if let Some(source) = source_opt { + if !cur_claimables.iter() + .any(|(_, cur_source_opt)| cur_source_opt == source_opt) + { + self.counterparty_fulfilled_htlcs.remove(&SentHTLCId::from_source(source)); + } + } + } + removed_fulfilled_htlcs = true; + } + for &mut (_, ref mut source_opt) in funding.counterparty_claimable_outpoints.get_mut(&txid).unwrap() { + *source_opt = None; + } + } else { + assert!(cfg!(fuzzing), "Commitment txids are unique outside of fuzzing, where hashes can collide"); + } + } + }; + core::iter::once(&mut self.funding).chain(&mut self.pending_funding).for_each(prune_htlc_sources); + + if !self.payment_preimages.is_empty() { + let min_idx = self.get_min_seen_secret(); + let counterparty_hash_commitment_number = &mut self.counterparty_hash_commitment_number; + + self.payment_preimages.retain(|&k, _| { + for htlc in holder_commitment_htlcs!(self, CURRENT) { + if k == htlc.payment_hash { + return true + } + } + if let Some(htlcs) = holder_commitment_htlcs!(self, PREV) { + for htlc in htlcs { + if k == htlc.payment_hash { + return true + } + } + } + let contains = if let Some(cn) = counterparty_hash_commitment_number.get(&k) { + if *cn < min_idx { + return true + } + true + } else { false }; + if contains { + counterparty_hash_commitment_number.remove(&k); + } + false + }); + } + + Ok(()) + } + + #[rustfmt::skip] + fn provide_initial_counterparty_commitment_tx( + &mut self, commitment_tx: CommitmentTransaction, + ) { + // We populate this field for downgrades + self.initial_counterparty_commitment_info = Some((commitment_tx.per_commitment_point(), + commitment_tx.negotiated_feerate_per_kw(), commitment_tx.to_broadcaster_value_sat(), commitment_tx.to_countersignatory_value_sat())); + + #[cfg(debug_assertions)] { + let rebuilt_commitment_tx = self.initial_counterparty_commitment_tx().unwrap(); + debug_assert_eq!(rebuilt_commitment_tx.trust().txid(), commitment_tx.trust().txid()); + } + + self.provide_latest_counterparty_commitment_tx(commitment_tx.trust().txid(), Vec::new(), commitment_tx.commitment_number(), + commitment_tx.per_commitment_point()); + // Soon, we will only populate this field + self.initial_counterparty_commitment_tx = Some(commitment_tx); + } + + #[rustfmt::skip] + fn provide_latest_counterparty_commitment_tx( + &mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)>, + commitment_number: u64, their_per_commitment_point: PublicKey, + ) { + // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction + // so that a remote monitor doesn't learn anything unless there is a malicious close. 
+ // (only maybe, sadly we cant do the same for local info, as we need to be aware of + // timeouts) + for &(ref htlc, _) in &htlc_outputs { + self.counterparty_hash_commitment_number.insert(htlc.payment_hash, commitment_number); + } + + self.funding.prev_counterparty_commitment_txid = self.funding.current_counterparty_commitment_txid.take(); + self.funding.current_counterparty_commitment_txid = Some(txid); + self.funding.counterparty_claimable_outpoints.insert(txid, htlc_outputs); + self.current_counterparty_commitment_number = commitment_number; + + //TODO: Merge this into the other per-counterparty-transaction output storage stuff + match self.their_cur_per_commitment_points { + Some(old_points) => { + if old_points.0 == commitment_number + 1 { + self.their_cur_per_commitment_points = Some((old_points.0, old_points.1, Some(their_per_commitment_point))); + } else if old_points.0 == commitment_number + 2 { + if let Some(old_second_point) = old_points.2 { + self.their_cur_per_commitment_points = Some((old_points.0 - 1, old_second_point, Some(their_per_commitment_point))); + } else { + self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None)); + } + } else { + self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None)); + } + }, + None => { + self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None)); + } + } + } + + fn update_counterparty_commitment_data( + &mut self, commitment_txs: &[CommitmentTransaction], htlc_data: &CommitmentHTLCData, + ) -> Result<(), &'static str> { + self.verify_matching_commitment_transactions(commitment_txs.iter())?; + + let htlcs_for_commitment = |commitment: &CommitmentTransaction| { + debug_assert!(htlc_data.nondust_htlc_sources.len() <= commitment.nondust_htlcs().len()); + let mut nondust_htlcs = commitment.nondust_htlcs().iter(); + let mut sources = htlc_data.nondust_htlc_sources.iter(); + let nondust_htlcs = core::iter::from_fn(move || { + let htlc = nondust_htlcs.next()?.clone(); + let source = (!htlc.offered).then(|| { + let source = sources + .next() + .expect("Every inbound non-dust HTLC should have a corresponding source") .clone(); Box::new(source) }); @@ -3826,13 +4170,13 @@ impl ChannelMonitorImpl { /// /// Note that this is often called multiple times for the same payment and must be idempotent. #[rustfmt::skip] - fn provide_payment_preimage( + fn provide_payment_preimage( &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, payment_info: &Option, broadcaster: &B, - fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor) + fee_estimator: &LowerBoundedFeeEstimator, + ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger, { self.payment_preimages.entry(payment_hash.clone()) .and_modify(|(_, payment_infos)| { @@ -3866,7 +4210,7 @@ impl ChannelMonitorImpl { // First check if a counterparty commitment transaction has been broadcasted: macro_rules! 
claim_htlcs { ($commitment_number: expr, $txid: expr, $htlcs: expr) => { - let htlc_claim_reqs = self.get_counterparty_output_claims_for_preimage(*payment_preimage, funding_spent, $commitment_number, $txid, $htlcs, confirmed_spend_height); + let htlc_claim_reqs = self.get_counterparty_output_claims_for_preimage(*payment_preimage, funding_spent, $commitment_number, $txid, $htlcs, confirmed_spend_height, logger); let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests( htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, @@ -3924,7 +4268,7 @@ impl ChannelMonitorImpl { let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests( claim_reqs, self.best_block.height, self.best_block.height, broadcaster, - conf_target, &self.destination_script, fee_estimator, logger, + conf_target, &self.destination_script, fee_estimator, ); } } @@ -4005,14 +4349,13 @@ impl ChannelMonitorImpl { /// See also [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]. /// /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]: crate::chain::channelmonitor::ChannelMonitor::broadcast_latest_holder_commitment_txn - pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( - &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor, + fn queue_latest_holder_commitment_txn_for_broadcast( + &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, require_funding_seen: bool, ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger, { let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), @@ -4029,19 +4372,16 @@ impl ChannelMonitorImpl { let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests( claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster, - conf_target, &self.destination_script, fee_estimator, logger, + conf_target, &self.destination_script, fee_estimator, ); } - fn renegotiated_funding( - &mut self, logger: &WithChannelMonitor, + fn renegotiated_funding( + &mut self, channel_parameters: &ChannelTransactionParameters, alternative_holder_commitment_tx: &HolderCommitmentTransaction, alternative_counterparty_commitment_tx: &CommitmentTransaction, - ) -> Result<(), ()> - where - L::Target: Logger, - { + ) -> Result<(), ()> { let alternative_counterparty_commitment_txid = alternative_counterparty_commitment_tx.trust().txid(); @@ -4209,12 +4549,11 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn update_monitor( - &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithChannelMonitor + fn update_monitor( + &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, ) -> Result<(), ()> where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger, { if self.latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID && updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID { log_info!(logger, "Applying pre-0.1 post-force-closed update to monitor {} with {} change(s).", @@ -4305,368 +4644,110 @@ impl ChannelMonitorImpl { }, ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage, payment_info } => { log_trace!(logger, "Updating ChannelMonitor with payment preimage"); - self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()), &payment_preimage, payment_info, broadcaster, &bounded_fee_estimator, 
logger) + self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()), &payment_preimage, payment_info, broadcaster, &bounded_fee_estimator) }, ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => { log_trace!(logger, "Updating ChannelMonitor with commitment secret"); if let Err(e) = self.provide_secret(*idx, *secret) { debug_assert!(false, "Latest counterparty commitment secret was invalid"); log_error!(logger, "Providing latest counterparty commitment secret failed/was refused:"); - log_error!(logger, " {}", e); - ret = Err(()); - } - }, - ChannelMonitorUpdateStep::RenegotiatedFunding { - channel_parameters, holder_commitment_tx, counterparty_commitment_tx, - } => { - log_trace!(logger, "Updating ChannelMonitor with alternative holder and counterparty commitment transactions for funding txid {}", - channel_parameters.funding_outpoint.unwrap().txid); - if let Err(_) = self.renegotiated_funding( - logger, channel_parameters, holder_commitment_tx, counterparty_commitment_tx, - ) { - ret = Err(()); - } - }, - ChannelMonitorUpdateStep::RenegotiatedFundingLocked { funding_txid } => { - log_trace!(logger, "Updating ChannelMonitor with locked renegotiated funding txid {}", funding_txid); - if let Err(_) = self.promote_funding(*funding_txid) { - log_error!(logger, "Unknown funding with txid {} became locked", funding_txid); - ret = Err(()); - } - }, - ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => { - log_trace!(logger, "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast); - self.lockdown_from_offchain = true; - if *should_broadcast { - // There's no need to broadcast our commitment transaction if we've seen one - // confirmed (even with 1 confirmation) as it'll be rejected as - // duplicate/conflicting. - let detected_funding_spend = self.funding_spend_confirmed.is_some() || - self.onchain_events_awaiting_threshold_conf.iter().any( - |event| matches!(event.event, OnchainEvent::FundingSpendConfirmation { .. })); - if detected_funding_spend { - log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain"); - continue; - } - self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger, true); - } else if !self.holder_tx_signed { - log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast"); - log_error!(logger, " in channel monitor!"); - log_error!(logger, " Read the docs for ChannelMonitor::broadcast_latest_holder_commitment_txn to take manual action!"); - } else { - // If we generated a MonitorEvent::HolderForceClosed, the ChannelManager - // will still give us a ChannelForceClosed event with !should_broadcast, but we - // shouldn't print the scary warning above. 
- log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction."); - } - }, - ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey } => { - log_trace!(logger, "Updating ChannelMonitor with shutdown script"); - if let Some(shutdown_script) = self.shutdown_script.replace(scriptpubkey.clone()) { - panic!("Attempted to replace shutdown script {} with {}", shutdown_script, scriptpubkey); - } - }, - ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc } => { - log_trace!(logger, "HTLC {htlc:?} permanently and fully resolved"); - self.htlcs_resolved_to_user.insert(*htlc); - }, - } - } - - #[cfg(debug_assertions)] { - self.counterparty_commitment_txs_from_update(updates); - } - - self.latest_update_id = updates.update_id; - - // Refuse updates after we've detected a spend onchain (or if the channel was otherwise - // closed), but only if the update isn't the kind of update we expect to see after channel - // closure. - let mut is_pre_close_update = false; - for update in updates.updates.iter() { - match update { - ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { .. } - |ChannelMonitorUpdateStep::LatestHolderCommitment { .. } - |ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. } - |ChannelMonitorUpdateStep::LatestCounterpartyCommitment { .. } - |ChannelMonitorUpdateStep::ShutdownScript { .. } - |ChannelMonitorUpdateStep::CommitmentSecret { .. } - |ChannelMonitorUpdateStep::RenegotiatedFunding { .. } - |ChannelMonitorUpdateStep::RenegotiatedFundingLocked { .. } => - is_pre_close_update = true, - // After a channel is closed, we don't communicate with our peer about it, so the - // only things we will update is getting a new preimage (from a different channel), - // being told that the channel is closed, or being told a payment which was - // resolved on-chain has had its resolution communicated to the user. All other - // updates are generated while talking to our peer. - ChannelMonitorUpdateStep::PaymentPreimage { .. } => {}, - ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {}, - ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {}, - } - } - - if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update { - log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); - Err(()) - } else { ret } - } - - /// Returns true if the channel has been closed (i.e. no further updates are allowed) and no - /// commitment state updates ever happened. - fn is_closed_without_updates(&self) -> bool { - let mut commitment_not_advanced = - self.current_counterparty_commitment_number == INITIAL_COMMITMENT_NUMBER; - commitment_not_advanced &= - self.current_holder_commitment_number == INITIAL_COMMITMENT_NUMBER; - (self.holder_tx_signed || self.lockdown_from_offchain) && commitment_not_advanced - } - - fn no_further_updates_allowed(&self) -> bool { - self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed - } - - fn get_latest_update_id(&self) -> u64 { - self.latest_update_id - } - - /// Returns the outpoint we are currently monitoring the chain for spends. This will change for - /// every splice that has reached its intended confirmation depth. - #[rustfmt::skip] - fn get_funding_txo(&self) -> OutPoint { - self.funding.channel_parameters.funding_outpoint - .expect("Funding outpoint must be set for active monitor") - } - - /// Returns the P2WSH script we are currently monitoring the chain for spends. 
This will change - /// for every splice that has reached its intended confirmation depth. - fn get_funding_script(&self) -> ScriptBuf { - self.funding.channel_parameters.make_funding_redeemscript().to_p2wsh() - } - - pub fn channel_id(&self) -> ChannelId { - self.channel_id - } - - fn get_outputs_to_watch(&self) -> &HashMap> { - // If we've detected a counterparty commitment tx on chain, we must include it in the set - // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because - // its trivial to do, double-check that here. - for txid in self.counterparty_commitment_txn_on_chain.keys() { - self.outputs_to_watch.get(txid).expect("Counterparty commitment txn which have been broadcast should have outputs registered"); - } - &self.outputs_to_watch - } - - fn get_and_clear_pending_monitor_events(&mut self) -> Vec { - let mut ret = Vec::new(); - mem::swap(&mut ret, &mut self.pending_monitor_events); - ret - } - - /// Gets the set of events that are repeated regularly (e.g. those which RBF bump - /// transactions). We're okay if we lose these on restart as they'll be regenerated for us at - /// some regular interval via [`ChannelMonitor::rebroadcast_pending_claims`]. - #[rustfmt::skip] - pub(super) fn get_repeated_events(&mut self) -> Vec { - let pending_claim_events = self.onchain_tx_handler.get_and_clear_pending_claim_events(); - let mut ret = Vec::with_capacity(pending_claim_events.len()); - for (claim_id, claim_event) in pending_claim_events { - match claim_event { - ClaimEvent::BumpCommitment { - package_target_feerate_sat_per_1000_weight, commitment_tx, - commitment_tx_fee_satoshis, pending_nondust_htlcs, anchor_output_idx, - channel_parameters, - } => { - let channel_id = self.channel_id; - let counterparty_node_id = self.counterparty_node_id; - let commitment_txid = commitment_tx.compute_txid(); - ret.push(Event::BumpTransaction(BumpTransactionEvent::ChannelClose { - channel_id, - counterparty_node_id, - claim_id, - package_target_feerate_sat_per_1000_weight, - anchor_descriptor: AnchorDescriptor { - channel_derivation_parameters: ChannelDerivationParameters { - keys_id: self.channel_keys_id, - value_satoshis: channel_parameters.channel_value_satoshis, - transaction_parameters: channel_parameters, - }, - outpoint: BitcoinOutPoint { - txid: commitment_txid, - vout: anchor_output_idx, - }, - value: commitment_tx.output[anchor_output_idx as usize].value, - }, - pending_htlcs: pending_nondust_htlcs, - commitment_tx, - commitment_tx_fee_satoshis, - })); - }, - ClaimEvent::BumpHTLC { - target_feerate_sat_per_1000_weight, htlcs, tx_lock_time, - } => { - let channel_id = self.channel_id; - let counterparty_node_id = self.counterparty_node_id; - ret.push(Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { - channel_id, - counterparty_node_id, - claim_id, - target_feerate_sat_per_1000_weight, - htlc_descriptors: htlcs, - tx_lock_time, - })); - } - } - } - ret - } - - fn initial_counterparty_commitment_tx(&mut self) -> Option { - self.initial_counterparty_commitment_tx.clone().or_else(|| { - // This provides forward compatibility; an old monitor will not contain the full - // transaction; only enough information to rebuild it - self.initial_counterparty_commitment_info.map( - |( - their_per_commitment_point, - feerate_per_kw, - to_broadcaster_value, - to_countersignatory_value, - )| { - let nondust_htlcs = vec![]; - // Since we're expected to only reach here during the initial persistence of a - // monitor (i.e., via [`Persist::persist_new_channel`]), we 
expect to only have - // one `FundingScope` present. - debug_assert!(self.pending_funding.is_empty()); - let channel_parameters = &self.funding.channel_parameters; - - let commitment_tx = self.build_counterparty_commitment_tx( - channel_parameters, - INITIAL_COMMITMENT_NUMBER, - &their_per_commitment_point, - to_broadcaster_value, - to_countersignatory_value, - feerate_per_kw, - nondust_htlcs, - ); - // Take the opportunity to populate this recently introduced field - self.initial_counterparty_commitment_tx = Some(commitment_tx.clone()); - commitment_tx + log_error!(logger, " {}", e); + ret = Err(()); + } }, - ) - }) - } - - #[rustfmt::skip] - fn build_counterparty_commitment_tx( - &self, channel_parameters: &ChannelTransactionParameters, commitment_number: u64, - their_per_commitment_point: &PublicKey, to_broadcaster_value: u64, - to_countersignatory_value: u64, feerate_per_kw: u32, - nondust_htlcs: Vec - ) -> CommitmentTransaction { - let channel_parameters = &channel_parameters.as_counterparty_broadcastable(); - CommitmentTransaction::new(commitment_number, their_per_commitment_point, - to_broadcaster_value, to_countersignatory_value, feerate_per_kw, nondust_htlcs, channel_parameters, &self.onchain_tx_handler.secp_ctx) - } - - #[rustfmt::skip] - fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec { - update.updates.iter().filter_map(|update| { - // Soon we will drop the first branch here in favor of the second. - // In preparation, we just add the second branch without deleting the first. - // Next step: in channel, switch channel monitor updates to use the `LatestCounterpartyCommitment` variant. - match update { - &ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, - ref htlc_outputs, commitment_number, their_per_commitment_point, - feerate_per_kw: Some(feerate_per_kw), - to_broadcaster_value_sat: Some(to_broadcaster_value), - to_countersignatory_value_sat: Some(to_countersignatory_value) } => { - - let nondust_htlcs = htlc_outputs.iter().filter_map(|(htlc, _)| { - htlc.transaction_output_index.map(|_| htlc).cloned() - }).collect::>(); - - // This monitor update variant is only applicable while there's a single - // `FundingScope` active, otherwise we expect to see - // `LatestCounterpartyCommitment` instead. - debug_assert!(self.pending_funding.is_empty()); - let channel_parameters = &self.funding.channel_parameters; - let commitment_tx = self.build_counterparty_commitment_tx( - channel_parameters, - commitment_number, - &their_per_commitment_point, - to_broadcaster_value, - to_countersignatory_value, - feerate_per_kw, - nondust_htlcs, - ); - - debug_assert_eq!(commitment_tx.trust().txid(), commitment_txid); - - Some(vec![commitment_tx]) + ChannelMonitorUpdateStep::RenegotiatedFunding { + channel_parameters, holder_commitment_tx, counterparty_commitment_tx, + } => { + log_trace!(logger, "Updating ChannelMonitor with alternative holder and counterparty commitment transactions for funding txid {}", + channel_parameters.funding_outpoint.unwrap().txid); + if let Err(_) = self.renegotiated_funding( + channel_parameters, holder_commitment_tx, counterparty_commitment_tx, + ) { + ret = Err(()); + } }, - &ChannelMonitorUpdateStep::LatestCounterpartyCommitment { ref commitment_txs, .. 
} => { - Some(commitment_txs.clone()) + ChannelMonitorUpdateStep::RenegotiatedFundingLocked { funding_txid } => { + log_trace!(logger, "Updating ChannelMonitor with locked renegotiated funding txid {}", funding_txid); + if let Err(_) = self.promote_funding(*funding_txid) { + log_error!(logger, "Unknown funding with txid {} became locked", funding_txid); + ret = Err(()); + } }, - &ChannelMonitorUpdateStep::RenegotiatedFunding { ref counterparty_commitment_tx, .. } => { - Some(vec![counterparty_commitment_tx.clone()]) + ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => { + log_trace!(logger, "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast); + self.lockdown_from_offchain = true; + if *should_broadcast { + // There's no need to broadcast our commitment transaction if we've seen one + // confirmed (even with 1 confirmation) as it'll be rejected as + // duplicate/conflicting. + let detected_funding_spend = self.funding_spend_confirmed.is_some() || + self.onchain_events_awaiting_threshold_conf.iter().any( + |event| matches!(event.event, OnchainEvent::FundingSpendConfirmation { .. })); + if detected_funding_spend { + log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain"); + continue; + } + self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, true); + } else if !self.holder_tx_signed { + log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast"); + log_error!(logger, " in channel monitor!"); + log_error!(logger, " Read the docs for ChannelMonitor::broadcast_latest_holder_commitment_txn to take manual action!"); + } else { + // If we generated a MonitorEvent::HolderForceClosed, the ChannelManager + // will still give us a ChannelForceClosed event with !should_broadcast, but we + // shouldn't print the scary warning above. 
+ log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction."); + } + }, + ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey } => { + log_trace!(logger, "Updating ChannelMonitor with shutdown script"); + if let Some(shutdown_script) = self.shutdown_script.replace(scriptpubkey.clone()) { + panic!("Attempted to replace shutdown script {} with {}", shutdown_script, scriptpubkey); + } + }, + ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc } => { + log_trace!(logger, "HTLC {htlc:?} permanently and fully resolved"); + self.htlcs_resolved_to_user.insert(*htlc); }, - _ => None, } - }).flatten().collect() - } - - #[rustfmt::skip] - fn sign_to_local_justice_tx( - &self, mut justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64 - ) -> Result { - let secret = self.get_secret(commitment_number).ok_or(())?; - let per_commitment_key = SecretKey::from_slice(&secret).map_err(|_| ())?; - let their_per_commitment_point = PublicKey::from_secret_key( - &self.onchain_tx_handler.secp_ctx, &per_commitment_key); - - let revocation_pubkey = RevocationKey::from_basepoint(&self.onchain_tx_handler.secp_ctx, - &self.holder_revocation_basepoint, &their_per_commitment_point); - let delayed_key = DelayedPaymentKey::from_basepoint(&self.onchain_tx_handler.secp_ctx, - &self.counterparty_commitment_params.counterparty_delayed_payment_base_key, &their_per_commitment_point); - let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, - self.counterparty_commitment_params.on_counterparty_tx_csv, &delayed_key); - - let commitment_txid = &justice_tx.input[input_idx].previous_output.txid; - // Since there may be multiple counterparty commitment transactions for the same commitment - // number due to splicing, we have to locate the matching `FundingScope::channel_parameters` - // to provide the signer. Since this is intended to be called during - // `Persist::update_persisted_channel`, the monitor should have already had the update - // applied. - let channel_parameters = core::iter::once(&self.funding) - .chain(&self.pending_funding) - .find(|funding| funding.counterparty_claimable_outpoints.contains_key(commitment_txid)) - .map(|funding| &funding.channel_parameters) - .ok_or(())?; - let sig = self.onchain_tx_handler.signer.sign_justice_revoked_output( - &channel_parameters, &justice_tx, input_idx, value, &per_commitment_key, - &self.onchain_tx_handler.secp_ctx, - )?; - justice_tx.input[input_idx].witness.push_ecdsa_signature(&BitcoinSignature::sighash_all(sig)); - justice_tx.input[input_idx].witness.push(&[1u8]); - justice_tx.input[input_idx].witness.push(revokeable_redeemscript.as_bytes()); - Ok(justice_tx) - } + } - /// Can only fail if idx is < get_min_seen_secret - fn get_secret(&self, idx: u64) -> Option<[u8; 32]> { - self.commitment_secrets.get_secret(idx) - } + #[cfg(debug_assertions)] { + self.counterparty_commitment_txs_from_update(updates); + } - fn get_min_seen_secret(&self) -> u64 { - self.commitment_secrets.get_min_seen_secret() - } + self.latest_update_id = updates.update_id; - fn get_cur_counterparty_commitment_number(&self) -> u64 { - self.current_counterparty_commitment_number - } + // Refuse updates after we've detected a spend onchain (or if the channel was otherwise + // closed), but only if the update isn't the kind of update we expect to see after channel + // closure. 
+ let mut is_pre_close_update = false; + for update in updates.updates.iter() { + match update { + ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { .. } + |ChannelMonitorUpdateStep::LatestHolderCommitment { .. } + |ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. } + |ChannelMonitorUpdateStep::LatestCounterpartyCommitment { .. } + |ChannelMonitorUpdateStep::ShutdownScript { .. } + |ChannelMonitorUpdateStep::CommitmentSecret { .. } + |ChannelMonitorUpdateStep::RenegotiatedFunding { .. } + |ChannelMonitorUpdateStep::RenegotiatedFundingLocked { .. } => + is_pre_close_update = true, + // After a channel is closed, we don't communicate with our peer about it, so the + // only things we will update is getting a new preimage (from a different channel), + // being told that the channel is closed, or being told a payment which was + // resolved on-chain has had its resolution communicated to the user. All other + // updates are generated while talking to our peer. + ChannelMonitorUpdateStep::PaymentPreimage { .. } => {}, + ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {}, + ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {}, + } + } - fn get_cur_holder_commitment_number(&self) -> u64 { - self.current_holder_commitment_number + if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update { + log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); + Err(()) + } else { ret } } /// Attempts to claim a counterparty commitment transaction's outputs using the revocation key and @@ -4677,9 +4758,8 @@ impl ChannelMonitorImpl { /// Returns packages to claim the revoked output(s) and general information about the output that /// is to the counterparty in the commitment transaction. #[rustfmt::skip] - fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) - -> (Vec, CommitmentTxCounterpartyOutputInfo) - where L::Target: Logger { + fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash) + -> (Vec, CommitmentTxCounterpartyOutputInfo) { // Most secp and related errors trying to create keys means we have no hope of constructing // a spend transaction...so we return no transactions to broadcast let mut claimable_outpoints = Vec::new(); @@ -4967,9 +5047,9 @@ impl ChannelMonitorImpl { /// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key #[rustfmt::skip] - fn check_spend_counterparty_htlc( - &mut self, tx: &Transaction, commitment_number: u64, commitment_txid: &Txid, height: u32, logger: &L - ) -> (Vec, Option) where L::Target: Logger { + fn check_spend_counterparty_htlc( + &mut self, tx: &Transaction, commitment_number: u64, commitment_txid: &Txid, height: u32, + ) -> (Vec, Option) { let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); }; let per_commitment_key = match SecretKey::from_slice(&secret) { Ok(key) => key, @@ -5110,13 +5190,10 @@ impl ChannelMonitorImpl { /// revoked using data in holder_claimable_outpoints. /// Should not be used if check_spend_revoked_transaction succeeds. /// Returns None unless the transaction is definitely one of our commitment transactions. 
- fn check_spend_holder_transaction( + fn check_spend_holder_transaction( &mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, - block_hash: &BlockHash, logger: &L, - ) -> Option<(Vec, TransactionOutputs)> - where - L::Target: Logger, - { + block_hash: &BlockHash, + ) -> Option<(Vec, TransactionOutputs)> { let funding_spent = get_confirmed_funding_scope!(self); // HTLCs set may differ between last and previous holder commitment txn, in case of one them hitting chain, ensure we cancel all HTLCs backward @@ -5179,9 +5256,9 @@ impl ChannelMonitorImpl { /// Cancels any existing pending claims for a commitment that previously confirmed and has now /// been replaced by another. #[rustfmt::skip] - pub fn cancel_prev_commitment_claims( - &mut self, logger: &L, confirmed_commitment_txid: &Txid - ) where L::Target: Logger { + fn cancel_prev_commitment_claims( + &mut self, confirmed_commitment_txid: &Txid + ) { for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain { // Cancel any pending claims for counterparty commitments we've seen confirm. if counterparty_commitment_txid == confirmed_commitment_txid { @@ -5253,9 +5330,9 @@ impl ChannelMonitorImpl { #[cfg(any(test, feature = "_test_utils", feature = "unsafe_revoked_tx_signing"))] /// Note that this includes possibly-locktimed-in-the-future transactions! #[rustfmt::skip] - fn unsafe_get_latest_holder_commitment_txn( - &mut self, logger: &WithChannelMonitor - ) -> Vec where L::Target: Logger { + fn unsafe_get_latest_holder_commitment_txn( + &mut self, + ) -> Vec { log_debug!(logger, "Getting signed copy of latest holder commitment transaction!"); let commitment_tx = { let sig = self.onchain_tx_handler.signer.unsafe_sign_holder_commitment( @@ -5305,67 +5382,62 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn block_connected( + fn block_connected( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, - fee_estimator: F, logger: &WithChannelMonitor, + fee_estimator: F, ) -> Vec where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger, { let block_hash = header.block_hash(); self.best_block = BestBlock::new(block_hash, height); let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); - self.transactions_confirmed(header, txdata, height, broadcaster, &bounded_fee_estimator, logger) + self.transactions_confirmed(header, txdata, height, broadcaster, &bounded_fee_estimator) } #[rustfmt::skip] - fn best_block_updated( + fn best_block_updated( &mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, ) -> Vec where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger, { let block_hash = header.block_hash(); if height > self.best_block.height { self.best_block = BestBlock::new(block_hash, height); log_trace!(logger, "Connecting new block {} at height {}", block_hash, height); - self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger) + self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator) } else if block_hash != self.best_block.block_hash { self.best_block = BestBlock::new(block_hash, height); log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height); self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height); let conf_target = self.closure_conf_target(); 
self.onchain_tx_handler.blocks_disconnected( - height, &broadcaster, conf_target, &self.destination_script, fee_estimator, logger, + height, &broadcaster, conf_target, &self.destination_script, fee_estimator, ); Vec::new() } else { Vec::new() } } #[rustfmt::skip] - fn transactions_confirmed( + fn transactions_confirmed( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, ) -> Vec where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger, { let funding_seen_before = self.funding_seen_onchain; let txn_matched = self.filter_block(txdata); @@ -5501,7 +5573,7 @@ impl ChannelMonitorImpl { // different funding transaction. let new_holder_commitment_txid = alternative_funding.current_holder_commitment_tx.trust().txid(); - self.cancel_prev_commitment_claims(&logger, &new_holder_commitment_txid); + self.cancel_prev_commitment_claims(&new_holder_commitment_txid); // We either attempted to broadcast a holder commitment, or saw one confirm // onchain, so broadcast the new holder commitment for the confirmed funding to @@ -5542,7 +5614,7 @@ impl ChannelMonitorImpl { // Is it a commitment transaction? if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.to_consensus_u32() >> 8*3) as u8 == 0x20 { - if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(txid, &tx, height, &block_hash, &logger) { + if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(txid, &tx, height, &block_hash) { if !new_outputs.1.is_empty() { watch_outputs.push(new_outputs); } @@ -5557,7 +5629,7 @@ impl ChannelMonitorImpl { watch_outputs.push((txid, new_watch_outputs)); let (mut new_outpoints, counterparty_output_idx_sats) = - self.check_spend_counterparty_transaction(txid, &tx, height, &block_hash, &logger); + self.check_spend_counterparty_transaction(txid, &tx, height, &block_hash); commitment_tx_to_counterparty_output = counterparty_output_idx_sats; claimable_outpoints.append(&mut new_outpoints); @@ -5585,7 +5657,7 @@ impl ChannelMonitorImpl { // Now that we've detected a confirmed commitment transaction, attempt to cancel // pending claims for any commitments that were previously confirmed such that // we don't continue claiming inputs that no longer exist. 
- self.cancel_prev_commitment_claims(&logger, &txid); + self.cancel_prev_commitment_claims(&txid); } } if tx.input.len() >= 1 { @@ -5596,7 +5668,7 @@ impl ChannelMonitorImpl { let commitment_txid = tx_input.previous_output.txid; if let Some(&commitment_number) = self.counterparty_commitment_txn_on_chain.get(&commitment_txid) { let (mut new_outpoints, new_outputs_option) = self.check_spend_counterparty_htlc( - &tx, commitment_number, &commitment_txid, height, &logger + &tx, commitment_number, &commitment_txid, height, ); claimable_outpoints.append(&mut new_outpoints); if let Some(new_outputs) = new_outputs_option { @@ -5609,9 +5681,9 @@ impl ChannelMonitorImpl { break; } } - self.is_resolving_htlc_output(&tx, height, &block_hash, logger); + self.is_resolving_htlc_output(&tx, height, &block_hash); - self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, logger); + self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash); } } @@ -5626,7 +5698,7 @@ impl ChannelMonitorImpl { watch_outputs.append(&mut outputs); } - self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, logger) + self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator) } /// Update state for new block(s)/transaction(s) confirmed. Note that the caller must update @@ -5638,7 +5710,7 @@ impl ChannelMonitorImpl { /// `conf_height` should be set to the height at which any new transaction(s)/block(s) were /// confirmed at, even if it is not the current best height. #[rustfmt::skip] - fn block_confirmed( + fn block_confirmed( &mut self, conf_height: u32, conf_hash: BlockHash, @@ -5647,19 +5719,17 @@ impl ChannelMonitorImpl { mut claimable_outpoints: Vec, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, ) -> Vec where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger, { log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height); debug_assert!(self.best_block.height >= conf_height); // Only generate claims if we haven't already done so (e.g., in transactions_confirmed). 
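// Sketch of the two call paths that reach this check, abbreviated from elsewhere in this
// diff: best_block_updated() passes empty vectors, so the HTLC-timeout check below runs,
// while transactions_confirmed() passes the outpoints it already collected, skipping it:
//
//     self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator)
//     self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator)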
if claimable_outpoints.is_empty() { - let should_broadcast = self.should_broadcast_holder_commitment_txn(logger); + let should_broadcast = self.should_broadcast_holder_commitment_txn(); if let Some(payment_hash) = should_broadcast { let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; let (mut new_outpoints, mut new_outputs) = @@ -5835,11 +5905,11 @@ impl ChannelMonitorImpl { let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests( claimable_outpoints, conf_height, self.best_block.height, broadcaster, conf_target, - &self.destination_script, fee_estimator, logger, + &self.destination_script, fee_estimator, ); self.onchain_tx_handler.update_claims_view_from_matched_txn( &txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, conf_target, - &self.destination_script, fee_estimator, logger, + &self.destination_script, fee_estimator, ); // Determine new outputs to watch by comparing against previously known outputs to watch, @@ -5866,11 +5936,10 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn blocks_disconnected( - &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithChannelMonitor + fn blocks_disconnected( + &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger, { let new_height = fork_point.height; log_trace!(logger, "Block(s) disconnected to height {}", new_height); @@ -5892,7 +5961,7 @@ impl ChannelMonitorImpl { // different funding transaction. let new_holder_commitment_txid = self.funding.current_holder_commitment_tx.trust().txid(); - self.cancel_prev_commitment_claims(&logger, &new_holder_commitment_txid); + self.cancel_prev_commitment_claims(&new_holder_commitment_txid); should_broadcast_commitment = true; } @@ -5902,29 +5971,27 @@ impl ChannelMonitorImpl { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let conf_target = self.closure_conf_target(); self.onchain_tx_handler.blocks_disconnected( - new_height, &broadcaster, conf_target, &self.destination_script, &bounded_fee_estimator, logger + new_height, &broadcaster, conf_target, &self.destination_script, &bounded_fee_estimator, ); // Only attempt to broadcast the new commitment after the `block_disconnected` call above so that // it doesn't get removed from the set of pending claims. if should_broadcast_commitment { - self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, &bounded_fee_estimator, logger, true); + self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, &bounded_fee_estimator, true); } self.best_block = fork_point; } #[rustfmt::skip] - fn transaction_unconfirmed( + fn transaction_unconfirmed( &mut self, txid: &Txid, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, - L::Target: Logger, { let mut removed_height = None; for entry in self.onchain_events_awaiting_threshold_conf.iter() { @@ -5954,7 +6021,7 @@ impl ChannelMonitorImpl { // different funding transaction. 
let new_holder_commitment_txid = self.funding.current_holder_commitment_tx.trust().txid(); - self.cancel_prev_commitment_claims(&logger, &new_holder_commitment_txid); + self.cancel_prev_commitment_claims(&new_holder_commitment_txid); should_broadcast_commitment = true; } @@ -5963,13 +6030,13 @@ impl ChannelMonitorImpl { let conf_target = self.closure_conf_target(); self.onchain_tx_handler.transaction_unconfirmed( - txid, &broadcaster, conf_target, &self.destination_script, fee_estimator, logger + txid, &broadcaster, conf_target, &self.destination_script, fee_estimator, ); // Only attempt to broadcast the new commitment after the `transaction_unconfirmed` call above so // that it doesn't get removed from the set of pending claims. if should_broadcast_commitment { - self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, fee_estimator, logger, true); + self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, fee_estimator, true); } } @@ -6030,9 +6097,9 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn should_broadcast_holder_commitment_txn( - &self, logger: &WithChannelMonitor - ) -> Option where L::Target: Logger { + fn should_broadcast_holder_commitment_txn( + &self, + ) -> Option { // There's no need to broadcast our commitment transaction if we've seen one confirmed (even // with 1 confirmation) as it'll be rejected as duplicate/conflicting. if self.funding_spend_confirmed.is_some() || @@ -6097,9 +6164,9 @@ impl ChannelMonitorImpl { /// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder /// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC #[rustfmt::skip] - fn is_resolving_htlc_output( - &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor, - ) where L::Target: Logger { + fn is_resolving_htlc_output( + &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, + ) { let funding_spent = get_confirmed_funding_scope!(self); 'outer_loop: for input in &tx.input { @@ -6306,57 +6373,12 @@ impl ChannelMonitorImpl { } } - #[rustfmt::skip] - fn get_spendable_outputs(&self, funding_spent: &FundingScope, tx: &Transaction) -> Vec { - let mut spendable_outputs = Vec::new(); - for (i, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == self.destination_script { - spendable_outputs.push(SpendableOutputDescriptor::StaticOutput { - outpoint: OutPoint { txid: tx.compute_txid(), index: i as u16 }, - output: outp.clone(), - channel_keys_id: Some(self.channel_keys_id), - }); - } - if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script { - if broadcasted_holder_revokable_script.0 == outp.script_pubkey { - spendable_outputs.push(SpendableOutputDescriptor::DelayedPaymentOutput(DelayedPaymentOutputDescriptor { - outpoint: OutPoint { txid: tx.compute_txid(), index: i as u16 }, - per_commitment_point: broadcasted_holder_revokable_script.1, - to_self_delay: self.on_holder_tx_csv, - output: outp.clone(), - revocation_pubkey: broadcasted_holder_revokable_script.2, - channel_keys_id: self.channel_keys_id, - channel_value_satoshis: funding_spent.channel_parameters.channel_value_satoshis, - channel_transaction_parameters: Some(funding_spent.channel_parameters.clone()), - })); - } - } - if self.counterparty_payment_script == outp.script_pubkey { - spendable_outputs.push(SpendableOutputDescriptor::StaticPaymentOutput(StaticPaymentOutputDescriptor { - outpoint: OutPoint { txid: 
tx.compute_txid(), index: i as u16 }, - output: outp.clone(), - channel_keys_id: self.channel_keys_id, - channel_value_satoshis: funding_spent.channel_parameters.channel_value_satoshis, - channel_transaction_parameters: Some(funding_spent.channel_parameters.clone()), - })); - } - if self.shutdown_script.as_ref() == Some(&outp.script_pubkey) { - spendable_outputs.push(SpendableOutputDescriptor::StaticOutput { - outpoint: OutPoint { txid: tx.compute_txid(), index: i as u16 }, - output: outp.clone(), - channel_keys_id: Some(self.channel_keys_id), - }); - } - } - spendable_outputs - } - /// Checks if the confirmed transaction is paying funds back to some address we can assume to /// own. #[rustfmt::skip] - fn check_tx_and_push_spendable_outputs( - &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor, - ) where L::Target: Logger { + fn check_tx_and_push_spendable_outputs( + &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, + ) { let funding_spent = get_confirmed_funding_scope!(self); for spendable_output in self.get_spendable_outputs(funding_spent, tx) { let entry = OnchainEventEntry { @@ -6370,10 +6392,6 @@ impl ChannelMonitorImpl { self.onchain_events_awaiting_threshold_conf.push(entry); } } - - fn channel_type_features(&self) -> &ChannelTypeFeatures { - &self.funding.channel_parameters.channel_type_features - } } impl chain::Listen @@ -6698,7 +6716,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } let channel_parameters = channel_parameters.unwrap_or_else(|| { - onchain_tx_handler.channel_parameters().clone() + onchain_tx_handler.channel_transaction_parameters.clone() }); // Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the @@ -6715,7 +6733,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)); let (current_holder_commitment_tx, current_holder_htlc_data) = { - let holder_commitment_tx = onchain_tx_handler.current_holder_commitment_tx(); + let holder_commitment_tx = &onchain_tx_handler.holder_commitment; #[cfg(debug_assertions)] let holder_signed_tx_copy = current_holder_signed_tx.clone(); @@ -6739,7 +6757,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let (prev_holder_commitment_tx, prev_holder_htlc_data) = if let Some(prev_holder_signed_tx) = prev_holder_signed_tx { - let holder_commitment_tx = onchain_tx_handler.prev_holder_commitment_tx(); + let holder_commitment_tx = &onchain_tx_handler.prev_holder_commitment; if holder_commitment_tx.is_none() { return Err(DecodeError::InvalidValue); } @@ -6753,7 +6771,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP #[cfg(debug_assertions)] { let mut stream = crate::util::ser::VecWriter(Vec::new()); write_legacy_holder_commitment_data( - &mut stream, &holder_commitment_tx.unwrap(), &holder_commitment_htlc_data + &mut stream, holder_commitment_tx.as_ref().unwrap(), &holder_commitment_htlc_data ).map_err(|_| DecodeError::InvalidValue)?; let mut cursor = crate::io::Cursor::new(stream.0); if holder_signed_tx_copy != ::read(&mut cursor)? 
{ @@ -6761,7 +6779,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } } - (holder_commitment_tx.cloned(), Some(holder_commitment_htlc_data)) + (holder_commitment_tx.clone(), Some(holder_commitment_htlc_data)) } else { (None, None) }; @@ -7125,11 +7143,12 @@ mod tests { let nondust_htlcs = dummy_commitment_tx.nondust_htlcs(); monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(), - &nondust_htlcs.iter().map(|htlc| (htlc.clone(), Some(dummy_sig), Some(dummy_source.clone()))).collect::>()); + &nondust_htlcs.iter().map(|htlc| (htlc.clone(), Some(dummy_sig), Some(dummy_source.clone()))).collect::>(), + &logger); monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"1").to_byte_array()), - preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key); + preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger); monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"2").to_byte_array()), - preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key); + preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key, &logger); for &(ref preimage, ref hash) in preimages.iter() { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); monitor.provide_payment_preimage_unsafe_legacy( @@ -7140,23 +7159,23 @@ mod tests { // Now provide a secret, pruning preimages 10-15 let mut secret = [0; 32]; secret[0..32].clone_from_slice(&>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()); - monitor.provide_secret(281474976710655, secret.clone()).unwrap(); + monitor.provide_secret(281474976710655, secret.clone(), &logger).unwrap(); assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 15); test_preimages_exist!(&preimages[0..10], monitor); test_preimages_exist!(&preimages[15..20], monitor); monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"3").to_byte_array()), - preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key); + preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger); // Now provide a further secret, pruning preimages 15-17 secret[0..32].clone_from_slice(&>::from_hex("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap()); - monitor.provide_secret(281474976710654, secret.clone()).unwrap(); + monitor.provide_secret(281474976710654, secret.clone(), &logger).unwrap(); assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 13); test_preimages_exist!(&preimages[0..10], monitor); test_preimages_exist!(&preimages[17..20], monitor); monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"4").to_byte_array()), - preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key); + preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key, &logger); // Now update holder commitment tx info, pruning only element 18 as we still care about the // previous commitment tx's preimages too @@ -7165,9 +7184,10 @@ mod tests { // These HTLCs now have their output indices assigned let nondust_htlcs = dummy_commitment_tx.nondust_htlcs(); monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(), - &nondust_htlcs.iter().map(|htlc| (htlc.clone(), Some(dummy_sig), Some(dummy_source.clone()))).collect::>()); + &nondust_htlcs.iter().map(|htlc| (htlc.clone(), 
Some(dummy_sig), Some(dummy_source.clone()))).collect::>(), + &logger); secret[0..32].clone_from_slice(&>::from_hex("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap()); - monitor.provide_secret(281474976710653, secret.clone()).unwrap(); + monitor.provide_secret(281474976710653, secret.clone(), &logger).unwrap(); assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 12); test_preimages_exist!(&preimages[0..10], monitor); test_preimages_exist!(&preimages[18..20], monitor); @@ -7178,9 +7198,10 @@ mod tests { // These HTLCs now have their output indices assigned let nondust_htlcs = dummy_commitment_tx.nondust_htlcs(); monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(), - &nondust_htlcs.iter().map(|htlc| (htlc.clone(), Some(dummy_sig), Some(dummy_source.clone()))).collect::>()); + &nondust_htlcs.iter().map(|htlc| (htlc.clone(), Some(dummy_sig), Some(dummy_source.clone()))).collect::>(), + &logger); secret[0..32].clone_from_slice(&>::from_hex("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); - monitor.provide_secret(281474976710652, secret.clone()).unwrap(); + monitor.provide_secret(281474976710652, secret.clone(), &logger).unwrap(); assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 5); test_preimages_exist!(&preimages[0..5], monitor); } diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index fb65aa0f157..8637e152104 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -25,7 +25,7 @@ use bitcoin::transaction::Transaction; use crate::chain::chaininterface::ConfirmationTarget; use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::ANTI_REORG_DELAY; -use crate::chain::package::{PackageSolvingData, PackageTemplate}; +use crate::chain::package::{HolderFundingOutput, PackageSolvingData, PackageTemplate}; use crate::chain::transaction::MaybeSignedTransaction; use crate::chain::ClaimId; use crate::ln::chan_utils::{ @@ -224,11 +224,11 @@ pub struct OnchainTxHandler { channel_value_satoshis: u64, // Deprecated as of 0.2. channel_keys_id: [u8; 32], // Deprecated as of 0.2. destination_script: ScriptBuf, // Deprecated as of 0.2. - holder_commitment: HolderCommitmentTransaction, - prev_holder_commitment: Option, + pub(super) holder_commitment: HolderCommitmentTransaction, + pub(super) prev_holder_commitment: Option, pub(super) signer: ChannelSigner, - channel_transaction_parameters: ChannelTransactionParameters, // Deprecated as of 0.2. + pub(super) channel_transaction_parameters: ChannelTransactionParameters, // Deprecated as of 0.2. // Used to track claiming requests. If claim tx doesn't confirm before height timer expiration we need to bump // it (RBF or CPFP). If an input has been part of an aggregate tx at first claim try, we need to keep it within @@ -465,6 +465,46 @@ impl OnchainTxHandler { } } + pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<(ClaimId, ClaimEvent)> { + let mut events = Vec::new(); + swap(&mut events, &mut self.pending_claim_events); + events + } + + /// Returns true if we are currently tracking any pending claim requests that are not fully + /// confirmed yet. 
+ pub(super) fn has_pending_claims(&self) -> bool { + self.pending_claim_requests.len() != 0 + } + + pub(crate) fn is_output_spend_pending(&self, outpoint: &BitcoinOutPoint) -> bool { + self.claimable_outpoints.get(outpoint).is_some() + } + + #[rustfmt::skip] + pub(crate) fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option)> { + let mut txids: Vec<(Txid, u32, Option)> = self.onchain_events_awaiting_threshold_conf + .iter() + .map(|entry| (entry.txid, entry.height, entry.block_hash)) + .collect(); + txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(b.1.cmp(&a.1))); + txids.dedup_by_key(|(txid, _, _)| *txid); + txids + } +} + +#[lightning_macros::add_logging(< + crate::chain::channelmonitor::WithChannelMonitor, + otherstruct = CounterpartyOfferedHTLCOutput, + otherstruct = CounterpartyReceivedHTLCOutput, + otherstruct = RevokedHTLCOutput, + otherstruct = RevokedOutput, + otherstruct = HolderHTLCOutput, + otherstruct = HolderFundingOutput, + otherstruct = PackageTemplate +>)] +impl OnchainTxHandler { + pub(crate) fn prev_holder_commitment_tx(&self) -> Option<&HolderCommitmentTransaction> { self.prev_holder_commitment.as_ref() } @@ -473,11 +513,6 @@ impl OnchainTxHandler { &self.holder_commitment } - pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<(ClaimId, ClaimEvent)> { - let mut events = Vec::new(); - swap(&mut events, &mut self.pending_claim_events); - events - } /// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is /// crucial in preventing certain classes of pinning attacks, detecting substantial mempool @@ -485,10 +520,10 @@ impl OnchainTxHandler { /// invoking this every 30 seconds, or lower if running in an environment with spotty /// connections, like on mobile. #[rustfmt::skip] - pub(super) fn rebroadcast_pending_claims( + pub(super) fn rebroadcast_pending_claims( &mut self, current_height: u32, feerate_strategy: FeerateStrategy, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + fee_estimator: &LowerBoundedFeeEstimator, ) where B::Target: BroadcasterInterface, @@ -496,18 +531,25 @@ impl OnchainTxHandler { { let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len()); for (claim_id, request) in self.pending_claim_requests.iter() { + // Clarify the type of `request` so that loggers are passed properly + let request: &PackageTemplate = request; let inputs = request.outpoints(); log_info!(logger, "Triggering rebroadcast/fee-bump for request with inputs {:?}", inputs); bump_requests.push((*claim_id, request.clone())); } for (claim_id, request) in bump_requests { + // Clarify the type of `request` so that loggers are passed properly + let request: PackageTemplate = request; + self.generate_claim( current_height, &request, &feerate_strategy, conf_target, destination_script, - fee_estimator, logger, + fee_estimator, ) .map(|(_, new_feerate, claim)| { let mut feerate_was_bumped = false; if let Some(mut_request) = self.pending_claim_requests.get_mut(&claim_id) { + // Clarify the type so that loggers are passed properly + let mut_request: &mut PackageTemplate = mut_request; feerate_was_bumped = new_feerate > request.previous_feerate(); mut_request.set_feerate(new_feerate); } @@ -539,12 +581,6 @@ impl OnchainTxHandler { } } - /// Returns true if we are currently tracking any pending claim requests that are not fully - /// confirmed yet. 
- pub(super) fn has_pending_claims(&self) -> bool { - self.pending_claim_requests.len() != 0 - } - /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty /// onchain) lays on the assumption of claim transactions getting confirmed before timelock /// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck @@ -554,10 +590,10 @@ impl OnchainTxHandler { /// Panics if there are signing errors, because signing operations in reaction to on-chain /// events are not expected to fail, and if they do, we may lose funds. #[rustfmt::skip] - fn generate_claim( + fn generate_claim( &mut self, cur_height: u32, cached_request: &PackageTemplate, feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, - destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, ) -> Option<(u32, u64, OnchainClaim)> where F::Target: FeeEstimator, { @@ -622,12 +658,12 @@ impl OnchainTxHandler { let predicted_weight = cached_request.package_weight(destination_script); if let Some((output_value, new_feerate)) = cached_request.compute_package_output( predicted_weight, destination_script.minimal_non_dust().to_sat(), - feerate_strategy, conf_target, fee_estimator, logger, + feerate_strategy, conf_target, fee_estimator, ) { assert!(new_feerate != 0); let transaction = cached_request.maybe_finalize_malleable_package( - cur_height, self, Amount::from_sat(output_value), destination_script.into(), logger + cur_height, self, Amount::from_sat(output_value), destination_script.into(), ).unwrap(); assert!(predicted_weight >= transaction.0.weight().to_wu()); return Some((new_timer, new_feerate, OnchainClaim::Tx(transaction))); @@ -640,7 +676,7 @@ impl OnchainTxHandler { debug_assert_eq!(inputs.len(), 1); if !cached_request.requires_external_funding() { - return cached_request.maybe_finalize_untractable_package(self, logger) + return cached_request.maybe_finalize_untractable_package(self) .map(|tx| (new_timer, 0, OnchainClaim::Tx(tx))) } @@ -648,6 +684,7 @@ impl OnchainTxHandler { // Commitment inputs with anchors support are the only untractable inputs supported // thus far that require external funding. 
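// Sketch of the external-funding path handled in the arm below (assumed shape, pieced
// together from the claim-event plumbing elsewhere in this diff): instead of returning a
// broadcastable transaction, the handler queues a claim event which
// get_and_clear_pending_claim_events() later surfaces as a
// BumpTransactionEvent::ChannelClose, roughly
//
//     return Some((new_timer, 0, OnchainClaim::Event(ClaimEvent::BumpCommitment { .. })));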
PackageSolvingData::HolderFundingOutput(output) => { + let output: &HolderFundingOutput = output; let maybe_signed_commitment_tx = output.get_maybe_signed_commitment_tx(self); let tx = if maybe_signed_commitment_tx.is_fully_signed() { maybe_signed_commitment_tx.0 @@ -726,11 +763,13 @@ impl OnchainTxHandler { let claim_id = self.claimable_outpoints.get(outpoint).map(|(claim_id, _)| *claim_id) .or_else(|| { self.pending_claim_requests.iter() - .find(|(_, claim)| claim.outpoints().contains(&outpoint)) + .find(|(_, claim): &(_, &PackageTemplate)| claim.outpoints().contains(&outpoint)) .map(|(claim_id, _)| *claim_id) }); if let Some(claim_id) = claim_id { if let Some(claim) = self.pending_claim_requests.remove(&claim_id) { + // Clarify the type so that loggers are passed properly + let claim: PackageTemplate = claim; for outpoint in claim.outpoints() { if self.claimable_outpoints.remove(outpoint).is_some() { found_claim = true; @@ -739,7 +778,7 @@ impl OnchainTxHandler { } } else { self.locktimed_packages.values_mut().for_each(|claims| { - claims.retain(|claim| { + claims.retain(|claim: &PackageTemplate| { let includes_outpoint = claim.outpoints().contains(&outpoint); if includes_outpoint { found_claim = true; @@ -761,10 +800,10 @@ impl OnchainTxHandler { /// does not need to equal the current blockchain tip height, which should be provided via /// `cur_height`, however it must never be higher than `cur_height`. #[rustfmt::skip] - pub(super) fn update_claims_view_from_requests( + pub(super) fn update_claims_view_from_requests( &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L + fee_estimator: &LowerBoundedFeeEstimator, ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -774,7 +813,7 @@ impl OnchainTxHandler { } // First drop any duplicate claims. - requests.retain(|req| { + requests.retain(|req: &PackageTemplate| { debug_assert_eq!( req.outpoints().len(), 1, @@ -792,8 +831,9 @@ impl OnchainTxHandler { false } else { let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() - .find(|locked_package| locked_package.outpoints() == req.outpoints()); + .find(|locked_package: &&PackageTemplate| locked_package.outpoints() == req.outpoints()); if let Some(package) = timelocked_equivalent_package { + let package: &PackageTemplate = package; log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height)); false @@ -806,9 +846,9 @@ impl OnchainTxHandler { // Then try to maximally aggregate `requests`. for i in (1..requests.len()).rev() { for j in 0..i { - if requests[i].can_merge_with(&requests[j], cur_height) { + if requests[i].can_merge_with(&requests[j], cur_height, logger) { let merge = requests.remove(i); - if let Err(rejected) = requests[j].merge_package(merge, cur_height) { + if let Err(rejected) = requests[j].merge_package(merge, cur_height, logger) { debug_assert!(false, "Merging package should not be rejected after verifying can_merge_with."); requests.insert(i, rejected); } else { @@ -821,6 +861,7 @@ impl OnchainTxHandler { // Finally, split requests into timelocked ones and immediately-spendable ones. 
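// Sketch of the split performed below (assuming locktimed_packages maps a locktime to a
// Vec<PackageTemplate>): packages whose locktime is still in the future are parked until
// that height, everything else is claimed immediately:
//
//     let package_locktime = req.package_locktime(cur_height);
//     if package_locktime > cur_height {
//         self.locktimed_packages.entry(package_locktime).or_default().push(req);
//     } else {
//         preprocessed_requests.push(req);
//     }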
let mut preprocessed_requests = Vec::with_capacity(requests.len()); for req in requests { + let req: PackageTemplate = req; let package_locktime = req.package_locktime(cur_height); if package_locktime > cur_height { log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height); @@ -849,10 +890,11 @@ impl OnchainTxHandler { // Generate claim transactions and track them to bump if necessary at // height timer expiration (i.e in how many blocks we're going to take action). - for mut req in preprocessed_requests { + for req in preprocessed_requests { + let mut req: PackageTemplate = req; if let Some((new_timer, new_feerate, claim)) = self.generate_claim( cur_height, &req, &FeerateStrategy::ForceBump, conf_target, destination_script, - &*fee_estimator, &*logger, + &*fee_estimator, ) { req.set_timer(new_timer); req.set_feerate(new_feerate); @@ -912,10 +954,10 @@ impl OnchainTxHandler { /// confirmed. This does not need to equal the current blockchain tip height, which should be /// provided via `cur_height`, however it must never be higher than `cur_height`. #[rustfmt::skip] - pub(super) fn update_claims_view_from_matched_txn( + pub(super) fn update_claims_view_from_matched_txn( &mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, - destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L + destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -936,6 +978,9 @@ impl OnchainTxHandler { if let Some((claim_id, _)) = self.claimable_outpoints.get(&inp.previous_output) { // If outpoint has claim request pending on it... if let Some(request) = self.pending_claim_requests.get_mut(claim_id) { + // Clarify the type so that loggers are passed properly + let request: &mut PackageTemplate = request; + //... we need to check if the pending claim was for a subset of the outputs // spent by the confirmed transaction. If so, we can drop the pending claim // after ANTI_REORG_DELAY blocks, otherwise we need to split it and retry @@ -1005,7 +1050,7 @@ impl OnchainTxHandler { // Also remove/split any locktimed packages whose inputs have been spent by this transaction. 
self.locktimed_packages.retain(|_locktime, packages|{ - packages.retain_mut(|package| { + packages.retain_mut(|package: &mut PackageTemplate| { if let Some(p) = package.split_package(&inp.previous_output) { claimed_outputs_material.push(p); } @@ -1038,6 +1083,9 @@ impl OnchainTxHandler { // We may remove a whole set of claim outpoints here, as these one may have // been aggregated in a single tx and claimed so atomically if let Some(request) = self.pending_claim_requests.remove(&claim_id) { + // Clarify the type so that loggers are passed properly + let request: PackageTemplate = request; + for outpoint in request.outpoints() { log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.", outpoint, log_bytes!(claim_id.0)); @@ -1052,6 +1100,7 @@ impl OnchainTxHandler { } }, OnchainEvent::ContentiousOutpoint { package } => { + let package: PackageTemplate = package; log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:"); log_debug!(logger, " {:?}", package.outpoints()); self.claimable_outpoints.remove(package.outpoints()[0]); @@ -1064,6 +1113,8 @@ impl OnchainTxHandler { // Check if any pending claim request must be rescheduled for (claim_id, request) in self.pending_claim_requests.iter() { + // Clarify the type so that loggers are passed properly + let request: &PackageTemplate = request; if cur_height >= request.timer() { bump_candidates.insert(*claim_id, request.clone()); } @@ -1076,9 +1127,10 @@ impl OnchainTxHandler { } for (claim_id, request) in bump_candidates.iter() { + let request: &PackageTemplate = request; if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim( cur_height, &request, &FeerateStrategy::ForceBump, conf_target, destination_script, - &*fee_estimator, &*logger, + &*fee_estimator, ) { match bump_claim { OnchainClaim::Tx(bump_tx) => { @@ -1102,6 +1154,8 @@ impl OnchainTxHandler { }, } if let Some(request) = self.pending_claim_requests.get_mut(claim_id) { + // Clarify the type so that loggers are passed properly + let request: &mut PackageTemplate = request; request.set_timer(new_timer); request.set_feerate(new_feerate); } @@ -1110,14 +1164,13 @@ impl OnchainTxHandler { } #[rustfmt::skip] - pub(super) fn transaction_unconfirmed( + pub(super) fn transaction_unconfirmed( &mut self, txid: &Txid, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, - logger: &L, ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -1132,15 +1185,15 @@ impl OnchainTxHandler { if let Some(height) = height { self.blocks_disconnected( - height - 1, broadcaster, conf_target, destination_script, fee_estimator, logger, + height - 1, broadcaster, conf_target, destination_script, fee_estimator, ); } } #[rustfmt::skip] - pub(super) fn blocks_disconnected( + pub(super) fn blocks_disconnected( &mut self, new_best_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, - destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -1154,6 +1207,8 @@ impl OnchainTxHandler { //- resurect outpoint back in its claimable set and regenerate tx match entry.event { OnchainEvent::ContentiousOutpoint { package } => { + let package: PackageTemplate = package; + // We pass 0 to `package_locktime` to get the actual required locktime. 
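// (Sketch of why 0 is passed, assuming package_locktime(height) returns a signed
// locktime when one exists and otherwise the max of `height` and the inputs' minimum
// locktime:
//
//     package.package_locktime(0)               // the package's own required locktime
//     package.package_locktime(new_best_height) // never below new_best_height
//
// so the comparison below can tell whether the resurrected package must keep waiting for
// its locktime.)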
let package_locktime = package.package_locktime(0); if package_locktime > new_best_height { @@ -1163,6 +1218,8 @@ impl OnchainTxHandler { if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) { if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) { + // Clarify the type so that loggers are passed properly + let request: &mut PackageTemplate = request; assert!(request.merge_package(package, new_best_height + 1).is_ok()); // Using a HashMap guarantee us than if we have multiple outpoints getting // resurrected only one bump claim tx is going to be broadcast @@ -1179,8 +1236,10 @@ impl OnchainTxHandler { for ((_claim_id, _), ref mut request) in bump_candidates.iter_mut() { if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim( new_best_height, &request, &FeerateStrategy::ForceBump, conf_target, - destination_script, fee_estimator, logger + destination_script, fee_estimator, ) { + let request: &mut PackageTemplate = request; + request.set_timer(new_timer); request.set_feerate(new_feerate); match bump_claim { @@ -1221,21 +1280,6 @@ impl OnchainTxHandler { } } - pub(crate) fn is_output_spend_pending(&self, outpoint: &BitcoinOutPoint) -> bool { - self.claimable_outpoints.get(outpoint).is_some() - } - - #[rustfmt::skip] - pub(crate) fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option)> { - let mut txids: Vec<(Txid, u32, Option)> = self.onchain_events_awaiting_threshold_conf - .iter() - .map(|entry| (entry.txid, entry.height, entry.block_hash)) - .collect(); - txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(b.1.cmp(&a.1))); - txids.dedup_by_key(|(txid, _, _)| *txid); - txids - } - pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) { self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx)); } diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index db46f3be60d..38fe6fd08f9 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -147,6 +147,7 @@ pub(crate) struct RevokedOutput { outpoint_confirmation_height: Option, } +#[lightning_macros::add_logging(>)] impl RevokedOutput { #[rustfmt::skip] pub(crate) fn build( @@ -210,6 +211,7 @@ pub(crate) struct RevokedHTLCOutput { outpoint_confirmation_height: Option, } +#[lightning_macros::add_logging(>)] impl RevokedHTLCOutput { pub(crate) fn build( per_commitment_point: PublicKey, per_commitment_key: SecretKey, @@ -272,6 +274,7 @@ pub(crate) struct CounterpartyOfferedHTLCOutput { outpoint_confirmation_height: Option, } +#[lightning_macros::add_logging(>)] impl CounterpartyOfferedHTLCOutput { pub(crate) fn build( per_commitment_point: PublicKey, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, @@ -373,6 +376,7 @@ pub(crate) struct CounterpartyReceivedHTLCOutput { outpoint_confirmation_height: Option, } +#[lightning_macros::add_logging(>)] impl CounterpartyReceivedHTLCOutput { pub(crate) fn build( per_commitment_point: PublicKey, htlc: HTLCOutputInCommitment, @@ -469,6 +473,7 @@ pub(crate) struct HolderHTLCOutput { outpoint_confirmation_height: Option, } +#[lightning_macros::add_logging(, otherstruct = OnchainTxHandler>)] impl HolderHTLCOutput { #[rustfmt::skip] pub(crate) fn build( @@ -640,6 +645,7 @@ pub(crate) struct HolderFundingOutput { pub(crate) channel_parameters: Option, } +#[lightning_macros::add_logging(, otherstruct = OnchainTxHandler>)] impl HolderFundingOutput { pub(crate) fn build( commitment_tx: HolderCommitmentTransaction, @@ -739,6 +745,7 @@ pub(crate) enum 
PackageSolvingData { HolderFundingOutput(HolderFundingOutput), } +#[lightning_macros::add_logging(, otherstruct = HolderFundingOutput, otherstruct = HolderHTLCOutput, otherstruct = OnchainTxHandler>)] impl PackageSolvingData { #[rustfmt::skip] fn amount(&self) -> u64 { @@ -994,11 +1001,14 @@ impl PackageSolvingData { fn get_maybe_finalized_tx(&self, outpoint: &BitcoinOutPoint, onchain_handler: &mut OnchainTxHandler) -> Option { match self { PackageSolvingData::HolderHTLCOutput(ref outp) => { + let outp: &HolderHTLCOutput = outp; + debug_assert!(!outp.channel_type_features.supports_anchors_zero_fee_htlc_tx()); debug_assert!(!outp.channel_type_features.supports_anchor_zero_fee_commitments()); outp.get_maybe_signed_htlc_tx(onchain_handler, outpoint) } PackageSolvingData::HolderFundingOutput(ref outp) => { + let outp: &HolderFundingOutput = outp; Some(outp.get_maybe_signed_commitment_tx(onchain_handler)) } _ => { panic!("API Error!"); } @@ -1026,44 +1036,44 @@ impl PackageSolvingData { _ => None, } } +} - #[rustfmt::skip] - fn map_output_type_flags(&self) -> PackageMalleability { - // We classify claims into not-mergeable (i.e. transactions that have to be broadcasted - // as-is) or merge-able (i.e. transactions we can merge with others and claim in batches), - // which we then sub-categorize into pinnable (where our counterparty could potentially - // also claim the transaction right now) or unpinnable (where only we can claim this - // output). We assume we are claiming in a timely manner. - match self { - PackageSolvingData::RevokedOutput(RevokedOutput { .. }) => - PackageMalleability::Malleable(AggregationCluster::Unpinnable), - PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput { htlc, .. }) => { - if htlc.offered { +#[rustfmt::skip] +fn map_output_type_flags(data: &PackageSolvingData) -> PackageMalleability { + // We classify claims into not-mergeable (i.e. transactions that have to be broadcasted + // as-is) or merge-able (i.e. transactions we can merge with others and claim in batches), + // which we then sub-categorize into pinnable (where our counterparty could potentially + // also claim the transaction right now) or unpinnable (where only we can claim this + // output). We assume we are claiming in a timely manner. + match data { + PackageSolvingData::RevokedOutput(RevokedOutput { .. }) => + PackageMalleability::Malleable(AggregationCluster::Unpinnable), + PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput { htlc, .. }) => { + if htlc.offered { + PackageMalleability::Malleable(AggregationCluster::Unpinnable) + } else { + PackageMalleability::Malleable(AggregationCluster::Pinnable) + } + }, + PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => + PackageMalleability::Malleable(AggregationCluster::Unpinnable), + PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => + PackageMalleability::Malleable(AggregationCluster::Pinnable), + PackageSolvingData::HolderHTLCOutput(ref outp) => { + let free_htlcs = outp.channel_type_features.supports_anchors_zero_fee_htlc_tx(); + let free_commits = outp.channel_type_features.supports_anchor_zero_fee_commitments(); + + if free_htlcs || free_commits { + if outp.preimage.is_some() { PackageMalleability::Malleable(AggregationCluster::Unpinnable) } else { PackageMalleability::Malleable(AggregationCluster::Pinnable) } - }, - PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => - PackageMalleability::Malleable(AggregationCluster::Unpinnable), - PackageSolvingData::CounterpartyReceivedHTLCOutput(..) 
-				PackageMalleability::Malleable(AggregationCluster::Pinnable),
-			PackageSolvingData::HolderHTLCOutput(ref outp) => {
-				let free_htlcs = outp.channel_type_features.supports_anchors_zero_fee_htlc_tx();
-				let free_commits = outp.channel_type_features.supports_anchor_zero_fee_commitments();
-
-				if free_htlcs || free_commits {
-					if outp.preimage.is_some() {
-						PackageMalleability::Malleable(AggregationCluster::Unpinnable)
-					} else {
-						PackageMalleability::Malleable(AggregationCluster::Pinnable)
-					}
-				} else {
-					PackageMalleability::Untractable
-				}
-			},
-			PackageSolvingData::HolderFundingOutput(..) => PackageMalleability::Untractable,
-		}
+			} else {
+				PackageMalleability::Untractable
+			}
+		},
+		PackageSolvingData::HolderFundingOutput(..) => PackageMalleability::Untractable,
 	}
 }
@@ -1180,6 +1190,7 @@ impl PartialEq for PackageTemplate {
 	}
 }
 
+#[lightning_macros::add_logging(, otherstruct = HolderHTLCOutput, otherstruct = PackageSolvingData>)]
 impl PackageTemplate {
 	#[rustfmt::skip]
 	pub(crate) fn can_merge_with(&self, other: &PackageTemplate, cur_height: u32) -> bool {
@@ -1203,16 +1214,16 @@ impl PackageTemplate {
 		{
 			for i in 0..self.inputs.len() {
 				for j in 0..i {
-					debug_assert!(self.inputs[i].1.is_possibly_from_same_tx_tree(&self.inputs[j].1));
+					debug_assert!(self.inputs[i].1.is_possibly_from_same_tx_tree(&self.inputs[j].1, logger));
 				}
 			}
 			for i in 0..other.inputs.len() {
 				for j in 0..i {
-					assert!(other.inputs[i].1.is_possibly_from_same_tx_tree(&other.inputs[j].1));
+					assert!(other.inputs[i].1.is_possibly_from_same_tx_tree(&other.inputs[j].1, logger));
 				}
 			}
 		}
-		if !self.inputs[0].1.is_possibly_from_same_tx_tree(&other.inputs[0].1) {
+		if !self.inputs[0].1.is_possibly_from_same_tx_tree(&other.inputs[0].1, logger) {
 			debug_assert!(false, "We shouldn't have packages from different tx trees");
 			return false;
 		}
@@ -1266,10 +1277,13 @@ impl PackageTemplate {
 	pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> {
 		self.inputs.iter().map(|(o, _)| o).collect()
 	}
-	pub(crate) fn outpoints_and_creation_heights(
-		&self,
-	) -> impl Iterator<Item = (&BitcoinOutPoint, Option<u32>)> {
-		self.inputs.iter().map(|(o, p)| (o, p.input_confirmation_height()))
+	pub(crate) fn outpoints_and_creation_heights<'a, L: Deref>(
+		&'a self, logger: &'a crate::chain::channelmonitor::WithChannelMonitor<L>,
+	) -> impl Iterator<Item = (&BitcoinOutPoint, Option<u32>)>
+	where
+		L::Target: Logger,
+	{
+		self.inputs.iter().map(move |(o, p): &(_, PackageSolvingData)| (o, p.input_confirmation_height(logger)))
 	}
 	pub(crate) fn inputs(&self) -> impl ExactSizeIterator<Item = &(BitcoinOutPoint, PackageSolvingData)> {
@@ -1331,22 +1345,24 @@ impl PackageTemplate {
 	pub(crate) fn package_amount(&self) -> u64 {
 		let mut amounts = 0;
 		for (_, outp) in self.inputs.iter() {
+			let outp: &PackageSolvingData = outp;
 			amounts += outp.amount();
 		}
 		amounts
 	}
 	#[rustfmt::skip]
 	fn signed_locktime(&self) -> Option<u32> {
-		let signed_locktime = self.inputs.iter().find_map(|(_, outp)| outp.signed_locktime());
+		let signed_locktime = self.inputs.iter().find_map(|(_, outp): &(_, PackageSolvingData)| outp.signed_locktime());
 		#[cfg(debug_assertions)]
 		for (_, outp) in &self.inputs {
+			let outp: &PackageSolvingData = outp;
 			debug_assert!(outp.signed_locktime().is_none() || outp.signed_locktime() == signed_locktime);
 		}
 		signed_locktime
 	}
 	#[rustfmt::skip]
 	pub(crate) fn package_locktime(&self, current_height: u32) -> u32 {
-		let minimum_locktime = self.inputs.iter().filter_map(|(_, outp)| outp.minimum_locktime()).max();
+		let minimum_locktime = self.inputs.iter().filter_map(|(_, outp): &(_, PackageSolvingData)| outp.minimum_locktime()).max();
 		if let Some(signed_locktime) = self.signed_locktime() {
 			debug_assert!(minimum_locktime.is_none());
@@ -1359,6 +1375,7 @@ impl PackageTemplate {
 		let mut inputs_weight = 0;
 		let mut witnesses_weight = 2; // count segwit flags
 		for (_, outp) in self.inputs.iter() {
+			let outp: &PackageSolvingData = outp;
 			// previous_out_point: 36 bytes ; var_int: 1 byte ; sequence: 4 bytes
 			inputs_weight += 41 * WITNESS_SCALE_FACTOR;
 			witnesses_weight += outp.weight();
@@ -1378,6 +1395,7 @@ impl PackageTemplate {
 		for (previous_output, input) in &self.inputs {
 			match input {
 				PackageSolvingData::HolderHTLCOutput(ref outp) => {
+					let outp: &HolderHTLCOutput = outp;
 					let free_htlcs = outp.channel_type_features.supports_anchors_zero_fee_htlc_tx();
 					let free_commitments = outp.channel_type_features.supports_anchor_zero_fee_commitments();
 
@@ -1392,9 +1410,9 @@ impl PackageTemplate {
 		htlcs
 	}
 	#[rustfmt::skip]
-	pub(crate) fn maybe_finalize_malleable_package(
+	pub(crate) fn maybe_finalize_malleable_package(
 		&self, current_height: u32, onchain_handler: &mut OnchainTxHandler, value: Amount,
-		destination_script: ScriptBuf, logger: &L
+		destination_script: ScriptBuf,
 	) -> Option<MaybeSignedTransaction> {
 		debug_assert!(self.is_malleable());
 		let mut bumped_tx = Transaction {
@@ -1407,20 +1425,23 @@ impl PackageTemplate {
 			}],
 		};
 		for (outpoint, outp) in self.inputs.iter() {
+			let outp: &PackageSolvingData = outp;
 			bumped_tx.input.push(outp.as_tx_input(*outpoint));
 		}
 		for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
+			let out: &PackageSolvingData = out;
 			log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
 			if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { continue; }
 		}
 		Some(MaybeSignedTransaction(bumped_tx))
 	}
 	#[rustfmt::skip]
-	pub(crate) fn maybe_finalize_untractable_package(
-		&self, onchain_handler: &mut OnchainTxHandler, logger: &L,
+	pub(crate) fn maybe_finalize_untractable_package(
+		&self, onchain_handler: &mut OnchainTxHandler,
 	) -> Option<MaybeSignedTransaction> {
 		debug_assert!(!self.is_malleable());
 		if let Some((outpoint, outp)) = self.inputs.first() {
+			let outp: &PackageSolvingData = outp;
 			if let Some(final_tx) = outp.get_maybe_finalized_tx(outpoint, onchain_handler) {
 				log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
 				return Some(final_tx);
@@ -1512,9 +1533,9 @@ impl PackageTemplate {
 	/// which was used to generate the value. Will not return less than `dust_limit_sats` for the
 	/// value.
 	#[rustfmt::skip]
-	pub(crate) fn compute_package_output(
+	pub(crate) fn compute_package_output(
 		&self, predicted_weight: u64, dust_limit_sats: u64, feerate_strategy: &FeerateStrategy,
-		conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
+		conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator<F>,
 	) -> Option<(u64, u64)>
 	where F::Target: FeeEstimator,
 	{
@@ -1592,7 +1613,7 @@ impl PackageTemplate {
 		txid: Txid, vout: u32, input_solving_data: PackageSolvingData,
 		counterparty_spendable_height: u32,
 	) -> Self {
-		let malleability = PackageSolvingData::map_output_type_flags(&input_solving_data);
+		let malleability = map_output_type_flags(&input_solving_data);
 		let inputs = vec![(BitcoinOutPoint { txid, vout }, input_solving_data)];
 		PackageTemplate {
 			inputs,
@@ -1633,7 +1654,7 @@ impl Readable for PackageTemplate {
 			inputs.push((outpoint, rev_outp));
 		}
 		let malleability = if let Some((_, lead_input)) = inputs.first() {
-			PackageSolvingData::map_output_type_flags(&lead_input)
+			map_output_type_flags(&lead_input)
 		} else { return Err(DecodeError::InvalidValue); };
 		let mut counterparty_spendable_height = 0;
 		let mut feerate_previous = 0;
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 659735cc0a2..df0ddd875e4 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -3264,7 +3264,7 @@ where
 				context.is_manual_broadcast,
 			);
 			channel_monitor.provide_initial_counterparty_commitment_tx(
-				counterparty_initial_commitment_tx.clone(),
+				counterparty_initial_commitment_tx.clone(), logger,
 			);
 			self.context_mut().counterparty_next_commitment_transaction_number -= 1;
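
Editor's note on the pattern above (a sketch, not part of the patch): the package.rs methods keep calling log_debug!(logger, ...) even though their explicit logger parameters are removed from the signatures, which suggests the #[lightning_macros::add_logging(...)] attribute re-injects a logger argument into the annotated impl blocks and into calls on the listed otherstruct types. The added lines such as `let request: &mut PackageTemplate = request;` and `let outp: &PackageSolvingData = outp;` are no-op type ascriptions that spell out a binding's concrete type at the use site. The stand-alone Rust sketch below illustrates both ideas with hypothetical names (Logger, Request, bump_all) that do not exist in the patch, and uses a plain `L: Logger` bound rather than the `L: Deref` / `L::Target: Logger` form seen in the diff.

// Editor's illustration only; hypothetical types, not LDK code.
use std::fmt::Arguments;

trait Logger {
	fn log(&self, args: Arguments<'_>);
}

struct StdoutLogger;

impl Logger for StdoutLogger {
	fn log(&self, args: Arguments<'_>) {
		println!("{}", args);
	}
}

struct Request {
	feerate_sat_per_1000_weight: u32,
}

impl Request {
	// A trailing `logger` parameter, mirroring how the patch threads loggers
	// through helpers instead of having them construct their own.
	fn set_feerate<L: Logger>(&mut self, new_feerate: u32, logger: &L) {
		logger.log(format_args!(
			"bumping feerate {} -> {}",
			self.feerate_sat_per_1000_weight, new_feerate
		));
		self.feerate_sat_per_1000_weight = new_feerate;
	}
}

fn bump_all<L: Logger>(requests: &mut [Request], logger: &L) {
	for request in requests.iter_mut() {
		// A no-op type ascription: behavior is unchanged, but the binding's
		// concrete type is written out at the use site, as the
		// `let request: &mut PackageTemplate = request;` lines in the patch do.
		let request: &mut Request = request;
		request.set_feerate(1000, logger);
	}
}

fn main() {
	let mut requests = vec![Request { feerate_sat_per_1000_weight: 253 }];
	bump_all(&mut requests, &StdoutLogger);
}

The relocation of map_output_type_flags from a PackageSolvingData method to a free function fits the same picture: call sites now read map_output_type_flags(&input_solving_data), and a plain function outside the annotated impl blocks is presumably left untouched by the logging attribute.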