diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..14aa642 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,36 @@ +# Signet Node Components + +## Branches + +The `main` branch is in maintenance and bug-fix mode. The `develop` branch +contains current work. + +## Commands + +- `cargo +nightly fmt` - format +- `cargo clippy -p <crate> --all-features --all-targets` - lint with features +- `cargo clippy -p <crate> --no-default-features --all-targets` - lint without +- `cargo t -p <crate>` - test specific crate + +Pre-commit: clippy (both feature sets where applicable) + fmt. Never use `cargo check/build`. + +## Style + +- Functional combinators over imperative control flow +- `let else` for early returns, avoid nesting +- No glob imports; group imports from same crate; no blank lines between imports +- Private by default, `pub(crate)` for internal, `pub` for API only; never `pub(super)` +- `thiserror` for library errors, `eyre` for apps, never `anyhow` +- `tracing` for instrumentation: instrument work items not long-lived tasks; `skip(self)` on methods +- Builders for structs with >4 fields or multiple same-type fields +- Tests: fail fast with `unwrap()`, never return `Result`; unit tests in `mod tests` +- Rustdoc on all public items with usage examples; hide scaffolding with `#` +- `// SAFETY:` comments on all unsafe blocks + +## Workspace Crates + +All crates use `signet-` prefix. Features exist in: +- `signet-blobber`: `test-utils` +- `signet-node-config`: `test_utils` + +Other crates (`signet-node`, `signet-node-types`, `signet-rpc`, `signet-db`, `signet-block-processor`, `signet-genesis`, `signet-node-tests`) have no feature flags — lint with `--all-features` only. 
diff --git a/Cargo.toml b/Cargo.toml index e98d70b..2ed784e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,26 +74,26 @@ alloy = { version = "1.4.0", features = [ alloy-contract = { version = "1.4.0", features = ["pubsub"] } # Reth -reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-eth-wire-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-libmdbx = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } +reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-ethereum = { git = 
"https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-eth-wire-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-libmdbx = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } # Foundry periphery foundry-blob-explorers = "0.17" diff --git a/README.md b/README.md index 609a8e8..42c7067 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,8 @@ # Signet Node Components +> **Note:** The `main` branch is in maintenance and bug-fix mode. The +> `develop` branch contains current work. + A collection of components for building the Signet node. 
These components implement core node functionality, but are potentially indepedently useful. diff --git a/crates/blobber/src/shim.rs b/crates/blobber/src/shim.rs index 0c56d90..a7ac340 100644 --- a/crates/blobber/src/shim.rs +++ b/crates/blobber/src/shim.rs @@ -59,8 +59,10 @@ impl From for RecoveredBlockShim { } impl HasTxns for RecoveredBlockShim { - fn transactions(&self) -> &[signet_types::primitives::TransactionSigned] { - &self.block.sealed_block().body().transactions + fn transactions( + &self, + ) -> impl ExactSizeIterator { + self.block.sealed_block().body().transactions.iter() } } diff --git a/crates/db/src/consistency.rs b/crates/db/src/consistency.rs index 5c81f97..3d96786 100644 --- a/crates/db/src/consistency.rs +++ b/crates/db/src/consistency.rs @@ -75,11 +75,9 @@ where // * pruning data was interrupted before a config commit, then we // have deleted data that we are expected to still have. We need // to check the Database and unwind everything accordingly. - if sfp.is_read_only() { - sfp.check_segment_consistency(segment)?; - } else { - // Fetching the writer will attempt to heal any file level - // inconsistency. + // Fetching the writer will attempt to heal any file level + // inconsistency. + if !sfp.is_read_only() { sfp.latest_writer(segment)?; } @@ -157,6 +155,10 @@ where >( self, segment, highest_tx, highest_block )?, + // StorageChangeSets uses BlockNumberAddress keys (not u64), + // so our simplified ensure_invariants doesn't apply. Signet + // does not use storage change sets in static files. + StaticFileSegment::StorageChangeSets => None, } { update_last_good_height(unwind); } @@ -231,6 +233,7 @@ where StageId::Execution } StaticFileSegment::TransactionSenders => StageId::SenderRecovery, + StaticFileSegment::StorageChangeSets => StageId::Execution, })? 
.unwrap_or_default() .block_number; diff --git a/crates/db/src/convert.rs b/crates/db/src/convert.rs index 7737b84..53f2ccc 100644 --- a/crates/db/src/convert.rs +++ b/crates/db/src/convert.rs @@ -104,6 +104,6 @@ impl DataCompat for reth::primitives::Se impl DataCompat for signet_types::primitives::SealedHeader { fn convert(self) -> reth::primitives::SealedHeader { - reth::primitives::SealedHeader::new_unhashed(self.header().to_owned()) + reth::primitives::SealedHeader::new_unhashed(self.clone_inner()) } } diff --git a/crates/db/src/journal/trait.rs b/crates/db/src/journal/trait.rs index 014714f..1187e55 100644 --- a/crates/db/src/journal/trait.rs +++ b/crates/db/src/journal/trait.rs @@ -1,9 +1,9 @@ use crate::RuWriter; -use alloy::consensus::{BlockHeader, Header}; +use alloy::consensus::BlockHeader; use reth::{providers::ProviderResult, revm::db::BundleState}; use signet_evm::{BlockResult, ExecutionOutcome}; use signet_journal::HostJournal; -use signet_types::primitives::{RecoveredBlock, SealedBlock, SealedHeader, TransactionSigned}; +use signet_types::primitives::{SealedBlock, SealedHeader}; /// A database that can be updated with journals. 
pub trait JournalDb: RuWriter { @@ -31,13 +31,8 @@ pub trait JournalDb: RuWriter { let bundle_state: BundleState = bsi.into(); let execution_outcome = ExecutionOutcome::new(bundle_state, vec![], header.number()); - let block: SealedBlock = - SealedBlock { header: SealedHeader::new(header), body: Default::default() }; - let block_result = BlockResult { - sealed_block: RecoveredBlock::new(block, vec![]), - execution_outcome, - host_height, - }; + let block = SealedBlock::new(SealedHeader::new(header), vec![]); + let block_result = BlockResult { sealed_block: block, execution_outcome, host_height }; self.append_host_block( None, diff --git a/crates/db/src/provider.rs b/crates/db/src/provider.rs index f2cd7da..d06b5a7 100644 --- a/crates/db/src/provider.rs +++ b/crates/db/src/provider.rs @@ -7,6 +7,7 @@ use alloy::{ consensus::BlockHeader, primitives::{Address, B256, BlockNumber}, }; +use reth::{core::primitives::StorageSlotKey, providers::ChangesetEntry}; use reth::{ primitives::StaticFileSegment, providers::{ @@ -21,7 +22,7 @@ use reth_db::{ tables, transaction::{DbTx, DbTxMut}, }; -use reth_prune_types::{MINIMUM_PRUNING_DISTANCE, PruneMode}; +use reth_prune_types::{MINIMUM_UNWIND_SAFE_DISTANCE, PruneMode}; use signet_evm::BlockResult; use signet_node_types::NodeTypesDbTrait; use signet_types::primitives::RecoveredBlock; @@ -163,8 +164,8 @@ where // Put journal hash into the DB self.tx_ref().put::(block_number, journal_hash)?; - let block_hash = block.block.header.hash(); - let block_header = block.block.header.header(); + let block_hash = block.header.seal(); + let block_header = block.header.inner(); self.static_file_provider() .get_writer(block_number, StaticFileSegment::Headers)? 
@@ -179,14 +180,14 @@ where .map(|(n, _)| n + 1) .unwrap_or_default(); let first_tx_num = next_tx_num; - let tx_count = block.block.body.transactions.len() as u64; + let tx_count = block.transactions.len() as u64; - for (sender, transaction) in block.senders.iter().zip(block.block.body.transactions()) { + for (sender, transaction) in block.senders().zip(block.transactions()) { let hash = *transaction.hash(); debug_assert_ne!(hash, B256::ZERO, "transaction hash is zero"); if self.prune_modes_ref().sender_recovery.as_ref().is_none_or(|m| !m.is_full()) { - self.tx_ref().put::(next_tx_num, *sender)?; + self.tx_ref().put::(next_tx_num, sender)?; } if self.prune_modes_ref().transaction_lookup.is_none_or(|m| !m.is_full()) { @@ -231,7 +232,7 @@ where // Increment block on static file header tx_writer.increment_block(block_number)?; - let tx_count = block.block.body.transactions.len() as u64; + let tx_count = block.transactions.len() as u64; let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count }; // insert block meta @@ -243,7 +244,7 @@ where } // Write transactions - for transaction in block.block.body.transactions() { + for transaction in block.transactions() { tx_writer.append_transaction(next_tx_num, transaction)?; // Increment transaction id for each transaction @@ -449,7 +450,9 @@ where .walk_range(storage_start..)? .collect::, _>>()?; - self.unwind_storage_history_indices(changed_storages.iter().copied())?; + self.unwind_storage_history_indices(changed_storages.iter().map(|(bna, entry)| { + (*bna, ChangesetEntry { key: StorageSlotKey::plain(entry.key), value: entry.value }) + }))?; // We also skip calculating the reverted root here. } @@ -523,7 +526,9 @@ where .walk_range(storage_start..)? 
.collect::, _>>()?; - self.unwind_storage_history_indices(changed_storages.iter().copied())?; + self.unwind_storage_history_indices(changed_storages.iter().map(|(bna, entry)| { + (*bna, ChangesetEntry { key: StorageSlotKey::plain(entry.key), value: entry.value }) + }))?; // We also skip calculating the reverted root here. } @@ -604,7 +609,7 @@ where // All receipts from the last 128 blocks are required for blockchain tree, even with // [`PruneSegment::ContractLogs`]. let prunable_receipts = - PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(first_block, tip); + PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE).should_prune(first_block, tip); for (idx, (receipts, first_tx_index)) in execution_outcome.receipts().iter().zip(block_indices).enumerate() diff --git a/crates/db/tests/common/mod.rs b/crates/db/tests/common/mod.rs index d230e90..e57cc65 100644 --- a/crates/db/tests/common/mod.rs +++ b/crates/db/tests/common/mod.rs @@ -35,5 +35,6 @@ pub fn create_test_provider_factory() -> ProviderFactory> let sfp = StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"); let rocks_db = RocksDBProvider::builder(rocksdb_dir.keep()).build().unwrap(); - ProviderFactory::new(db, chain_spec(), sfp, rocks_db).expect("provider factory") + ProviderFactory::new(db, chain_spec(), sfp, rocks_db, Default::default()) + .expect("provider factory") } diff --git a/crates/db/tests/db.rs b/crates/db/tests/db.rs index 6d4f769..e8123f9 100644 --- a/crates/db/tests/db.rs +++ b/crates/db/tests/db.rs @@ -2,14 +2,14 @@ mod test_common; use alloy::{ - consensus::{BlockBody, BlockHeader, Signed, TxEip1559, TxEnvelope}, + consensus::{BlockHeader, Signed, TxEip1559, TxEnvelope}, primitives::{Address, B256, U256}, signers::Signature, }; use reth::providers::{BlockNumReader, BlockReader}; use signet_constants::test_utils::{DEPLOY_HEIGHT, RU_CHAIN_ID}; use signet_db::RuWriter; -use signet_types::primitives::{RecoveredBlock, SealedBlock, SealedHeader, TransactionSigned}; 
+use signet_types::primitives::{SealedBlock, SealedHeader, TransactionSigned}; use signet_zenith::Zenith; #[test] @@ -35,25 +35,19 @@ fn test_insert_signet_block() { blockDataHash: B256::repeat_byte(0x22), }); - let block = RecoveredBlock { - block: SealedBlock { - header: SealedHeader::new(alloy::consensus::Header::default()), - body: BlockBody { - transactions: std::iter::repeat_n( - TxEnvelope::Eip1559(Signed::new_unhashed( - TxEip1559::default(), - Signature::test_signature(), - )) - .into(), - 10, - ) - .collect(), - ommers: vec![], - withdrawals: None, - }, - }, - senders: std::iter::repeat_n(Address::repeat_byte(0x33), 10).collect(), - }; + let transactions: Vec = std::iter::repeat_n( + TxEnvelope::Eip1559(Signed::new_unhashed( + TxEip1559::default(), + Signature::test_signature(), + )) + .into(), + 10, + ) + .collect(); + let senders: Vec
= std::iter::repeat_n(Address::repeat_byte(0x33), 10).collect(); + let sealed = + SealedBlock::new(SealedHeader::new(alloy::consensus::Header::default()), transactions); + let block = sealed.recover_unchecked(senders); writer.insert_signet_block(header, &block, journal_hash).unwrap(); writer.commit().unwrap(); @@ -74,8 +68,8 @@ fn test_insert_signet_block() { .first() .cloned() .unwrap(); - assert_eq!(loaded_block.header(), block.block.header.header()); - assert_eq!(loaded_block.body().transactions.len(), block.block.body.transactions.len()); + assert_eq!(loaded_block.header(), block.header.inner()); + assert_eq!(loaded_block.body().transactions.len(), block.transactions.len()); // Check that the ZenithHeader can be loaded back let loaded_header = reader.get_signet_header(block.number()).unwrap(); @@ -111,13 +105,10 @@ fn test_transaction_hash_indexing() { let expected_hashes: Vec = transactions.iter().map(|tx: &TransactionSigned| *tx.hash()).collect(); - let block = RecoveredBlock { - block: SealedBlock { - header: SealedHeader::new(alloy::consensus::Header::default()), - body: BlockBody { transactions, ommers: vec![], withdrawals: None }, - }, - senders: std::iter::repeat_n(Address::repeat_byte(0x33), 5).collect(), - }; + let senders: Vec
= std::iter::repeat_n(Address::repeat_byte(0x33), 5).collect(); + let sealed = + SealedBlock::new(SealedHeader::new(alloy::consensus::Header::default()), transactions); + let block = sealed.recover_unchecked(senders); writer.insert_signet_block(header, &block, journal_hash).unwrap(); writer.commit().unwrap(); diff --git a/crates/node-tests/src/convert.rs b/crates/node-tests/src/convert.rs index 8b6977c..8d6e640 100644 --- a/crates/node-tests/src/convert.rs +++ b/crates/node-tests/src/convert.rs @@ -46,9 +46,14 @@ impl ToRethPrimitive for SealedBlock { type RethPrimitive = reth::primitives::SealedBlock; fn to_reth(self) -> Self::RethPrimitive { - let (hash, header) = self.header.split(); + let (header, hash) = self.header.into_parts(); + let body = alloy::consensus::BlockBody { + transactions: self.transactions, + ommers: vec![], + withdrawals: None, + }; reth::primitives::SealedBlock::new_unchecked( - reth::primitives::Block::new(header, self.body), + reth::primitives::Block::new(header, body), hash, ) } @@ -58,8 +63,21 @@ impl ToRethPrimitive for RecoveredBlock { type RethPrimitive = reth::primitives::RecoveredBlock; fn to_reth(self) -> Self::RethPrimitive { - let hash = self.block.header.hash(); - reth::primitives::RecoveredBlock::new(self.block.to_reth().into_block(), self.senders, hash) + let (header, hash) = self.header.into_parts(); + let (senders, transactions): (Vec<_>, Vec<_>) = self + .transactions + .into_iter() + .map(|r| { + let (tx, sender) = r.into_parts(); + (sender, tx) + }) + .unzip(); + let body = alloy::consensus::BlockBody { transactions, ommers: vec![], withdrawals: None }; + reth::primitives::RecoveredBlock::new( + reth::primitives::Block::new(header, body), + senders, + hash, + ) } } diff --git a/crates/node-tests/src/utils.rs b/crates/node-tests/src/utils.rs index 6d40508..9f5f11f 100644 --- a/crates/node-tests/src/utils.rs +++ b/crates/node-tests/src/utils.rs @@ -140,5 +140,5 @@ pub fn create_test_provider_factory_with_chain_spec( let 
rocks = RocksDBProvider::builder(rocks.keep()).build().expect("rocksdb provider"); - ProviderFactory::new(db, chain_spec, sfp, rocks).unwrap() + ProviderFactory::new(db, chain_spec, sfp, rocks, Default::default()).unwrap() } diff --git a/crates/node/src/builder.rs b/crates/node/src/builder.rs index 1cc4205..d9c800d 100644 --- a/crates/node/src/builder.rs +++ b/crates/node/src/builder.rs @@ -14,7 +14,6 @@ use signet_block_processor::AliasOracleFactory; use signet_db::DbProviderExt; use signet_node_config::SignetNodeConfig; use signet_node_types::{NodeStatus, NodeTypesDbTrait, SignetNodeTypes}; -use std::sync::Arc; /// A type that does not implement [`AliasOracleFactory`]. #[derive(Debug, Clone, Copy)] @@ -66,11 +65,14 @@ impl SignetNodeBuilder { self, db: NewDb, ) -> eyre::Result>, Aof>> { + let runtime = + reth::tasks::Runtime::with_existing_handle(tokio::runtime::Handle::current())?; let factory = ProviderFactory::new( db, self.config.chain_spec().clone(), self.config.static_file_rw()?, self.config.open_rocks_db()?, + runtime, )?; Ok(SignetNodeBuilder { @@ -86,14 +88,17 @@ impl SignetNodeBuilder { pub fn with_config_db( self, ) -> eyre::Result< - SignetNodeBuilder>>, Aof>, + SignetNodeBuilder>, Aof>, > { + let runtime = + reth::tasks::Runtime::with_existing_handle(tokio::runtime::Handle::current())?; let factory = ProviderFactory::new_with_database_path( self.config.database_path(), self.config.chain_spec().clone(), reth_db::mdbx::DatabaseArguments::default(), self.config.static_file_rw()?, self.config.open_rocks_db()?, + runtime, )?; Ok(SignetNodeBuilder { config: self.config, @@ -230,7 +235,7 @@ where pub fn build( self, ) -> eyre::Result<( - SignetNode, Box>, + SignetNode>, tokio::sync::watch::Receiver, )> { self.with_config_db()?.build() @@ -254,7 +259,7 @@ where pub fn build( self, ) -> eyre::Result<( - SignetNode, Aof>, + SignetNode, tokio::sync::watch::Receiver, )> { self.with_config_db()?.build() diff --git a/crates/rpc/src/ctx/signet.rs 
b/crates/rpc/src/ctx/signet.rs index 5ca08dc..8c99f17 100644 --- a/crates/rpc/src/ctx/signet.rs +++ b/crates/rpc/src/ctx/signet.rs @@ -126,7 +126,7 @@ where cache.clone(), ); - spawner.spawn_critical("fee_history_cache_new_blocks", Box::pin(fee_task)); + spawner.spawn_critical_task("fee_history_cache_new_blocks", Box::pin(fee_task)); let filters = FilterManager::new(eth_config.stale_filter_ttl, eth_config.stale_filter_ttl); diff --git a/crates/rpc/src/inspect/db.rs b/crates/rpc/src/inspect/db.rs index 73f2cde..37b9ae3 100644 --- a/crates/rpc/src/inspect/db.rs +++ b/crates/rpc/src/inspect/db.rs @@ -106,9 +106,9 @@ impl>> TableViewer<()> self.factory.db_ref().view(|tx| { let table_db = - tx.inner.open_db(Some(self.args.table_name())).wrap_err("Could not open db.")?; + tx.inner().open_db(Some(self.args.table_name())).wrap_err("Could not open db.")?; let stats = tx - .inner + .inner() .db_stat(table_db.dbi()) .wrap_err(format!("Could not find table: {}", stringify!($table)))?; let total_entries = stats.entries(); diff --git a/crates/rpc/src/utils.rs b/crates/rpc/src/utils.rs index a20d561..f3f4298 100644 --- a/crates/rpc/src/utils.rs +++ b/crates/rpc/src/utils.rs @@ -154,7 +154,7 @@ async fn serve( } }; - Ok(tasks.spawn(fut)) + Ok(tasks.spawn_task(fut)) } /// Serve the router on the given addresses using axum.