Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 36 additions & 0 deletions CLAUDE.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# Signet Node Components

## Branches

The `main` branch is in maintenance and bug-fix mode. The `develop` branch
contains current work.

## Commands

- `cargo +nightly fmt` - format
- `cargo clippy -p <crate> --all-features --all-targets` - lint with features
- `cargo clippy -p <crate> --no-default-features --all-targets` - lint without
- `cargo t -p <crate>` - test specific crate

Pre-commit: clippy (both feature sets where applicable) + fmt. Never use `cargo check/build`.

## Style

- Functional combinators over imperative control flow
- `let else` for early returns, avoid nesting
- No glob imports; group imports from same crate; no blank lines between imports
- Private by default, `pub(crate)` for internal, `pub` for API only; never `pub(super)`
- `thiserror` for library errors, `eyre` for apps, never `anyhow`
- `tracing` for instrumentation: instrument work items not long-lived tasks; `skip(self)` on methods
- Builders for structs with >4 fields or multiple same-type fields
- Tests: fail fast with `unwrap()`, never return `Result`; unit tests in `mod tests`
- Rustdoc on all public items with usage examples; hide scaffolding with `#`
- `// SAFETY:` comments on all unsafe blocks

## Workspace Crates

All crates use `signet-` prefix. Features exist in:
- `signet-blobber`: `test-utils`
- `signet-node-config`: `test_utils`

Other crates (`signet-node`, `signet-node-types`, `signet-rpc`, `signet-db`, `signet-block-processor`, `signet-genesis`, `signet-node-tests`) have no feature flags — lint with `--all-features` only.
40 changes: 20 additions & 20 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -74,26 +74,26 @@ alloy = { version = "1.4.0", features = [
alloy-contract = { version = "1.4.0", features = ["pubsub"] }

# Reth
reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-eth-wire-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-libmdbx = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-eth-wire-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-libmdbx = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }
reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" }

# Foundry periphery
foundry-blob-explorers = "0.17"
Expand Down
3 changes: 3 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
# Signet Node Components

> **Note:** The `main` branch is in maintenance and bug-fix mode. The
> `develop` branch contains current work.

A collection of components for building the Signet node. These components
implement core node functionality, but are potentially independently useful.

Expand Down
6 changes: 4 additions & 2 deletions crates/blobber/src/shim.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,10 @@ impl From<RethRecovered> for RecoveredBlockShim {
}

impl HasTxns for RecoveredBlockShim {
fn transactions(&self) -> &[signet_types::primitives::TransactionSigned] {
&self.block.sealed_block().body().transactions
fn transactions(
&self,
) -> impl ExactSizeIterator<Item = &signet_types::primitives::TransactionSigned> {
self.block.sealed_block().body().transactions.iter()
}
}

Expand Down
13 changes: 8 additions & 5 deletions crates/db/src/consistency.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,11 +75,9 @@ where
// * pruning data was interrupted before a config commit, then we
// have deleted data that we are expected to still have. We need
// to check the Database and unwind everything accordingly.
if sfp.is_read_only() {
sfp.check_segment_consistency(segment)?;
} else {
// Fetching the writer will attempt to heal any file level
// inconsistency.
// Fetching the writer will attempt to heal any file level
// inconsistency.
if !sfp.is_read_only() {
sfp.latest_writer(segment)?;
}

Expand Down Expand Up @@ -157,6 +155,10 @@ where
>(
self, segment, highest_tx, highest_block
)?,
// StorageChangeSets uses BlockNumberAddress keys (not u64),
// so our simplified ensure_invariants doesn't apply. Signet
// does not use storage change sets in static files.
StaticFileSegment::StorageChangeSets => None,
} {
update_last_good_height(unwind);
}
Expand Down Expand Up @@ -231,6 +233,7 @@ where
StageId::Execution
}
StaticFileSegment::TransactionSenders => StageId::SenderRecovery,
StaticFileSegment::StorageChangeSets => StageId::Execution,
})?
.unwrap_or_default()
.block_number;
Expand Down
2 changes: 1 addition & 1 deletion crates/db/src/convert.rs
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,6 @@ impl DataCompat<signet_types::primitives::SealedHeader> for reth::primitives::Se

impl DataCompat<reth::primitives::SealedHeader> for signet_types::primitives::SealedHeader {
fn convert(self) -> reth::primitives::SealedHeader {
reth::primitives::SealedHeader::new_unhashed(self.header().to_owned())
reth::primitives::SealedHeader::new_unhashed(self.clone_inner())
}
}
13 changes: 4 additions & 9 deletions crates/db/src/journal/trait.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
use crate::RuWriter;
use alloy::consensus::{BlockHeader, Header};
use alloy::consensus::BlockHeader;
use reth::{providers::ProviderResult, revm::db::BundleState};
use signet_evm::{BlockResult, ExecutionOutcome};
use signet_journal::HostJournal;
use signet_types::primitives::{RecoveredBlock, SealedBlock, SealedHeader, TransactionSigned};
use signet_types::primitives::{SealedBlock, SealedHeader};

/// A database that can be updated with journals.
pub trait JournalDb: RuWriter {
Expand Down Expand Up @@ -31,13 +31,8 @@ pub trait JournalDb: RuWriter {
let bundle_state: BundleState = bsi.into();
let execution_outcome = ExecutionOutcome::new(bundle_state, vec![], header.number());

let block: SealedBlock<TransactionSigned, Header> =
SealedBlock { header: SealedHeader::new(header), body: Default::default() };
let block_result = BlockResult {
sealed_block: RecoveredBlock::new(block, vec![]),
execution_outcome,
host_height,
};
let block = SealedBlock::new(SealedHeader::new(header), vec![]);
let block_result = BlockResult { sealed_block: block, execution_outcome, host_height };

self.append_host_block(
None,
Expand Down
27 changes: 16 additions & 11 deletions crates/db/src/provider.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ use alloy::{
consensus::BlockHeader,
primitives::{Address, B256, BlockNumber},
};
use reth::{core::primitives::StorageSlotKey, providers::ChangesetEntry};
use reth::{
primitives::StaticFileSegment,
providers::{
Expand All @@ -21,7 +22,7 @@ use reth_db::{
tables,
transaction::{DbTx, DbTxMut},
};
use reth_prune_types::{MINIMUM_PRUNING_DISTANCE, PruneMode};
use reth_prune_types::{MINIMUM_UNWIND_SAFE_DISTANCE, PruneMode};
use signet_evm::BlockResult;
use signet_node_types::NodeTypesDbTrait;
use signet_types::primitives::RecoveredBlock;
Expand Down Expand Up @@ -163,8 +164,8 @@ where
// Put journal hash into the DB
self.tx_ref().put::<crate::JournalHashes>(block_number, journal_hash)?;

let block_hash = block.block.header.hash();
let block_header = block.block.header.header();
let block_hash = block.header.seal();
let block_header = block.header.inner();

self.static_file_provider()
.get_writer(block_number, StaticFileSegment::Headers)?
Expand All @@ -179,14 +180,14 @@ where
.map(|(n, _)| n + 1)
.unwrap_or_default();
let first_tx_num = next_tx_num;
let tx_count = block.block.body.transactions.len() as u64;
let tx_count = block.transactions.len() as u64;

for (sender, transaction) in block.senders.iter().zip(block.block.body.transactions()) {
for (sender, transaction) in block.senders().zip(block.transactions()) {
let hash = *transaction.hash();
debug_assert_ne!(hash, B256::ZERO, "transaction hash is zero");

if self.prune_modes_ref().sender_recovery.as_ref().is_none_or(|m| !m.is_full()) {
self.tx_ref().put::<tables::TransactionSenders>(next_tx_num, *sender)?;
self.tx_ref().put::<tables::TransactionSenders>(next_tx_num, sender)?;
}

if self.prune_modes_ref().transaction_lookup.is_none_or(|m| !m.is_full()) {
Expand Down Expand Up @@ -231,7 +232,7 @@ where
// Increment block on static file header
tx_writer.increment_block(block_number)?;

let tx_count = block.block.body.transactions.len() as u64;
let tx_count = block.transactions.len() as u64;
let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count };

// insert block meta
Expand All @@ -243,7 +244,7 @@ where
}

// Write transactions
for transaction in block.block.body.transactions() {
for transaction in block.transactions() {
tx_writer.append_transaction(next_tx_num, transaction)?;

// Increment transaction id for each transaction
Expand Down Expand Up @@ -449,7 +450,9 @@ where
.walk_range(storage_start..)?
.collect::<Result<Vec<_>, _>>()?;

self.unwind_storage_history_indices(changed_storages.iter().copied())?;
self.unwind_storage_history_indices(changed_storages.iter().map(|(bna, entry)| {
(*bna, ChangesetEntry { key: StorageSlotKey::plain(entry.key), value: entry.value })
}))?;

// We also skip calculating the reverted root here.
}
Expand Down Expand Up @@ -523,7 +526,9 @@ where
.walk_range(storage_start..)?
.collect::<Result<Vec<_>, _>>()?;

self.unwind_storage_history_indices(changed_storages.iter().copied())?;
self.unwind_storage_history_indices(changed_storages.iter().map(|(bna, entry)| {
(*bna, ChangesetEntry { key: StorageSlotKey::plain(entry.key), value: entry.value })
}))?;

// We also skip calculating the reverted root here.
}
Expand Down Expand Up @@ -604,7 +609,7 @@ where
// All receipts from the last 128 blocks are required for blockchain tree, even with
// [`PruneSegment::ContractLogs`].
let prunable_receipts =
PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(first_block, tip);
PruneMode::Distance(MINIMUM_UNWIND_SAFE_DISTANCE).should_prune(first_block, tip);

for (idx, (receipts, first_tx_index)) in
execution_outcome.receipts().iter().zip(block_indices).enumerate()
Expand Down
3 changes: 2 additions & 1 deletion crates/db/tests/common/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,5 +35,6 @@ pub fn create_test_provider_factory() -> ProviderFactory<SignetNodeTypes<TmpDb>>
let sfp = StaticFileProvider::read_write(static_dir.keep()).expect("static file provider");
let rocks_db = RocksDBProvider::builder(rocksdb_dir.keep()).build().unwrap();

ProviderFactory::new(db, chain_spec(), sfp, rocks_db).expect("provider factory")
ProviderFactory::new(db, chain_spec(), sfp, rocks_db, Default::default())
.expect("provider factory")
}
51 changes: 21 additions & 30 deletions crates/db/tests/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,14 @@
mod test_common;

use alloy::{
consensus::{BlockBody, BlockHeader, Signed, TxEip1559, TxEnvelope},
consensus::{BlockHeader, Signed, TxEip1559, TxEnvelope},
primitives::{Address, B256, U256},
signers::Signature,
};
use reth::providers::{BlockNumReader, BlockReader};
use signet_constants::test_utils::{DEPLOY_HEIGHT, RU_CHAIN_ID};
use signet_db::RuWriter;
use signet_types::primitives::{RecoveredBlock, SealedBlock, SealedHeader, TransactionSigned};
use signet_types::primitives::{SealedBlock, SealedHeader, TransactionSigned};
use signet_zenith::Zenith;

#[test]
Expand All @@ -35,25 +35,19 @@ fn test_insert_signet_block() {
blockDataHash: B256::repeat_byte(0x22),
});

let block = RecoveredBlock {
block: SealedBlock {
header: SealedHeader::new(alloy::consensus::Header::default()),
body: BlockBody {
transactions: std::iter::repeat_n(
TxEnvelope::Eip1559(Signed::new_unhashed(
TxEip1559::default(),
Signature::test_signature(),
))
.into(),
10,
)
.collect(),
ommers: vec![],
withdrawals: None,
},
},
senders: std::iter::repeat_n(Address::repeat_byte(0x33), 10).collect(),
};
let transactions: Vec<TransactionSigned> = std::iter::repeat_n(
TxEnvelope::Eip1559(Signed::new_unhashed(
TxEip1559::default(),
Signature::test_signature(),
))
.into(),
10,
)
.collect();
let senders: Vec<Address> = std::iter::repeat_n(Address::repeat_byte(0x33), 10).collect();
let sealed =
SealedBlock::new(SealedHeader::new(alloy::consensus::Header::default()), transactions);
let block = sealed.recover_unchecked(senders);

writer.insert_signet_block(header, &block, journal_hash).unwrap();
writer.commit().unwrap();
Expand All @@ -74,8 +68,8 @@ fn test_insert_signet_block() {
.first()
.cloned()
.unwrap();
assert_eq!(loaded_block.header(), block.block.header.header());
assert_eq!(loaded_block.body().transactions.len(), block.block.body.transactions.len());
assert_eq!(loaded_block.header(), block.header.inner());
assert_eq!(loaded_block.body().transactions.len(), block.transactions.len());

// Check that the ZenithHeader can be loaded back
let loaded_header = reader.get_signet_header(block.number()).unwrap();
Expand Down Expand Up @@ -111,13 +105,10 @@ fn test_transaction_hash_indexing() {
let expected_hashes: Vec<B256> =
transactions.iter().map(|tx: &TransactionSigned| *tx.hash()).collect();

let block = RecoveredBlock {
block: SealedBlock {
header: SealedHeader::new(alloy::consensus::Header::default()),
body: BlockBody { transactions, ommers: vec![], withdrawals: None },
},
senders: std::iter::repeat_n(Address::repeat_byte(0x33), 5).collect(),
};
let senders: Vec<Address> = std::iter::repeat_n(Address::repeat_byte(0x33), 5).collect();
let sealed =
SealedBlock::new(SealedHeader::new(alloy::consensus::Header::default()), transactions);
let block = sealed.recover_unchecked(senders);

writer.insert_signet_block(header, &block, journal_hash).unwrap();
writer.commit().unwrap();
Expand Down
Loading