From cf064ddd9243842093995e628b079daa67c5d181 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Wed, 10 Dec 2025 20:21:25 -0600 Subject: [PATCH 01/42] begin tx recorder for mel --- arbnode/mel/recording/tx_recorder.go | 149 +++++++++++++++++++++++++++ 1 file changed, 149 insertions(+) create mode 100644 arbnode/mel/recording/tx_recorder.go diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go new file mode 100644 index 0000000000..07964c9d19 --- /dev/null +++ b/arbnode/mel/recording/tx_recorder.go @@ -0,0 +1,149 @@ +package recording + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/triedb" + melrunner "github.com/offchainlabs/nitro/arbnode/mel/runner" +) + +type PreimageRecorder struct { + preimages map[common.Hash][]byte +} + +func NewPreimageRecorder() *PreimageRecorder { + return &PreimageRecorder{ + preimages: make(map[common.Hash][]byte), + } +} + +func (pr *PreimageRecorder) GetPreimages() map[common.Hash][]byte { + return pr.preimages +} + +type RecordingDB struct { + underlying triedb.Database + recorder *PreimageRecorder +} + +func (rdb *RecordingDB) Get(key []byte) ([]byte, error) { + hash := common.BytesToHash(key) + value, err := rdb.underlying.Node(hash) + if err != nil { + return nil, err + } + if rdb.recorder != nil { + rdb.recorder.preimages[hash] = value + } + + return value, nil +} + +func (rdb *RecordingDB) Has(key []byte) (bool, error) { + hash := common.BytesToHash(key) + _, err := rdb.underlying.Reader(hash).Node(common.Hash{}, key) + return err == nil, nil +} + +func (rdb *RecordingDB) Put(key []byte, value []byte) error { + return fmt.Errorf("Put not supported on recording DB") +} + +func (rdb *RecordingDB) Delete(key []byte) error { + return fmt.Errorf("Delete 
not supported on recording DB") +} + +type TransactionRecorder struct { + parentChainReader melrunner.ParentChainReader + parentChainBlockHash common.Hash + preimages map[common.Hash][]byte + txs []*types.Transaction +} + +func NewTransactionRecorder( + parentChainReader melrunner.ParentChainReader, + parentChainBlockHash common.Hash, + preimages map[common.Hash][]byte, +) *TransactionRecorder { + return &TransactionRecorder{ + parentChainReader: parentChainReader, + parentChainBlockHash: parentChainBlockHash, + preimages: preimages, + } +} + +func (tr *TransactionRecorder) Initialize(ctx context.Context) error { + block, err := tr.parentChainReader.BlockByHash(ctx, tr.parentChainBlockHash) + if err != nil { + return err + } + tdb := triedb.NewDatabase(nil, &triedb.Config{ + Preimages: true, + }) + txsTrie := trie.NewEmpty(tdb) + txs := block.Body().Transactions + for i, tx := range txs { + indexBytes, err := rlp.EncodeToBytes(uint64(i)) + if err != nil { + return fmt.Errorf("failed to encode index %d: %w", i, err) + } + txBytes, err := tx.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to marshal transaction %d: %w", i, err) + } + if err := txsTrie.Update(indexBytes, txBytes); err != nil { + return fmt.Errorf("failed to update trie at index %d: %w", i, err) + } + } + root, nodes := txsTrie.Commit(false) + if root != block.TxHash() { + return fmt.Errorf("computed root %s doesn't match header root %s", + root.Hex(), block.TxHash().Hex()) + } + if nodes != nil { + if err := tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + return fmt.Errorf("failed to commit trie nodes: %w", err) + } + } + if err := tdb.Commit(root, false); err != nil { + return fmt.Errorf("failed to commit database: %w", err) + } + tr.txs = txs + return nil +} + +func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { + if log == nil { + return nil, errors.New("transactionByLog got 
nil log value") + } + if int(log.TxIndex) >= len(tr.txs) { + return nil, fmt.Errorf("index out of range: %d", log.TxIndex) + } + recorder := NewPreimageRecorder() + recordingDB := &RecordingDB{ + underlying: tl.tdb, + recorder: recorder, + } + recordingTDB := triedb.NewDatabase(recordingDB, nil) + txsTrie, err := trie.New(trie.TrieID(log.TxHash), recordingTDB) + if err != nil { + return nil, fmt.Errorf("failed to create trie: %w", err) + } + indexBytes, err := rlp.EncodeToBytes(log.TxIndex) + if err != nil { + return nil, fmt.Errorf("failed to encode index: %w", err) + } + if _, err = tr.Get(indexBytes); err != nil { + return nil, fmt.Errorf("failed to get transaction from trie: %w", err) + } + // TODO: Return the tx itself instead of nil, but also add the + // tx marshaled binary by hash to the preimages map. + return nil, nil +} From d91e37ec55d26f9f8ec33ef94402c5d28e0c05fe Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Fri, 12 Dec 2025 14:18:06 -0600 Subject: [PATCH 02/42] fix recorder --- arbnode/mel/recording/tx_recorder.go | 179 ++++++++++++++++------ arbnode/mel/recording/tx_recorder_test.go | 7 + 2 files changed, 136 insertions(+), 50 deletions(-) create mode 100644 arbnode/mel/recording/tx_recorder_test.go diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 07964c9d19..0881c0f56a 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -7,6 +7,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" @@ -14,57 +16,12 @@ import ( melrunner "github.com/offchainlabs/nitro/arbnode/mel/runner" ) -type PreimageRecorder struct { - preimages map[common.Hash][]byte -} - -func NewPreimageRecorder() *PreimageRecorder { - return 
&PreimageRecorder{ - preimages: make(map[common.Hash][]byte), - } -} - -func (pr *PreimageRecorder) GetPreimages() map[common.Hash][]byte { - return pr.preimages -} - -type RecordingDB struct { - underlying triedb.Database - recorder *PreimageRecorder -} - -func (rdb *RecordingDB) Get(key []byte) ([]byte, error) { - hash := common.BytesToHash(key) - value, err := rdb.underlying.Node(hash) - if err != nil { - return nil, err - } - if rdb.recorder != nil { - rdb.recorder.preimages[hash] = value - } - - return value, nil -} - -func (rdb *RecordingDB) Has(key []byte) (bool, error) { - hash := common.BytesToHash(key) - _, err := rdb.underlying.Reader(hash).Node(common.Hash{}, key) - return err == nil, nil -} - -func (rdb *RecordingDB) Put(key []byte, value []byte) error { - return fmt.Errorf("Put not supported on recording DB") -} - -func (rdb *RecordingDB) Delete(key []byte) error { - return fmt.Errorf("Delete not supported on recording DB") -} - type TransactionRecorder struct { parentChainReader melrunner.ParentChainReader parentChainBlockHash common.Hash preimages map[common.Hash][]byte txs []*types.Transaction + trieDB *triedb.Database } func NewTransactionRecorder( @@ -116,10 +73,14 @@ func (tr *TransactionRecorder) Initialize(ctx context.Context) error { return fmt.Errorf("failed to commit database: %w", err) } tr.txs = txs + tr.trieDB = tdb return nil } func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { + if tr.trieDB == nil { + return nil, errors.New("TransactionRecorder not initialized") + } if log == nil { return nil, errors.New("transactionByLog got nil log value") } @@ -128,7 +89,7 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. 
} recorder := NewPreimageRecorder() recordingDB := &RecordingDB{ - underlying: tl.tdb, + underlying: tr.trieDB, recorder: recorder, } recordingTDB := triedb.NewDatabase(recordingDB, nil) @@ -140,10 +101,128 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. if err != nil { return nil, fmt.Errorf("failed to encode index: %w", err) } - if _, err = tr.Get(indexBytes); err != nil { + txBytes, err := txsTrie.Get(indexBytes) + if err != nil { return nil, fmt.Errorf("failed to get transaction from trie: %w", err) } - // TODO: Return the tx itself instead of nil, but also add the + // Return the tx itself instead of nil, but also add the // tx marshaled binary by hash to the preimages map. - return nil, nil + tr.preimages[crypto.Keccak256Hash(txBytes)] = txBytes + tx := new(types.Transaction) + if err = tx.UnmarshalBinary(txBytes); err != nil { + return nil, fmt.Errorf("failed to unmarshal transaction: %w", err) + } + return tx, nil +} + +type PreimageRecorder struct { + preimages map[common.Hash][]byte +} + +func NewPreimageRecorder() *PreimageRecorder { + return &PreimageRecorder{ + preimages: make(map[common.Hash][]byte), + } +} + +func (pr *PreimageRecorder) GetPreimages() map[common.Hash][]byte { + return pr.preimages +} + +type RecordingDB struct { + underlying *triedb.Database + recorder *PreimageRecorder +} + +func (rdb *RecordingDB) Get(key []byte) ([]byte, error) { + hash := common.BytesToHash(key) + value, err := rdb.underlying.Node(hash) + if err != nil { + return nil, err + } + if rdb.recorder != nil { + rdb.recorder.preimages[hash] = value + } + + return value, nil +} +func (rdb *RecordingDB) Has(key []byte) (bool, error) { + hash := common.BytesToHash(key) + _, err := rdb.underlying.Node(hash) + return err == nil, nil +} +func (rdb *RecordingDB) Put(key []byte, value []byte) error { + return fmt.Errorf("Put not supported on recording DB") +} +func (rdb *RecordingDB) Delete(key []byte) error { + return fmt.Errorf("Delete not 
supported on recording DB") +} +func (rdb *RecordingDB) DeleteRange(start, end []byte) error { + return fmt.Errorf("DeleteRange not supported on recording DB") +} +func (rdb *RecordingDB) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { + return fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *RecordingDB) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { + return 0, fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *RecordingDB) SyncAncient() error { + return fmt.Errorf("SyncAncient not supported on recording DB") +} +func (rdb *RecordingDB) TruncateHead(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateHead not supported on recording DB") +} +func (rdb *RecordingDB) TruncateTail(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateTail not supported on recording DB") +} +func (rdb *RecordingDB) Append(kind string, number uint64, item interface{}) error { + return fmt.Errorf("Append not supported on recording DB") +} +func (rdb *RecordingDB) AppendRaw(kind string, number uint64, item []byte) error { + return fmt.Errorf("AppendRaw not supported on recording DB") +} +func (rdb *RecordingDB) AncientDatadir() (string, error) { + return "", fmt.Errorf("AncientDatadir not supported on recording DB") +} +func (rdb *RecordingDB) Ancient(kind string, number uint64) ([]byte, error) { + return nil, fmt.Errorf("Ancient not supported on recording DB") +} +func (rdb *RecordingDB) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + return nil, fmt.Errorf("AncientRange not supported on recording DB") +} +func (rdb *RecordingDB) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + return nil, fmt.Errorf("AncientBytes not supported on recording DB") +} +func (rdb *RecordingDB) Ancients() (uint64, error) { + return 0, fmt.Errorf("Ancients not supported on recording DB") +} +func (rdb *RecordingDB) Tail() (uint64, error) { + return 0, 
fmt.Errorf("Tail not supported on recording DB") +} +func (rdb *RecordingDB) AncientSize(kind string) (uint64, error) { + return 0, fmt.Errorf("AncientSize not supported on recording DB") +} +func (rdb *RecordingDB) Compact(start []byte, limit []byte) error { + return nil +} +func (rdb *RecordingDB) SyncKeyValue() error { + return nil +} +func (rdb *RecordingDB) Stat() (string, error) { + return "", nil +} +func (rdb *RecordingDB) WasmDataBase() ethdb.KeyValueStore { + return nil +} +func (rdb *RecordingDB) NewBatch() ethdb.Batch { + return nil +} +func (rdb *RecordingDB) NewBatchWithSize(size int) ethdb.Batch { + return nil +} +func (rdb *RecordingDB) NewIterator(prefix []byte, start []byte) ethdb.Iterator { + return nil +} +func (rdb *RecordingDB) Close() error { + return nil } diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go new file mode 100644 index 0000000000..4430b729c0 --- /dev/null +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -0,0 +1,7 @@ +package recording + +import "testing" + +func TestTransactionByLog(t *testing.T) { + +} From 20436a819a57f782ecfc53fcfc8bcb476b6fdd38 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Fri, 12 Dec 2025 14:25:38 -0600 Subject: [PATCH 03/42] add unit test for tx recorder --- arbnode/mel/recording/tx_recorder.go | 9 ++-- arbnode/mel/recording/tx_recorder_test.go | 64 ++++++++++++++++++++++- 2 files changed, 69 insertions(+), 4 deletions(-) diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 0881c0f56a..2ed62bb91e 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -13,11 +13,14 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/triedb" - melrunner "github.com/offchainlabs/nitro/arbnode/mel/runner" ) +type BlockReader interface { + BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) +} 
+ type TransactionRecorder struct { - parentChainReader melrunner.ParentChainReader + parentChainReader BlockReader parentChainBlockHash common.Hash preimages map[common.Hash][]byte txs []*types.Transaction @@ -25,7 +28,7 @@ type TransactionRecorder struct { } func NewTransactionRecorder( - parentChainReader melrunner.ParentChainReader, + parentChainReader BlockReader, parentChainBlockHash common.Hash, preimages map[common.Hash][]byte, ) *TransactionRecorder { diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go index 4430b729c0..0d3182815d 100644 --- a/arbnode/mel/recording/tx_recorder_test.go +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -1,7 +1,69 @@ package recording -import "testing" +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie" + "github.com/stretchr/testify/require" +) + +type mockBlockReader struct { + blocks map[common.Hash]*types.Block +} + +func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + block, exists := mbr.blocks[hash] + if !exists { + return nil, nil + } + return block, nil +} func TestTransactionByLog(t *testing.T) { + ctx := context.Background() + toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") + blockHeader := &types.Header{} + txs := make([]*types.Transaction, 0) + for i := 1; i < 10; i++ { + txData := &types.DynamicFeeTx{ + To: &toAddr, + Nonce: 1, + GasFeeCap: big.NewInt(1), + GasTipCap: big.NewInt(1), + Gas: 1, + Value: big.NewInt(0), + Data: nil, + } + tx := types.NewTx(txData) + txs = append(txs, tx) + } + blockBody := &types.Body{ + Transactions: txs, + } + receipts := []*types.Receipt{} + block := types.NewBlock( + blockHeader, + blockBody, + receipts, + trie.NewStackTrie(nil), + ) + blockReader := &mockBlockReader{ + blocks: map[common.Hash]*types.Block{ + block.Hash(): 
block, + }, + } + preimages := make(map[common.Hash][]byte) + recorder := NewTransactionRecorder(blockReader, block.Hash(), preimages) + require.NoError(t, recorder.Initialize(ctx)) + log := &types.Log{ + TxIndex: 5, + } + tx, err := recorder.TransactionByLog(ctx, log) + require.NoError(t, err) + require.Equal(t, txs[5], tx) } From 6f3c0f9cac0a76c32bfca9b6f9bf2ae72970df57 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 23 Dec 2025 18:58:41 +0530 Subject: [PATCH 04/42] fix tx recorder --- arbnode/mel/recording/delayed_msg_database.go | 14 +- arbnode/mel/recording/tx_and_logs_database.go | 109 ++++++++++++++ arbnode/mel/recording/tx_recorder.go | 142 +++--------------- arbnode/mel/recording/tx_recorder_test.go | 15 +- cmd/mel-replay/delayed_message_db_test.go | 2 +- 5 files changed, 146 insertions(+), 136 deletions(-) create mode 100644 arbnode/mel/recording/tx_and_logs_database.go diff --git a/arbnode/mel/recording/delayed_msg_database.go b/arbnode/mel/recording/delayed_msg_database.go index 969b0f3f57..c6852b50d9 100644 --- a/arbnode/mel/recording/delayed_msg_database.go +++ b/arbnode/mel/recording/delayed_msg_database.go @@ -16,19 +16,19 @@ import ( "github.com/offchainlabs/nitro/arbos/merkleAccumulator" ) -// RecordingDatabase holds an ethdb.KeyValueStore that contains delayed messages stored by native MEL and implements DelayedMessageDatabase +// DelayedMsgDatabase holds an ethdb.KeyValueStore that contains delayed messages stored by native MEL and implements DelayedMessageDatabase // interface defined in 'mel'. 
It is solely used for recording of preimages relating to delayed messages needed for MEL validation -type RecordingDatabase struct { +type DelayedMsgDatabase struct { db ethdb.KeyValueStore preimages map[common.Hash][]byte initialized bool } -func NewRecordingDatabase(db ethdb.KeyValueStore) *RecordingDatabase { - return &RecordingDatabase{db, make(map[common.Hash][]byte), false} +func NewDelayedMsgDatabase(db ethdb.KeyValueStore) *DelayedMsgDatabase { + return &DelayedMsgDatabase{db, make(map[common.Hash][]byte), false} } -func (r *RecordingDatabase) initialize(ctx context.Context, state *mel.State) error { +func (r *DelayedMsgDatabase) initialize(ctx context.Context, state *mel.State) error { var acc *merkleAccumulator.MerkleAccumulator for i := state.ParentChainBlockNumber; i > 0; i-- { seenState, err := getState(ctx, r.db, i) @@ -83,9 +83,9 @@ func (r *RecordingDatabase) initialize(ctx context.Context, state *mel.State) er return nil } -func (r *RecordingDatabase) Preimages() map[common.Hash][]byte { return r.preimages } +func (r *DelayedMsgDatabase) Preimages() map[common.Hash][]byte { return r.preimages } -func (r *RecordingDatabase) ReadDelayedMessage(ctx context.Context, state *mel.State, index uint64) (*mel.DelayedInboxMessage, error) { +func (r *DelayedMsgDatabase) ReadDelayedMessage(ctx context.Context, state *mel.State, index uint64) (*mel.DelayedInboxMessage, error) { if index == 0 { // Init message // This message cannot be found in the database as it is supposed to be seen and read in the same block, so we persist that in DelayedMessageBacklog return state.GetDelayedMessageBacklog().GetInitMsg(), nil diff --git a/arbnode/mel/recording/tx_and_logs_database.go b/arbnode/mel/recording/tx_and_logs_database.go new file mode 100644 index 0000000000..4d683bcfbf --- /dev/null +++ b/arbnode/mel/recording/tx_and_logs_database.go @@ -0,0 +1,109 @@ +package melrecording + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/triedb" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" +) + +type TxAndLogsDatabase struct { + underlying *triedb.Database + recorder daprovider.PreimageRecorder +} + +func (rdb *TxAndLogsDatabase) Get(key []byte) ([]byte, error) { + hash := common.BytesToHash(key) + value, err := rdb.underlying.Node(hash) + if err != nil { + return nil, err + } + if rdb.recorder != nil { + rdb.recorder(hash, value, arbutil.Keccak256PreimageType) + } + + return value, nil +} +func (rdb *TxAndLogsDatabase) Has(key []byte) (bool, error) { + hash := common.BytesToHash(key) + _, err := rdb.underlying.Node(hash) + return err == nil, nil +} +func (rdb *TxAndLogsDatabase) Put(key []byte, value []byte) error { + return fmt.Errorf("Put not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) Delete(key []byte) error { + return fmt.Errorf("Delete not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) DeleteRange(start, end []byte) error { + return fmt.Errorf("DeleteRange not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { + return fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { + return 0, fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) SyncAncient() error { + return fmt.Errorf("SyncAncient not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) TruncateHead(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateHead not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) TruncateTail(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateTail not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) Append(kind string, number uint64, item interface{}) error { + return fmt.Errorf("Append not 
supported on recording DB") +} +func (rdb *TxAndLogsDatabase) AppendRaw(kind string, number uint64, item []byte) error { + return fmt.Errorf("AppendRaw not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) AncientDatadir() (string, error) { + return "", fmt.Errorf("AncientDatadir not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) Ancient(kind string, number uint64) ([]byte, error) { + return nil, fmt.Errorf("Ancient not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + return nil, fmt.Errorf("AncientRange not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + return nil, fmt.Errorf("AncientBytes not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) Ancients() (uint64, error) { + return 0, fmt.Errorf("Ancients not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) Tail() (uint64, error) { + return 0, fmt.Errorf("Tail not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) AncientSize(kind string) (uint64, error) { + return 0, fmt.Errorf("AncientSize not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) Compact(start []byte, limit []byte) error { + return nil +} +func (rdb *TxAndLogsDatabase) SyncKeyValue() error { + return nil +} +func (rdb *TxAndLogsDatabase) Stat() (string, error) { + return "", nil +} +func (rdb *TxAndLogsDatabase) WasmDataBase() ethdb.KeyValueStore { + return nil +} +func (rdb *TxAndLogsDatabase) NewBatch() ethdb.Batch { + return nil +} +func (rdb *TxAndLogsDatabase) NewBatchWithSize(size int) ethdb.Batch { + return nil +} +func (rdb *TxAndLogsDatabase) NewIterator(prefix []byte, start []byte) ethdb.Iterator { + return nil +} +func (rdb *TxAndLogsDatabase) Close() error { + return nil +} diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 2ed62bb91e..d02fa78867 
100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -1,4 +1,4 @@ -package recording +package melrecording import ( "context" @@ -6,13 +6,15 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/triedb" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" ) type BlockReader interface { @@ -22,15 +24,16 @@ type BlockReader interface { type TransactionRecorder struct { parentChainReader BlockReader parentChainBlockHash common.Hash - preimages map[common.Hash][]byte + preimages daprovider.PreimagesMap txs []*types.Transaction trieDB *triedb.Database + blockTxHash common.Hash } func NewTransactionRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, - preimages map[common.Hash][]byte, + preimages daprovider.PreimagesMap, ) *TransactionRecorder { return &TransactionRecorder{ parentChainReader: parentChainReader, @@ -44,7 +47,7 @@ func (tr *TransactionRecorder) Initialize(ctx context.Context) error { if err != nil { return err } - tdb := triedb.NewDatabase(nil, &triedb.Config{ + tdb := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{ Preimages: true, }) txsTrie := trie.NewEmpty(tdb) @@ -77,6 +80,7 @@ func (tr *TransactionRecorder) Initialize(ctx context.Context) error { } tr.txs = txs tr.trieDB = tdb + tr.blockTxHash = root return nil } @@ -90,13 +94,12 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. 
if int(log.TxIndex) >= len(tr.txs) { return nil, fmt.Errorf("index out of range: %d", log.TxIndex) } - recorder := NewPreimageRecorder() - recordingDB := &RecordingDB{ + recordingDB := &TxAndLogsDatabase{ underlying: tr.trieDB, - recorder: recorder, + recorder: daprovider.RecordPreimagesTo(tr.preimages), // RecordingDB will record relevant preimages into tr.preimages } recordingTDB := triedb.NewDatabase(recordingDB, nil) - txsTrie, err := trie.New(trie.TrieID(log.TxHash), recordingTDB) + txsTrie, err := trie.New(trie.TrieID(tr.blockTxHash), recordingTDB) if err != nil { return nil, fmt.Errorf("failed to create trie: %w", err) } @@ -108,124 +111,17 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. if err != nil { return nil, fmt.Errorf("failed to get transaction from trie: %w", err) } - // Return the tx itself instead of nil, but also add the - // tx marshaled binary by hash to the preimages map. - tr.preimages[crypto.Keccak256Hash(txBytes)] = txBytes + // Return the tx itself instead of nil tx := new(types.Transaction) if err = tx.UnmarshalBinary(txBytes); err != nil { return nil, fmt.Errorf("failed to unmarshal transaction: %w", err) } - return tx, nil -} - -type PreimageRecorder struct { - preimages map[common.Hash][]byte -} - -func NewPreimageRecorder() *PreimageRecorder { - return &PreimageRecorder{ - preimages: make(map[common.Hash][]byte), + // Add the tx marshaled binary by hash to the preimages map + if _, ok := tr.preimages[arbutil.Keccak256PreimageType]; !ok { + tr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) } + tr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(txBytes)] = txBytes + return tx, nil } -func (pr *PreimageRecorder) GetPreimages() map[common.Hash][]byte { - return pr.preimages -} - -type RecordingDB struct { - underlying *triedb.Database - recorder *PreimageRecorder -} - -func (rdb *RecordingDB) Get(key []byte) ([]byte, error) { - hash := common.BytesToHash(key) - 
value, err := rdb.underlying.Node(hash) - if err != nil { - return nil, err - } - if rdb.recorder != nil { - rdb.recorder.preimages[hash] = value - } - - return value, nil -} -func (rdb *RecordingDB) Has(key []byte) (bool, error) { - hash := common.BytesToHash(key) - _, err := rdb.underlying.Node(hash) - return err == nil, nil -} -func (rdb *RecordingDB) Put(key []byte, value []byte) error { - return fmt.Errorf("Put not supported on recording DB") -} -func (rdb *RecordingDB) Delete(key []byte) error { - return fmt.Errorf("Delete not supported on recording DB") -} -func (rdb *RecordingDB) DeleteRange(start, end []byte) error { - return fmt.Errorf("DeleteRange not supported on recording DB") -} -func (rdb *RecordingDB) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { - return fmt.Errorf("ReadAncients not supported on recording DB") -} -func (rdb *RecordingDB) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { - return 0, fmt.Errorf("ReadAncients not supported on recording DB") -} -func (rdb *RecordingDB) SyncAncient() error { - return fmt.Errorf("SyncAncient not supported on recording DB") -} -func (rdb *RecordingDB) TruncateHead(n uint64) (uint64, error) { - return 0, fmt.Errorf("TruncateHead not supported on recording DB") -} -func (rdb *RecordingDB) TruncateTail(n uint64) (uint64, error) { - return 0, fmt.Errorf("TruncateTail not supported on recording DB") -} -func (rdb *RecordingDB) Append(kind string, number uint64, item interface{}) error { - return fmt.Errorf("Append not supported on recording DB") -} -func (rdb *RecordingDB) AppendRaw(kind string, number uint64, item []byte) error { - return fmt.Errorf("AppendRaw not supported on recording DB") -} -func (rdb *RecordingDB) AncientDatadir() (string, error) { - return "", fmt.Errorf("AncientDatadir not supported on recording DB") -} -func (rdb *RecordingDB) Ancient(kind string, number uint64) ([]byte, error) { - return nil, fmt.Errorf("Ancient not supported on recording DB") -} 
-func (rdb *RecordingDB) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { - return nil, fmt.Errorf("AncientRange not supported on recording DB") -} -func (rdb *RecordingDB) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { - return nil, fmt.Errorf("AncientBytes not supported on recording DB") -} -func (rdb *RecordingDB) Ancients() (uint64, error) { - return 0, fmt.Errorf("Ancients not supported on recording DB") -} -func (rdb *RecordingDB) Tail() (uint64, error) { - return 0, fmt.Errorf("Tail not supported on recording DB") -} -func (rdb *RecordingDB) AncientSize(kind string) (uint64, error) { - return 0, fmt.Errorf("AncientSize not supported on recording DB") -} -func (rdb *RecordingDB) Compact(start []byte, limit []byte) error { - return nil -} -func (rdb *RecordingDB) SyncKeyValue() error { - return nil -} -func (rdb *RecordingDB) Stat() (string, error) { - return "", nil -} -func (rdb *RecordingDB) WasmDataBase() ethdb.KeyValueStore { - return nil -} -func (rdb *RecordingDB) NewBatch() ethdb.Batch { - return nil -} -func (rdb *RecordingDB) NewBatchWithSize(size int) ethdb.Batch { - return nil -} -func (rdb *RecordingDB) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - return nil -} -func (rdb *RecordingDB) Close() error { - return nil -} +func (tr *TransactionRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go index 0d3182815d..9acc84e506 100644 --- a/arbnode/mel/recording/tx_recorder_test.go +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -1,4 +1,4 @@ -package recording +package melrecording import ( "context" @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/daprovider" "github.com/stretchr/testify/require" ) @@ -28,10 +29,10 @@ func 
TestTransactionByLog(t *testing.T) { toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") blockHeader := &types.Header{} txs := make([]*types.Transaction, 0) - for i := 1; i < 10; i++ { + for i := uint64(1); i < 10; i++ { txData := &types.DynamicFeeTx{ To: &toAddr, - Nonce: 1, + Nonce: i, GasFeeCap: big.NewInt(1), GasTipCap: big.NewInt(1), Gas: 1, @@ -56,7 +57,7 @@ func TestTransactionByLog(t *testing.T) { block.Hash(): block, }, } - preimages := make(map[common.Hash][]byte) + preimages := make(daprovider.PreimagesMap) recorder := NewTransactionRecorder(blockReader, block.Hash(), preimages) require.NoError(t, recorder.Initialize(ctx)) @@ -65,5 +66,9 @@ func TestTransactionByLog(t *testing.T) { } tx, err := recorder.TransactionByLog(ctx, log) require.NoError(t, err) - require.Equal(t, txs[5], tx) + have, err := tx.MarshalJSON() + require.NoError(t, err) + want, err := block.Transactions()[5].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) } diff --git a/cmd/mel-replay/delayed_message_db_test.go b/cmd/mel-replay/delayed_message_db_test.go index e19f8d1036..1e7b7c5660 100644 --- a/cmd/mel-replay/delayed_message_db_test.go +++ b/cmd/mel-replay/delayed_message_db_test.go @@ -70,7 +70,7 @@ func TestRecordingPreimagesForReadDelayedMessage(t *testing.T) { require.NoError(t, state.GenerateDelayedMessagesSeenMerklePartialsAndRoot()) require.NoError(t, melDB.SaveState(ctx, state)) - recordingDB := melrecording.NewRecordingDatabase(db) + recordingDB := melrecording.NewDelayedMsgDatabase(db) for i := startBlockNum; i < numMsgs; i++ { require.NoError(t, state.AccumulateDelayedMessage(delayedMessages[i])) state.DelayedMessagesSeen++ From 02259cf2cb76b863438820a3d85d9843f0bcddc7 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 23 Dec 2025 22:46:21 +0530 Subject: [PATCH 05/42] add changelog and fix lint --- arbnode/mel/recording/tx_and_logs_database.go | 1 + arbnode/mel/recording/tx_recorder.go | 3 +++ 
arbnode/mel/recording/tx_recorder_test.go | 4 +++- changelog/rauljordan-nit-4254.md | 2 ++ 4 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog/rauljordan-nit-4254.md diff --git a/arbnode/mel/recording/tx_and_logs_database.go b/arbnode/mel/recording/tx_and_logs_database.go index 4d683bcfbf..e6752d928c 100644 --- a/arbnode/mel/recording/tx_and_logs_database.go +++ b/arbnode/mel/recording/tx_and_logs_database.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/triedb" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" ) diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index d02fa78867..ebe53c3b3b 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/triedb" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" ) @@ -53,6 +54,7 @@ func (tr *TransactionRecorder) Initialize(ctx context.Context) error { txsTrie := trie.NewEmpty(tdb) txs := block.Body().Transactions for i, tx := range txs { + // #nosec G115 indexBytes, err := rlp.EncodeToBytes(uint64(i)) if err != nil { return fmt.Errorf("failed to encode index %d: %w", i, err) @@ -91,6 +93,7 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. 
if log == nil { return nil, errors.New("transactionByLog got nil log value") } + // #nosec G115 if int(log.TxIndex) >= len(tr.txs) { return nil, fmt.Errorf("index out of range: %d", log.TxIndex) } diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go index 9acc84e506..88824bb859 100644 --- a/arbnode/mel/recording/tx_recorder_test.go +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -5,11 +5,13 @@ import ( "math/big" "testing" + "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/daprovider" - "github.com/stretchr/testify/require" ) type mockBlockReader struct { diff --git a/changelog/rauljordan-nit-4254.md b/changelog/rauljordan-nit-4254.md new file mode 100644 index 0000000000..702ec9b01e --- /dev/null +++ b/changelog/rauljordan-nit-4254.md @@ -0,0 +1,2 @@ +### Added + - Implement recording of txs for MEL validation \ No newline at end of file From a4318bdcd2c2f937121975f2a136a47c8513e50a Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 30 Dec 2025 17:41:36 +0530 Subject: [PATCH 06/42] Implement receipt recorder for mel validation --- arbnode/mel/recording/receipt_recorder.go | 150 ++++++++++++++++++ .../mel/recording/receipt_recorder_test.go | 81 ++++++++++ arbnode/mel/recording/tx_recorder.go | 1 + arbnode/mel/recording/tx_recorder_test.go | 11 +- 4 files changed, 242 insertions(+), 1 deletion(-) create mode 100644 arbnode/mel/recording/receipt_recorder.go create mode 100644 arbnode/mel/recording/receipt_recorder_test.go diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go new file mode 100644 index 0000000000..08bfb3b23b --- /dev/null +++ b/arbnode/mel/recording/receipt_recorder.go @@ -0,0 +1,150 @@ +package melrecording + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/triedb" + + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" +) + +type ReceiptRecorder struct { + parentChainReader BlockReader + parentChainBlockHash common.Hash + preimages daprovider.PreimagesMap + receipts []*types.Receipt + logs []*types.Log + trieDB *triedb.Database + blockReceiptHash common.Hash +} + +func NewReceiptRecorder( + parentChainReader BlockReader, + parentChainBlockHash common.Hash, + preimages daprovider.PreimagesMap, +) *ReceiptRecorder { + return &ReceiptRecorder{ + parentChainReader: parentChainReader, + parentChainBlockHash: parentChainBlockHash, + preimages: preimages, + } +} + +func (lr *ReceiptRecorder) Initialize(ctx context.Context) error { + block, err := lr.parentChainReader.BlockByHash(ctx, lr.parentChainBlockHash) + if err != nil { + return err + } + tdb := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{ + Preimages: true, + }) + receiptsTrie := trie.NewEmpty(tdb) + var receipts []*types.Receipt + txs := block.Body().Transactions + for i, tx := range txs { + receipt, err := lr.parentChainReader.TransactionReceipt(ctx, tx.Hash()) + if err != nil { + return fmt.Errorf("error fetching receipt for tx: %v", tx.Hash()) + } + receipts = append(receipts, receipt) + lr.logs = append(lr.logs, receipt.Logs...) 
+ // #nosec G115 + indexBytes, err := rlp.EncodeToBytes(uint64(i)) + if err != nil { + return fmt.Errorf("failed to encode index %d: %w", i, err) + } + receiptBytes, err := receipt.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to marshal receipt %d: %w", i, err) + } + if err := receiptsTrie.Update(indexBytes, receiptBytes); err != nil { + return fmt.Errorf("failed to update trie at index %d: %w", i, err) + } + } + root, nodes := receiptsTrie.Commit(false) + if root != block.ReceiptHash() { + return fmt.Errorf("computed root %s doesn't match header root %s", + root.Hex(), block.ReceiptHash().Hex()) + } + if nodes != nil { + if err := tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + return fmt.Errorf("failed to commit trie nodes: %w", err) + } + } + if err := tdb.Commit(root, false); err != nil { + return fmt.Errorf("failed to commit database: %w", err) + } + lr.receipts = receipts + lr.trieDB = tdb + lr.blockReceiptHash = root + return nil +} + +func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { + if lr.trieDB == nil { + return nil, errors.New("TransactionRecorder not initialized") + } + if lr.parentChainBlockHash != parentChainBlockHash { + return nil, fmt.Errorf("parentChainBlockHash mismatch. 
expected: %v got: %v", lr.parentChainBlockHash, parentChainBlockHash) + } + // #nosec G115 + if int(txIndex) >= len(lr.receipts) { + return nil, fmt.Errorf("index out of range: %d", txIndex) + } + recordingDB := &TxAndLogsDatabase{ + underlying: lr.trieDB, + recorder: daprovider.RecordPreimagesTo(lr.preimages), // RecordingDB will record relevant preimages into tr.preimages + } + recordingTDB := triedb.NewDatabase(recordingDB, nil) + receiptsTrie, err := trie.New(trie.TrieID(lr.blockReceiptHash), recordingTDB) + if err != nil { + return nil, fmt.Errorf("failed to create trie: %w", err) + } + indexBytes, err := rlp.EncodeToBytes(txIndex) + if err != nil { + return nil, fmt.Errorf("failed to encode index: %w", err) + } + receiptBytes, err := receiptsTrie.Get(indexBytes) + if err != nil { + return nil, fmt.Errorf("failed to get receipt from trie: %w", err) + } + receipt := new(types.Receipt) + if err = receipt.UnmarshalBinary(receiptBytes); err != nil { + return nil, fmt.Errorf("failed to unmarshal receipt: %w", err) + } + // Add the receipt marshaled binary by hash to the preimages map + if _, ok := lr.preimages[arbutil.Keccak256PreimageType]; !ok { + lr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + } + lr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(receiptBytes)] = receiptBytes + // Fill in the TxIndex (give as input to this method) into the logs so that Tx recording + // is possible. This field is one of the derived fields of Log hence won't be stored in trie. 
 + // + // We use this same trick in validation as well in order to link a tx with its logs + for _, log := range receipt.Logs { + log.TxIndex = txIndex + } + return receipt.Logs, nil +} + +func (lr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + if lr.trieDB == nil { + return nil, errors.New("ReceiptRecorder not initialized") + } + if lr.parentChainBlockHash != parentChainBlockHash { + return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", lr.parentChainBlockHash, parentChainBlockHash) + } + return lr.logs, nil +} + +func (tr *ReceiptRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } diff --git a/arbnode/mel/recording/receipt_recorder_test.go b/arbnode/mel/recording/receipt_recorder_test.go new file mode 100644 index 0000000000..6fefb8ebc9 --- /dev/null +++ b/arbnode/mel/recording/receipt_recorder_test.go @@ -0,0 +1,81 @@ +package melrecording + +import ( + "context" + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie" + + "github.com/offchainlabs/nitro/daprovider" +) + +func TestLogsForTxIndex(t *testing.T) { + ctx := context.Background() + blockReader := &mockBlockReader{ + blocks: make(map[common.Hash]*types.Block), + receiptByTxHash: map[common.Hash]*types.Receipt{}, + } + toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") + blockHeader := &types.Header{} + receipts := []*types.Receipt{} + txs := make([]*types.Transaction, 0) + for i := uint64(1); i < 10; i++ { + txData := &types.DynamicFeeTx{ + To: &toAddr, + Nonce: i, + GasFeeCap: big.NewInt(1), + GasTipCap: big.NewInt(1), + Gas: 1, + Value: big.NewInt(0), + Data: nil, + } + tx := types.NewTx(txData) + txs = append(txs, tx) + receipt := &types.Receipt{ + TxHash: tx.Hash(), + TransactionIndex: uint(i - 1), + Type:
types.DynamicFeeTxType, + Logs: []*types.Log{ + { + // Consensus fields: + Address: common.HexToAddress("sample"), + Topics: []common.Hash{common.HexToHash("topic1"), common.HexToHash("topic2")}, + Data: common.Hex2Bytes(fmt.Sprintf("data:%d", i)), + + // Derived Fields: + TxIndex: uint(i - 1), + }, + }, + } + receipts = append(receipts, receipt) + blockReader.receiptByTxHash[tx.Hash()] = receipt + } + blockBody := &types.Body{ + Transactions: txs, + } + block := types.NewBlock( + blockHeader, + blockBody, + receipts, + trie.NewStackTrie(nil), + ) + blockReader.blocks[block.Hash()] = block + preimages := make(daprovider.PreimagesMap) + recorder := NewReceiptRecorder(blockReader, block.Hash(), preimages) + require.NoError(t, recorder.Initialize(ctx)) + + txIndex := uint(3) + logs, err := recorder.LogsForTxIndex(ctx, block.Hash(), txIndex) + require.NoError(t, err) + have, err := logs[0].MarshalJSON() + require.NoError(t, err) + want, err := receipts[txIndex].Logs[0].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) +} diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index ebe53c3b3b..3d14eb1da4 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -20,6 +20,7 @@ import ( type BlockReader interface { BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) + TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) } type TransactionRecorder struct { diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go index 88824bb859..83582aed48 100644 --- a/arbnode/mel/recording/tx_recorder_test.go +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -15,7 +15,8 @@ import ( ) type mockBlockReader struct { - blocks map[common.Hash]*types.Block + blocks map[common.Hash]*types.Block + receiptByTxHash map[common.Hash]*types.Receipt } func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash 
common.Hash) (*types.Block, error) { @@ -26,6 +27,14 @@ func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash common.Hash) ( return block, nil } +func (mbr *mockBlockReader) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + receipt, exists := mbr.receiptByTxHash[txHash] + if !exists { + return nil, nil + } + return receipt, nil +} + func TestTransactionByLog(t *testing.T) { ctx := context.Background() toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") From b51c2397e6312d6a3679c53661340a60bc1534e3 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 30 Dec 2025 17:50:46 +0530 Subject: [PATCH 07/42] code refactor --- arbnode/mel/recording/receipt_recorder.go | 2 +- arbnode/mel/recording/tx_and_logs_database.go | 110 ------------------ arbnode/mel/recording/tx_recorder.go | 2 +- .../recording/txs_and_receipts_database.go | 110 ++++++++++++++++++ 4 files changed, 112 insertions(+), 112 deletions(-) delete mode 100644 arbnode/mel/recording/tx_and_logs_database.go create mode 100644 arbnode/mel/recording/txs_and_receipts_database.go diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index 08bfb3b23b..5cc15613c1 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -101,7 +101,7 @@ func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH if int(txIndex) >= len(lr.receipts) { return nil, fmt.Errorf("index out of range: %d", txIndex) } - recordingDB := &TxAndLogsDatabase{ + recordingDB := &TxsAndReceiptsDatabase{ underlying: lr.trieDB, recorder: daprovider.RecordPreimagesTo(lr.preimages), // RecordingDB will record relevant preimages into tr.preimages } diff --git a/arbnode/mel/recording/tx_and_logs_database.go b/arbnode/mel/recording/tx_and_logs_database.go deleted file mode 100644 index e6752d928c..0000000000 --- a/arbnode/mel/recording/tx_and_logs_database.go +++ 
/dev/null @@ -1,110 +0,0 @@ -package melrecording - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/triedb" - - "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/daprovider" -) - -type TxAndLogsDatabase struct { - underlying *triedb.Database - recorder daprovider.PreimageRecorder -} - -func (rdb *TxAndLogsDatabase) Get(key []byte) ([]byte, error) { - hash := common.BytesToHash(key) - value, err := rdb.underlying.Node(hash) - if err != nil { - return nil, err - } - if rdb.recorder != nil { - rdb.recorder(hash, value, arbutil.Keccak256PreimageType) - } - - return value, nil -} -func (rdb *TxAndLogsDatabase) Has(key []byte) (bool, error) { - hash := common.BytesToHash(key) - _, err := rdb.underlying.Node(hash) - return err == nil, nil -} -func (rdb *TxAndLogsDatabase) Put(key []byte, value []byte) error { - return fmt.Errorf("Put not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Delete(key []byte) error { - return fmt.Errorf("Delete not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) DeleteRange(start, end []byte) error { - return fmt.Errorf("DeleteRange not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { - return fmt.Errorf("ReadAncients not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { - return 0, fmt.Errorf("ReadAncients not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) SyncAncient() error { - return fmt.Errorf("SyncAncient not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) TruncateHead(n uint64) (uint64, error) { - return 0, fmt.Errorf("TruncateHead not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) TruncateTail(n uint64) (uint64, error) { - return 0, fmt.Errorf("TruncateTail not supported on recording DB") -} -func 
(rdb *TxAndLogsDatabase) Append(kind string, number uint64, item interface{}) error { - return fmt.Errorf("Append not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AppendRaw(kind string, number uint64, item []byte) error { - return fmt.Errorf("AppendRaw not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AncientDatadir() (string, error) { - return "", fmt.Errorf("AncientDatadir not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Ancient(kind string, number uint64) ([]byte, error) { - return nil, fmt.Errorf("Ancient not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { - return nil, fmt.Errorf("AncientRange not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { - return nil, fmt.Errorf("AncientBytes not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Ancients() (uint64, error) { - return 0, fmt.Errorf("Ancients not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Tail() (uint64, error) { - return 0, fmt.Errorf("Tail not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AncientSize(kind string) (uint64, error) { - return 0, fmt.Errorf("AncientSize not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Compact(start []byte, limit []byte) error { - return nil -} -func (rdb *TxAndLogsDatabase) SyncKeyValue() error { - return nil -} -func (rdb *TxAndLogsDatabase) Stat() (string, error) { - return "", nil -} -func (rdb *TxAndLogsDatabase) WasmDataBase() ethdb.KeyValueStore { - return nil -} -func (rdb *TxAndLogsDatabase) NewBatch() ethdb.Batch { - return nil -} -func (rdb *TxAndLogsDatabase) NewBatchWithSize(size int) ethdb.Batch { - return nil -} -func (rdb *TxAndLogsDatabase) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - return nil -} -func (rdb *TxAndLogsDatabase) Close() error { - return nil -} 
diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 3d14eb1da4..56472cc457 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -98,7 +98,7 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. if int(log.TxIndex) >= len(tr.txs) { return nil, fmt.Errorf("index out of range: %d", log.TxIndex) } - recordingDB := &TxAndLogsDatabase{ + recordingDB := &TxsAndReceiptsDatabase{ underlying: tr.trieDB, recorder: daprovider.RecordPreimagesTo(tr.preimages), // RecordingDB will record relevant preimages into tr.preimages } diff --git a/arbnode/mel/recording/txs_and_receipts_database.go b/arbnode/mel/recording/txs_and_receipts_database.go new file mode 100644 index 0000000000..1c5aaaef2b --- /dev/null +++ b/arbnode/mel/recording/txs_and_receipts_database.go @@ -0,0 +1,110 @@ +package melrecording + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/triedb" + + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" +) + +type TxsAndReceiptsDatabase struct { + underlying *triedb.Database + recorder daprovider.PreimageRecorder +} + +func (rdb *TxsAndReceiptsDatabase) Get(key []byte) ([]byte, error) { + hash := common.BytesToHash(key) + value, err := rdb.underlying.Node(hash) + if err != nil { + return nil, err + } + if rdb.recorder != nil { + rdb.recorder(hash, value, arbutil.Keccak256PreimageType) + } + + return value, nil +} +func (rdb *TxsAndReceiptsDatabase) Has(key []byte) (bool, error) { + hash := common.BytesToHash(key) + _, err := rdb.underlying.Node(hash) + return err == nil, nil +} +func (rdb *TxsAndReceiptsDatabase) Put(key []byte, value []byte) error { + return fmt.Errorf("Put not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Delete(key []byte) error { + return fmt.Errorf("Delete not supported on recording DB") +} 
+func (rdb *TxsAndReceiptsDatabase) DeleteRange(start, end []byte) error { + return fmt.Errorf("DeleteRange not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { + return fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { + return 0, fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) SyncAncient() error { + return fmt.Errorf("SyncAncient not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) TruncateHead(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateHead not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) TruncateTail(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateTail not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Append(kind string, number uint64, item interface{}) error { + return fmt.Errorf("Append not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AppendRaw(kind string, number uint64, item []byte) error { + return fmt.Errorf("AppendRaw not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AncientDatadir() (string, error) { + return "", fmt.Errorf("AncientDatadir not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Ancient(kind string, number uint64) ([]byte, error) { + return nil, fmt.Errorf("Ancient not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + return nil, fmt.Errorf("AncientRange not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + return nil, fmt.Errorf("AncientBytes not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Ancients() (uint64, error) { + return 0, 
fmt.Errorf("Ancients not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Tail() (uint64, error) { + return 0, fmt.Errorf("Tail not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AncientSize(kind string) (uint64, error) { + return 0, fmt.Errorf("AncientSize not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Compact(start []byte, limit []byte) error { + return nil +} +func (rdb *TxsAndReceiptsDatabase) SyncKeyValue() error { + return nil +} +func (rdb *TxsAndReceiptsDatabase) Stat() (string, error) { + return "", nil +} +func (rdb *TxsAndReceiptsDatabase) WasmDataBase() ethdb.KeyValueStore { + return nil +} +func (rdb *TxsAndReceiptsDatabase) NewBatch() ethdb.Batch { + return nil +} +func (rdb *TxsAndReceiptsDatabase) NewBatchWithSize(size int) ethdb.Batch { + return nil +} +func (rdb *TxsAndReceiptsDatabase) NewIterator(prefix []byte, start []byte) ethdb.Iterator { + return nil +} +func (rdb *TxsAndReceiptsDatabase) Close() error { + return nil +} From 20ea831f5cd76f894c867375f70f2012e95e94f9 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 2 Jan 2026 19:22:00 +0530 Subject: [PATCH 08/42] Make tx and receipt fetcher in mel-replay to work with recorded preimages --- arbnode/mel/recording/receipt_recorder.go | 32 ++- .../mel/recording/receipt_recorder_test.go | 81 ------- arbnode/mel/recording/tx_recorder.go | 3 +- arbnode/mel/recording/tx_recorder_test.go | 85 ------- changelog/ganeshvanahalli-nit-4276.md | 3 + cmd/mel-replay/receipt_fetcher.go | 223 +++++++++--------- cmd/mel-replay/receipt_fetcher_test.go | 123 ---------- ...ceipt_recorder_and_receipt_fetcher_test.go | 133 +++++++++++ cmd/mel-replay/trie_fetcher.go | 147 ++++++++++++ cmd/mel-replay/tx_fetcher.go | 23 ++ .../tx_recorder_and_tx_fetcher_test.go | 105 +++++++++ cmd/mel-replay/txs_fetcher.go | 90 ------- cmd/mel-replay/txs_fetcher_test.go | 77 ------ 13 files changed, 539 insertions(+), 586 deletions(-) delete mode 100644 
arbnode/mel/recording/receipt_recorder_test.go delete mode 100644 arbnode/mel/recording/tx_recorder_test.go create mode 100644 changelog/ganeshvanahalli-nit-4276.md delete mode 100644 cmd/mel-replay/receipt_fetcher_test.go create mode 100644 cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go create mode 100644 cmd/mel-replay/trie_fetcher.go create mode 100644 cmd/mel-replay/tx_fetcher.go create mode 100644 cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go delete mode 100644 cmd/mel-replay/txs_fetcher.go delete mode 100644 cmd/mel-replay/txs_fetcher_test.go diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index 5cc15613c1..d7c8e003bd 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -1,6 +1,7 @@ package melrecording import ( + "bytes" "context" "errors" "fmt" @@ -18,25 +19,28 @@ import ( "github.com/offchainlabs/nitro/daprovider" ) +// maps to an array of uints representing the relevant txIndexes of receipts needed for message extraction +var RELEVANT_LOGS_TXINDEXES_KEY common.Hash = common.HexToHash("123534") + type ReceiptRecorder struct { - parentChainReader BlockReader - parentChainBlockHash common.Hash - preimages daprovider.PreimagesMap - receipts []*types.Receipt - logs []*types.Log - trieDB *triedb.Database - blockReceiptHash common.Hash + parentChainReader BlockReader + parentChainBlockHash common.Hash + preimages daprovider.PreimagesMap + receipts []*types.Receipt + logs []*types.Log + relevantLogsTxIndexes []uint + trieDB *triedb.Database + blockReceiptHash common.Hash } func NewReceiptRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, - preimages daprovider.PreimagesMap, ) *ReceiptRecorder { return &ReceiptRecorder{ parentChainReader: parentChainReader, parentChainBlockHash: parentChainBlockHash, - preimages: preimages, + preimages: make(daprovider.PreimagesMap), } } @@ -134,6 +138,7 @@ func (lr *ReceiptRecorder) 
LogsForTxIndex(ctx context.Context, parentChainBlockH for _, log := range receipt.Logs { log.TxIndex = txIndex } + lr.relevantLogsTxIndexes = append(lr.relevantLogsTxIndexes, txIndex) + return receipt.Logs, nil } @@ -147,4 +152,11 @@ func (lr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc return lr.logs, nil } -func (tr *ReceiptRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } +func (tr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { + var buf bytes.Buffer + if err := rlp.Encode(&buf, tr.relevantLogsTxIndexes); err != nil { + return nil, err + } + if _, ok := tr.preimages[arbutil.Keccak256PreimageType]; !ok { + tr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + } + tr.preimages[arbutil.Keccak256PreimageType][RELEVANT_LOGS_TXINDEXES_KEY] = buf.Bytes() + return tr.preimages, nil +} diff --git a/arbnode/mel/recording/receipt_recorder_test.go b/arbnode/mel/recording/receipt_recorder_test.go deleted file mode 100644 index 6fefb8ebc9..0000000000 --- a/arbnode/mel/recording/receipt_recorder_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package melrecording - -import ( - "context" - "fmt" - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/trie" - - "github.com/offchainlabs/nitro/daprovider" -) - -func TestLogsForTxIndex(t *testing.T) { - ctx := context.Background() - blockReader := &mockBlockReader{ - blocks: make(map[common.Hash]*types.Block), - receiptByTxHash: map[common.Hash]*types.Receipt{}, - } - toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") - blockHeader := &types.Header{} - receipts := []*types.Receipt{} - txs := make([]*types.Transaction, 0) - for i := uint64(1); i < 10; i++ { - txData := &types.DynamicFeeTx{ - To: &toAddr, - Nonce: i, - GasFeeCap: big.NewInt(1), - GasTipCap: big.NewInt(1), - Gas: 1, - Value: big.NewInt(0), - Data: nil, - } - tx := types.NewTx(txData) - txs = append(txs, tx) - receipt := &types.Receipt{ - TxHash: tx.Hash(), -
TransactionIndex: uint(i - 1), - Type: types.DynamicFeeTxType, - Logs: []*types.Log{ - { - // Consensus fields: - Address: common.HexToAddress("sample"), - Topics: []common.Hash{common.HexToHash("topic1"), common.HexToHash("topic2")}, - Data: common.Hex2Bytes(fmt.Sprintf("data:%d", i)), - - // Derived Fields: - TxIndex: uint(i - 1), - }, - }, - } - receipts = append(receipts, receipt) - blockReader.receiptByTxHash[tx.Hash()] = receipt - } - blockBody := &types.Body{ - Transactions: txs, - } - block := types.NewBlock( - blockHeader, - blockBody, - receipts, - trie.NewStackTrie(nil), - ) - blockReader.blocks[block.Hash()] = block - preimages := make(daprovider.PreimagesMap) - recorder := NewReceiptRecorder(blockReader, block.Hash(), preimages) - require.NoError(t, recorder.Initialize(ctx)) - - txIndex := uint(3) - logs, err := recorder.LogsForTxIndex(ctx, block.Hash(), txIndex) - require.NoError(t, err) - have, err := logs[0].MarshalJSON() - require.NoError(t, err) - want, err := receipts[txIndex].Logs[0].MarshalJSON() - require.NoError(t, err) - require.Equal(t, want, have) -} diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 56472cc457..5688b3a5af 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -35,12 +35,11 @@ type TransactionRecorder struct { func NewTransactionRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, - preimages daprovider.PreimagesMap, ) *TransactionRecorder { return &TransactionRecorder{ parentChainReader: parentChainReader, parentChainBlockHash: parentChainBlockHash, - preimages: preimages, + preimages: make(daprovider.PreimagesMap), } } diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go deleted file mode 100644 index 83582aed48..0000000000 --- a/arbnode/mel/recording/tx_recorder_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package melrecording - -import ( - "context" - "math/big" - "testing" 
- - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/trie" - - "github.com/offchainlabs/nitro/daprovider" -) - -type mockBlockReader struct { - blocks map[common.Hash]*types.Block - receiptByTxHash map[common.Hash]*types.Receipt -} - -func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - block, exists := mbr.blocks[hash] - if !exists { - return nil, nil - } - return block, nil -} - -func (mbr *mockBlockReader) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { - receipt, exists := mbr.receiptByTxHash[txHash] - if !exists { - return nil, nil - } - return receipt, nil -} - -func TestTransactionByLog(t *testing.T) { - ctx := context.Background() - toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") - blockHeader := &types.Header{} - txs := make([]*types.Transaction, 0) - for i := uint64(1); i < 10; i++ { - txData := &types.DynamicFeeTx{ - To: &toAddr, - Nonce: i, - GasFeeCap: big.NewInt(1), - GasTipCap: big.NewInt(1), - Gas: 1, - Value: big.NewInt(0), - Data: nil, - } - tx := types.NewTx(txData) - txs = append(txs, tx) - } - blockBody := &types.Body{ - Transactions: txs, - } - receipts := []*types.Receipt{} - block := types.NewBlock( - blockHeader, - blockBody, - receipts, - trie.NewStackTrie(nil), - ) - blockReader := &mockBlockReader{ - blocks: map[common.Hash]*types.Block{ - block.Hash(): block, - }, - } - preimages := make(daprovider.PreimagesMap) - recorder := NewTransactionRecorder(blockReader, block.Hash(), preimages) - require.NoError(t, recorder.Initialize(ctx)) - - log := &types.Log{ - TxIndex: 5, - } - tx, err := recorder.TransactionByLog(ctx, log) - require.NoError(t, err) - have, err := tx.MarshalJSON() - require.NoError(t, err) - want, err := block.Transactions()[5].MarshalJSON() - require.NoError(t, err) - require.Equal(t, want, 
have) -} diff --git a/changelog/ganeshvanahalli-nit-4276.md b/changelog/ganeshvanahalli-nit-4276.md new file mode 100644 index 0000000000..ae0080656a --- /dev/null +++ b/changelog/ganeshvanahalli-nit-4276.md @@ -0,0 +1,3 @@ +### Fixed +- Update implementation of receipts and txs fetching in mel-replay +- Added testing for recording and fetching of logs and txs needed for MEL validation \ No newline at end of file diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index d8c499ea36..64ce26633f 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -4,162 +4,149 @@ package main import ( - "bytes" "context" "errors" "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/triedb" "github.com/offchainlabs/nitro/arbutil" ) +// maps to an array of uints representing the relevant txIndexes of receipts needed for message extraction +var RELEVANT_LOGS_TXINDEXES_KEY common.Hash = common.HexToHash("123534") + type receiptFetcherForBlock struct { header *types.Header preimageResolver preimageResolver } -// ReceiptForTransactionIndex fetches a receipt for a specific transaction index by walking +// LogsForTxIndex fetches logs for a specific transaction index by walking // the receipt trie of the block header. It uses the preimage resolver to fetch the preimages // of the trie nodes as needed. 
-func (rf *receiptFetcherForBlock) ReceiptForTransactionIndex( - ctx context.Context, - txIndex uint, -) (*types.Receipt, error) { - return fetchReceiptFromBlock(rf.header.ReceiptHash, txIndex, rf.preimageResolver) +func (rf *receiptFetcherForBlock) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { + if rf.header.Hash() != parentChainBlockHash { + return nil, errors.New("parentChainBlockHash mismatch") + } + receipt, err := fetchObjectFromTrie[types.Receipt](rf.header.ReceiptHash, txIndex, rf.preimageResolver) + if err != nil { + return nil, err + } + // This is needed to enable fetching corresponding tx from the txFetcher + for _, log := range receipt.Logs { + log.TxIndex = txIndex + } + return receipt.Logs, nil } -// Fetches a specific receipt index from a block's receipt trie by navigating its -// Merkle Patricia Trie structure. It uses the preimage resolver to fetch preimages -// of trie nodes as needed, and determines how to navigate depending on the structure of the trie nodes. -func fetchReceiptFromBlock( - receiptsRoot common.Hash, - receiptIndex uint, - preimageResolver preimageResolver, -) (*types.Receipt, error) { - currentNodeHash := receiptsRoot - currentPath := []byte{} // Track nibbles consumed so far. 
- receiptKey, err := rlp.EncodeToBytes(receiptIndex) +// LogsForBlockHash first gets the txIndexes corresponding to the relevant logs by reading +// RELEVANT_LOGS_TXINDEXES_KEY from the preimages and then fetches logs for each of these txIndexes +func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + if rf.header.Hash() != parentChainBlockHash { + return nil, errors.New("parentChainBlockHash mismatch") + } + txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, RELEVANT_LOGS_TXINDEXES_KEY) if err != nil { return nil, err } - targetNibbles := keyToNibbles(receiptKey) - for { - nodeData, err := preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentNodeHash) + var txIndexes []uint + if err := rlp.DecodeBytes(txIndexData, &txIndexes); err != nil { + return nil, err + } + var relevantLogs []*types.Log + for _, txIndex := range txIndexes { + logs, err := rf.LogsForTxIndex(ctx, parentChainBlockHash, txIndex) if err != nil { return nil, err } - var node []any - if err = rlp.DecodeBytes(nodeData, &node); err != nil { - return nil, fmt.Errorf("failed to decode RLP node: %w", err) - } - switch len(node) { - case 17: - // We hit a branch node, which has 16 children and a value. - if len(currentPath) == len(targetNibbles) { - // A branch node's 17th item could be the value, so we check if it contains the receipt. - if valueBytes, ok := node[16].([]byte); ok && len(valueBytes) > 0 { - // This branch node has the actual value as the last item, so we decode the receipt - return decodeReceipt(valueBytes) - } - return nil, fmt.Errorf("no receipt found at target key") - } - // Get the next nibble to follow. 
- targetNibble := targetNibbles[len(currentPath)] - childData, ok := node[targetNibble].([]byte) - if !ok || len(childData) == 0 { - return nil, fmt.Errorf("no child at nibble %d", targetNibble) - } - // Move to the child node, which is the next hash we have to navigate. - currentNodeHash = common.BytesToHash(childData) - currentPath = append(currentPath, targetNibble) - case 2: - keyPath, ok := node[0].([]byte) - if !ok { - return nil, fmt.Errorf("invalid key path in node") - } - key := extractKeyNibbles(keyPath) - expectedPath := make([]byte, 0) - expectedPath = append(expectedPath, currentPath...) - expectedPath = append(expectedPath, key...) - - // Check if it is a leaf or extension node. - leaf, err := isLeaf(keyPath) - if err != nil { - return nil, err - } - if leaf { - // Check that the keyPath matches the target nibbles, - // otherwise, the receipt does not exist in the trie. - if !bytes.Equal(expectedPath, targetNibbles) { - return nil, fmt.Errorf("leaf key does not match target nibbles") - } - rawData, ok := node[1].([]byte) - if !ok { - return nil, fmt.Errorf("invalid receipt data in leaf node") - } - return decodeReceipt(rawData) - } - // If the node is not a leaf node, it is an extension node. - // Check if our target key matches this extension path. - if len(expectedPath) > len(targetNibbles) || !bytes.Equal(expectedPath, targetNibbles[:len(expectedPath)]) { - return nil, fmt.Errorf("extension path mismatch") - } - nextNodeBytes, ok := node[1].([]byte) - if !ok { - return nil, fmt.Errorf("invalid next node in extension") - } - // We navigate to the next node in the trie. - currentNodeHash = common.BytesToHash(nextNodeBytes) - currentPath = expectedPath - default: - return nil, fmt.Errorf("invalid node structure: unexpected length %d", len(node)) - } + relevantLogs = append(relevantLogs, logs...) } + return relevantLogs, nil } -// Converts a byte slice key into a slice of nibbles (4-bit values). 
-// Keys are encoded in big endian format, which is required by Ethereum MPTs. -func keyToNibbles(key []byte) []byte { - nibbles := make([]byte, len(key)*2) - for i, b := range key { - nibbles[i*2] = b >> 4 - nibbles[i*2+1] = b & 0x0f +// LogsForBlockHashAllLogs is kept, in case we go with an implementation of returning all logs present in a block +func (rf *receiptFetcherForBlock) LogsForBlockHashAllLogs(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + if rf.header.Hash() != parentChainBlockHash { + return nil, errors.New("parentChainBlockHash mismatch") + } + preimageDB := &DB{ + resolver: rf.preimageResolver, + } + tdb := triedb.NewDatabase(preimageDB, nil) + receiptsTrie, err := trie.New(trie.TrieID(rf.header.ReceiptHash), tdb) + if err != nil { + return nil, err + } + entries, indices := collectTrieEntries(receiptsTrie) + fmt.Println("indices ", indices, len(entries)) + rawReceipts := reconstructOrderedData(entries, indices) + receipts, err := decodeReceiptsData(rawReceipts) + if err != nil { + return nil, err } - return nibbles + var relevantLogs []*types.Log + for _, receipt := range receipts { + relevantLogs = append(relevantLogs, receipt.Logs...) + } + return relevantLogs, nil } -// Extracts the key nibbles from a key path, handling odd/even length cases. 
-func extractKeyNibbles(keyPath []byte) []byte { - if len(keyPath) == 0 { - return nil +func collectTrieEntries(txTrie *trie.Trie) ([][]byte, []uint64) { + nodeIterator, iterErr := txTrie.NodeIterator(nil) + if iterErr != nil { + panic(iterErr) } - nibbles := keyToNibbles(keyPath) - if nibbles[0]&1 != 0 { - return nibbles[1:] + + var rawValues [][]byte + var indexKeys []uint64 + + for nodeIterator.Next(true) { + if !nodeIterator.Leaf() { + continue + } + + leafKey := nodeIterator.LeafKey() + var decodedIndex uint64 + + decodeErr := rlp.DecodeBytes(leafKey, &decodedIndex) + if decodeErr != nil { + panic(fmt.Errorf("key decoding error: %w", decodeErr)) + } + + indexKeys = append(indexKeys, decodedIndex) + rawValues = append(rawValues, nodeIterator.LeafBlob()) } - return nibbles[2:] + + return rawValues, indexKeys } -func isLeaf(keyPath []byte) (bool, error) { - firstByte := keyPath[0] - firstNibble := firstByte >> 4 - // 2 or 3 indicates leaf, while 0 or 1 indicates extension nodes in the Ethereum MPT specification. 
- if firstNibble > 3 { - return false, errors.New("first nibble cannot be greater than 3") +func reconstructOrderedData(rawValues [][]byte, indices []uint64) []hexutil.Bytes { + orderedData := make([]hexutil.Bytes, len(rawValues)) + for position, index := range indices { + if index >= uint64(len(rawValues)) { + panic(fmt.Sprintf("index out of bounds: %d", index-1)) + } + if orderedData[index] != nil { + panic(fmt.Sprintf("index collision detected: %d", index-1)) + } + orderedData[index] = rawValues[position] } - return firstNibble >= 2, nil + return orderedData } -func decodeReceipt(data []byte) (*types.Receipt, error) { - if len(data) == 0 { - return nil, errors.New("empty data cannot be decoded into receipt") - } - rpt := new(types.Receipt) - if err := rpt.UnmarshalBinary(data); err != nil { - return nil, err +func decodeReceiptsData(encodedData []hexutil.Bytes) (types.Receipts, error) { + receiptList := make(types.Receipts, 0, len(encodedData)) + for _, encodedReceipt := range encodedData { + decodedReceipt := new(types.Receipt) + if decodeErr := decodedReceipt.UnmarshalBinary(encodedReceipt); decodeErr != nil { + return nil, fmt.Errorf("receipt decoding failed: %w", decodeErr) + } + receiptList = append(receiptList, decodedReceipt) } - return rpt, nil + return receiptList, nil } diff --git a/cmd/mel-replay/receipt_fetcher_test.go b/cmd/mel-replay/receipt_fetcher_test.go deleted file mode 100644 index bec77c9ea9..0000000000 --- a/cmd/mel-replay/receipt_fetcher_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2025-2026, Offchain Labs, Inc. 
-// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md - -package main - -import ( - "context" - "fmt" - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/trie" - - "github.com/offchainlabs/nitro/arbutil" -) - -func TestFetchReceiptFromBlock_Multiple(t *testing.T) { - ctx := context.Background() - // Creates a block with 42 transactions and receipts. - numReceipts := 42 - receipts := createTestReceipts(numReceipts) - hasher := newRecordingHasher() - receiptsRoot := types.DeriveSha(types.Receipts(receipts), hasher) - header := &types.Header{} - txes := make([]*types.Transaction, numReceipts) - for i := 0; i < numReceipts; i++ { - txes[i] = types.NewTransaction(uint64(i), common.Address{}, big.NewInt(0), 21000, big.NewInt(1), nil) // #nosec G115 - } - body := &types.Body{ - Transactions: txes, - } - blk := types.NewBlock(header, body, receipts, hasher) - require.Equal(t, blk.ReceiptHash(), receiptsRoot) - preimages := hasher.GetPreimages() - mockPreimageResolver := &mockPreimageResolver{ - preimages: preimages, - } - receiptFetcher := &receiptFetcherForBlock{ - header: blk.Header(), - preimageResolver: mockPreimageResolver, - } - for i := 0; i < numReceipts; i++ { - receipt, err := receiptFetcher.ReceiptForTransactionIndex(ctx, uint(i)) // #nosec G115 - require.NoError(t, err) - require.Equal(t, receipts[i].CumulativeGasUsed, receipt.CumulativeGasUsed) - } -} - -type mockPreimageResolver struct { - preimages map[common.Hash][]byte -} - -func (m *mockPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { - if preimage, exists := m.preimages[hash]; exists { - return preimage, nil - } - return nil, fmt.Errorf("preimage not found for hash: %s", hash.Hex()) -} - -// Implements a hasher that 
captures preimages of hashes as it computes them. -type preimageRecordingHasher struct { - trie *trie.StackTrie - preimages map[common.Hash][]byte -} - -func newRecordingHasher() *preimageRecordingHasher { - h := &preimageRecordingHasher{ - preimages: make(map[common.Hash][]byte), - } - // OnTrieNode callback captures all trie nodes. - onTrieNode := func(path []byte, hash common.Hash, blob []byte) { - // Deep copy the blob since the callback warns contents may change, so this is required. - h.preimages[hash] = common.CopyBytes(blob) - } - - h.trie = trie.NewStackTrie(onTrieNode) - return h -} - -func (h *preimageRecordingHasher) Reset() { - onTrieNode := func(path []byte, hash common.Hash, blob []byte) { - h.preimages[hash] = common.CopyBytes(blob) - } - h.trie = trie.NewStackTrie(onTrieNode) -} - -func (h *preimageRecordingHasher) Update(key, value []byte) error { - valueHash := crypto.Keccak256Hash(value) - h.preimages[valueHash] = common.CopyBytes(value) - return h.trie.Update(key, value) -} - -func (h *preimageRecordingHasher) Hash() common.Hash { - return h.trie.Hash() -} - -func (h *preimageRecordingHasher) GetPreimages() map[common.Hash][]byte { - return h.preimages -} - -func createTestReceipts(count int) types.Receipts { - receipts := make(types.Receipts, count) - for i := 0; i < count; i++ { - receipt := &types.Receipt{ - Status: 1, - CumulativeGasUsed: 50_000 + uint64(i), // #nosec G115 - TxHash: common.Hash{}, - ContractAddress: common.Address{}, - Logs: []*types.Log{}, - BlockHash: common.BytesToHash([]byte("foobar")), - BlockNumber: big.NewInt(100), - TransactionIndex: uint(i), // #nosec G115 - } - receipt.Bloom = types.BytesToBloom(make([]byte, 256)) - receipts[i] = receipt - } - return receipts -} diff --git a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go new file mode 100644 index 0000000000..b85c419ee4 --- /dev/null +++ 
b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go @@ -0,0 +1,133 @@ +// Copyright 2025-2026, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package main + +import ( + "context" + "fmt" + "math/big" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie" + + melrecording "github.com/offchainlabs/nitro/arbnode/mel/recording" + "github.com/offchainlabs/nitro/arbutil" +) + +type mockPreimageResolver struct { + preimages map[common.Hash][]byte +} + +func (m *mockPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { + if preimage, exists := m.preimages[hash]; exists { + return preimage, nil + } + return nil, fmt.Errorf("preimage not found for hash: %s", hash.Hex()) +} + +func TestRecordingOfReceiptPreimagesAndFetchingLogsFromPreimages(t *testing.T) { + ctx := context.Background() + blockReader := &mockBlockReader{ + blocks: make(map[common.Hash]*types.Block), + receiptByTxHash: map[common.Hash]*types.Receipt{}, + } + toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") + blockHeader := &types.Header{} + receipts := []*types.Receipt{} + txs := make([]*types.Transaction, 0) + for i := range uint64(50) { + txData := &types.DynamicFeeTx{ + To: &toAddr, + Nonce: i, + GasFeeCap: big.NewInt(1), + GasTipCap: big.NewInt(1), + Gas: 1, + Value: big.NewInt(0), + Data: nil, + } + tx := types.NewTx(txData) + txs = append(txs, tx) + receipt := &types.Receipt{ + TxHash: tx.Hash(), + TransactionIndex: uint(i), + Type: types.DynamicFeeTxType, + Logs: []*types.Log{ + { + // Consensus fields: + Address: common.HexToAddress(fmt.Sprintf("%d", i)), + Topics: []common.Hash{common.HexToHash("topic1"), common.HexToHash("topic2")}, + Data: common.Hex2Bytes(fmt.Sprintf("%d", i)), + + // Derived 
Fields: + TxIndex: uint(i), + }, + }, + } + receipts = append(receipts, receipt) + blockReader.receiptByTxHash[tx.Hash()] = receipt + } + blockBody := &types.Body{Transactions: txs} + block := types.NewBlock(blockHeader, blockBody, receipts, trie.NewStackTrie(nil)) + blockReader.blocks[block.Hash()] = block + recorder := melrecording.NewReceiptRecorder(blockReader, block.Hash()) + require.NoError(t, recorder.Initialize(ctx)) + + // Test recording of preimages + recordStart := uint(6) + recordEnd := uint(20) + for i := recordStart; i <= recordEnd; i++ { + logs, err := recorder.LogsForTxIndex(ctx, block.Hash(), i) + require.NoError(t, err) + have, err := logs[0].MarshalJSON() + require.NoError(t, err) + want, err := receipts[i].Logs[0].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + + // Test reading of logs from the recorded preimages + preimages, err := recorder.GetPreimages() + require.NoError(t, err) + receiptFetcher := &receiptFetcherForBlock{ + header: block.Header(), + preimageResolver: &testPreimageResolver{ + preimages: preimages[arbutil.Keccak256PreimageType], + }, + } + // Test LogsForBlockHash + logs, err := receiptFetcher.LogsForBlockHash(ctx, block.Hash()) + require.NoError(t, err) + // #nosec G115 + if len(logs) != int(recordEnd-recordStart+1) { + t.Fatalf("number of logs from LogsForBlockHash mismatch. 
Want: %d, Got: %d", recordEnd-recordStart+1, len(logs)) + } + for _, log := range logs { + have, err := log.MarshalJSON() + require.NoError(t, err) + want, err := receipts[log.TxIndex].Logs[0].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + // Test LogsForTxIndex + for i := recordStart; i <= recordEnd; i++ { + logs, err := receiptFetcher.LogsForTxIndex(ctx, block.Hash(), i) + require.NoError(t, err) + have, err := logs[0].MarshalJSON() + require.NoError(t, err) + want, err := receipts[i].Logs[0].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + + // Logs fetching should fail for not recorded ones + _, err = receiptFetcher.LogsForTxIndex(ctx, block.Hash(), recordStart-1) + if err == nil || !strings.Contains(err.Error(), "preimage not found for hash") { + t.Fatalf("failed with unexpected error: %v", err) + } +} diff --git a/cmd/mel-replay/trie_fetcher.go b/cmd/mel-replay/trie_fetcher.go new file mode 100644 index 0000000000..277ab65e0a --- /dev/null +++ b/cmd/mel-replay/trie_fetcher.go @@ -0,0 +1,147 @@ +package main + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" + + "github.com/offchainlabs/nitro/arbutil" +) + +// Fetches a specific object at index from a block's Receipt/Tx trie by navigating its +// Merkle Patricia Trie structure. It uses the preimage resolver to fetch preimages +// of trie nodes as needed, and determines how to navigate depending on the structure of the trie nodes. +func fetchObjectFromTrie[T any](root common.Hash, index uint, preimageResolver preimageResolver) (*T, error) { + var empty *T + currentNodeHash := root + currentPath := []byte{} // Track nibbles consumed so far. 
+ receiptKey, err := rlp.EncodeToBytes(index) + if err != nil { + return empty, err + } + targetNibbles := keyToNibbles(receiptKey) + for { + nodeData, err := preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentNodeHash) + if err != nil { + return empty, err + } + var node []any + if err = rlp.DecodeBytes(nodeData, &node); err != nil { + return empty, fmt.Errorf("failed to decode RLP node: %w", err) + } + switch len(node) { + case 17: + // We hit a branch node, which has 16 children and a value. + if len(currentPath) == len(targetNibbles) { + // A branch node's 17th item could be the value, so we check if it contains the receipt. + if valueBytes, ok := node[16].([]byte); ok && len(valueBytes) > 0 { + // This branch node has the actual value as the last item, so we decode the receipt + return decodeBinary[T](valueBytes) + } + return empty, fmt.Errorf("no receipt found at target key") + } + // Get the next nibble to follow. + targetNibble := targetNibbles[len(currentPath)] + childData, ok := node[targetNibble].([]byte) + if !ok || len(childData) == 0 { + return empty, fmt.Errorf("no child at nibble %d", targetNibble) + } + // Move to the child node, which is the next hash we have to navigate. + currentNodeHash = common.BytesToHash(childData) + currentPath = append(currentPath, targetNibble) + case 2: + keyPath, ok := node[0].([]byte) + if !ok { + return empty, fmt.Errorf("invalid key path in node") + } + key := extractKeyNibbles(keyPath) + expectedPath := make([]byte, 0) + expectedPath = append(expectedPath, currentPath...) + expectedPath = append(expectedPath, key...) + + // Check if it is a leaf or extension node. + leaf, err := isLeaf(keyPath) + if err != nil { + return empty, err + } + if leaf { + // Check that the keyPath matches the target nibbles, + // otherwise, the receipt does not exist in the trie. 
+ if !bytes.Equal(expectedPath, targetNibbles) { + return empty, fmt.Errorf("leaf key does not match target nibbles") + } + rawData, ok := node[1].([]byte) + if !ok { + return empty, fmt.Errorf("invalid receipt data in leaf node") + } + return decodeBinary[T](rawData) + } + // If the node is not a leaf node, it is an extension node. + // Check if our target key matches this extension path. + if len(expectedPath) > len(targetNibbles) || !bytes.Equal(expectedPath, targetNibbles[:len(expectedPath)]) { + return empty, fmt.Errorf("extension path mismatch") + } + nextNodeBytes, ok := node[1].([]byte) + if !ok { + return empty, fmt.Errorf("invalid next node in extension") + } + // We navigate to the next node in the trie. + currentNodeHash = common.BytesToHash(nextNodeBytes) + currentPath = expectedPath + default: + return empty, fmt.Errorf("invalid node structure: unexpected length %d", len(node)) + } + } +} + +// Converts a byte slice key into a slice of nibbles (4-bit values). +// Keys are encoded in big endian format, which is required by Ethereum MPTs. +func keyToNibbles(key []byte) []byte { + nibbles := make([]byte, len(key)*2) + for i, b := range key { + nibbles[i*2] = b >> 4 + nibbles[i*2+1] = b & 0x0f + } + return nibbles +} + +// Extracts the key nibbles from a key path, handling odd/even length cases. +func extractKeyNibbles(keyPath []byte) []byte { + if len(keyPath) == 0 { + return nil + } + nibbles := keyToNibbles(keyPath) + if nibbles[0]&1 != 0 { + return nibbles[1:] + } + return nibbles[2:] +} + +func isLeaf(keyPath []byte) (bool, error) { + firstByte := keyPath[0] + firstNibble := firstByte >> 4 + // 2 or 3 indicates leaf, while 0 or 1 indicates extension nodes in the Ethereum MPT specification. 
+ if firstNibble > 3 { + return false, errors.New("first nibble cannot be greater than 3") + } + return firstNibble >= 2, nil +} + +func decodeBinary[T any](data []byte) (*T, error) { + var empty *T + if len(data) == 0 { + return empty, errors.New("empty data cannot be decoded") + } + v := new(T) + u, ok := any(v).(interface{ UnmarshalBinary([]byte) error }) + if !ok { + return empty, errors.New("decodeBinary is called on a type that doesnt implement UnmarshalBinary") + } + if err := u.UnmarshalBinary(data); err != nil { + return empty, err + } + return v, nil +} diff --git a/cmd/mel-replay/tx_fetcher.go b/cmd/mel-replay/tx_fetcher.go new file mode 100644 index 0000000000..be7290b0c3 --- /dev/null +++ b/cmd/mel-replay/tx_fetcher.go @@ -0,0 +1,23 @@ +package main + +import ( + "context" + + "github.com/ethereum/go-ethereum/core/types" +) + +type txFetcherForBlock struct { + header *types.Header + preimageResolver preimageResolver +} + +// TransactionByLog fetches the tx for a specific transaction index by walking +// the tx trie of the block header. It uses the preimage resolver to fetch the preimages +// of the trie nodes as needed. 
+func (tf *txFetcherForBlock) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { + tx, err := fetchObjectFromTrie[types.Transaction](tf.header.TxHash, log.TxIndex, tf.preimageResolver) + if err != nil { + return nil, err + } + return tx, err +} diff --git a/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go b/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go new file mode 100644 index 0000000000..b0e441a85a --- /dev/null +++ b/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go @@ -0,0 +1,105 @@ +package main + +import ( + "context" + "math/big" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie" + + "github.com/offchainlabs/nitro/arbnode/mel/recording" + "github.com/offchainlabs/nitro/arbutil" +) + +type mockBlockReader struct { + blocks map[common.Hash]*types.Block + receiptByTxHash map[common.Hash]*types.Receipt +} + +func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + block, exists := mbr.blocks[hash] + if !exists { + return nil, nil + } + return block, nil +} + +func (mbr *mockBlockReader) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + receipt, exists := mbr.receiptByTxHash[txHash] + if !exists { + return nil, nil + } + return receipt, nil +} + +func TestRecordingOfTxPreimagesAndFetchingTxsFromPreimages(t *testing.T) { + ctx := context.Background() + toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") + blockHeader := &types.Header{} + txs := make([]*types.Transaction, 0) + for i := range uint64(50) { + txData := &types.DynamicFeeTx{ + To: &toAddr, + Nonce: i, + GasFeeCap: big.NewInt(1), + GasTipCap: big.NewInt(1), + Gas: 1, + Value: big.NewInt(0), + Data: nil, + } + tx := types.NewTx(txData) + txs = append(txs, tx) + } + blockBody := &types.Body{Transactions: txs} + 
receipts := []*types.Receipt{} + block := types.NewBlock(blockHeader, blockBody, receipts, trie.NewStackTrie(nil)) + blockReader := &mockBlockReader{ + blocks: map[common.Hash]*types.Block{ + block.Hash(): block, + }, + } + recorder := melrecording.NewTransactionRecorder(blockReader, block.Hash()) + require.NoError(t, recorder.Initialize(ctx)) + + // Test recording of preimages + recordStart := uint(9) + recordEnd := uint(27) + for i := recordStart; i <= recordEnd; i++ { + tx, err := recorder.TransactionByLog(ctx, &types.Log{TxIndex: i}) + require.NoError(t, err) + have, err := tx.MarshalJSON() + require.NoError(t, err) + want, err := block.Transactions()[i].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + + // Test reading of txs from the recorded preimages + preimages := recorder.GetPreimages() + txsFetcher := &txFetcherForBlock{ + header: block.Header(), + preimageResolver: &testPreimageResolver{ + preimages: preimages[arbutil.Keccak256PreimageType], + }, + } + for i := recordStart; i <= recordEnd; i++ { + tx, err := txsFetcher.TransactionByLog(ctx, &types.Log{TxIndex: i}) + require.NoError(t, err) + have, err := tx.MarshalJSON() + require.NoError(t, err) + want, err := block.Transactions()[i].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + + // Tx fetching should fail for not recorded ones + _, err := txsFetcher.TransactionByLog(ctx, &types.Log{TxIndex: recordStart - 1}) + if err == nil || !strings.Contains(err.Error(), "preimage not found for hash") { + t.Fatalf("failed with unexpected error: %v", err) + } +} diff --git a/cmd/mel-replay/txs_fetcher.go b/cmd/mel-replay/txs_fetcher.go deleted file mode 100644 index 2307ba0792..0000000000 --- a/cmd/mel-replay/txs_fetcher.go +++ /dev/null @@ -1,90 +0,0 @@ -package main - -import ( - "context" - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - 
"github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/triedb" -) - -type txsFetcherForBlock struct { - header *types.Header - preimageResolver preimageResolver -} - -func (tf *txsFetcherForBlock) TransactionsByHeader( - ctx context.Context, - parentChainHeaderHash common.Hash, -) (types.Transactions, error) { - preimageDB := &DB{ - resolver: tf.preimageResolver, - } - tdb := triedb.NewDatabase(preimageDB, nil) - tr, err := trie.New(trie.TrieID(tf.header.TxHash), tdb) - if err != nil { - panic(err) - } - entries, indices := tf.collectTrieEntries(tr) - rawTxs := tf.reconstructOrderedData(entries, indices) - return tf.decodeTransactionData(rawTxs) -} - -func (btr *txsFetcherForBlock) collectTrieEntries(txTrie *trie.Trie) ([][]byte, []uint64) { - nodeIterator, iterErr := txTrie.NodeIterator(nil) - if iterErr != nil { - panic(iterErr) - } - - var rawValues [][]byte - var indexKeys []uint64 - - for nodeIterator.Next(true) { - if !nodeIterator.Leaf() { - continue - } - - leafKey := nodeIterator.LeafKey() - var decodedIndex uint64 - - decodeErr := rlp.DecodeBytes(leafKey, &decodedIndex) - if decodeErr != nil { - panic(fmt.Errorf("key decoding error: %w", decodeErr)) - } - - indexKeys = append(indexKeys, decodedIndex) - rawValues = append(rawValues, nodeIterator.LeafBlob()) - } - - return rawValues, indexKeys -} - -func (btr *txsFetcherForBlock) reconstructOrderedData(rawValues [][]byte, indices []uint64) []hexutil.Bytes { - orderedData := make([]hexutil.Bytes, len(rawValues)) - for position, index := range indices { - if index >= uint64(len(rawValues)) { - panic(fmt.Sprintf("index out of bounds: %d", index)) - } - if orderedData[index] != nil { - panic(fmt.Sprintf("index collision detected: %d", index)) - } - orderedData[index] = rawValues[position] - } - return orderedData -} - -func (btr *txsFetcherForBlock) decodeTransactionData(encodedData []hexutil.Bytes) (types.Transactions, error) { - transactionList := 
make(types.Transactions, 0, len(encodedData)) - for _, encodedTx := range encodedData { - decodedTx := new(types.Transaction) - if decodeErr := decodedTx.UnmarshalBinary(encodedTx); decodeErr != nil { - return nil, fmt.Errorf("transaction decoding failed: %w", decodeErr) - } - transactionList = append(transactionList, decodedTx) - } - return transactionList, nil -} diff --git a/cmd/mel-replay/txs_fetcher_test.go b/cmd/mel-replay/txs_fetcher_test.go deleted file mode 100644 index ec8c651516..0000000000 --- a/cmd/mel-replay/txs_fetcher_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package main - -import ( - "context" - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -func TestFetchTransactionsForBlockHeader_DynamicFeeTxs(t *testing.T) { - ctx := context.Background() - total := uint64(42) - txes := make([]*types.Transaction, total) - for i := uint64(0); i < total; i++ { - txData := types.DynamicFeeTx{ - Nonce: i, - To: nil, - Gas: 21000, - GasTipCap: big.NewInt(1), - GasFeeCap: big.NewInt(1), - } - txes[i] = types.NewTx(&txData) - } - hasher := newRecordingHasher() - txsRoot := types.DeriveSha(types.Transactions(txes), hasher) - header := &types.Header{ - TxHash: txsRoot, - } - preimages := hasher.GetPreimages() - mockPreimageResolver := &mockPreimageResolver{ - preimages: preimages, - } - txsFetcher := &txsFetcherForBlock{ - header: header, - preimageResolver: mockPreimageResolver, - } - fetched, err := txsFetcher.TransactionsByHeader(ctx, header.Hash()) - require.NoError(t, err) - require.True(t, uint64(len(fetched)) == total) // #nosec G115 - for i, tx := range fetched { - require.Equal(t, txes[i].Hash(), tx.Hash()) - require.Equal(t, uint64(i), tx.Nonce()) // #nosec G115 - } -} - -func TestFetchTransactionsForBlockHeader_LegacyTxs(t *testing.T) { - ctx := context.Background() - total := uint64(42) - txes := make([]*types.Transaction, total) - for i := 
uint64(0); i < total; i++ { - txes[i] = types.NewTransaction(i, common.Address{}, big.NewInt(0), 21000, big.NewInt(1), nil) - } - hasher := newRecordingHasher() - txsRoot := types.DeriveSha(types.Transactions(txes), hasher) - header := &types.Header{ - TxHash: txsRoot, - } - preimages := hasher.GetPreimages() - mockPreimageResolver := &mockPreimageResolver{ - preimages: preimages, - } - txsFetcher := &txsFetcherForBlock{ - header: header, - preimageResolver: mockPreimageResolver, - } - fetched, err := txsFetcher.TransactionsByHeader(ctx, header.Hash()) - require.NoError(t, err) - require.True(t, uint64(len(fetched)) == total) // #nosec G115 - for i, tx := range fetched { - require.Equal(t, txes[i].Hash(), tx.Hash()) - require.Equal(t, uint64(i), tx.Nonce()) // #nosec G115 - } -} From be5fd45ef65c07399714c62fd97d0f17d5bdd586 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 17 Dec 2025 16:04:34 +0530 Subject: [PATCH 09/42] initial mel validator work --- arbnode/mel/runner/mel.go | 4 +++ arbnode/mel/state.go | 7 ++++ staker/mel_validator.go | 73 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 84 insertions(+) create mode 100644 staker/mel_validator.go diff --git a/arbnode/mel/runner/mel.go b/arbnode/mel/runner/mel.go index c45b6d7b14..8cb13c2cf5 100644 --- a/arbnode/mel/runner/mel.go +++ b/arbnode/mel/runner/mel.go @@ -238,6 +238,10 @@ func (m *MessageExtractor) GetHeadState(ctx context.Context) (*mel.State, error) return m.melDB.GetHeadMelState(ctx) } +func (m *MessageExtractor) GetState(ctx context.Context, parentchainBlocknumber uint64) (*mel.State, error) { + return m.melDB.State(ctx, parentchainBlocknumber) +} + func (m *MessageExtractor) GetMsgCount(ctx context.Context) (arbutil.MessageIndex, error) { headState, err := m.melDB.GetHeadMelState(ctx) if err != nil { diff --git a/arbnode/mel/state.go b/arbnode/mel/state.go index a4ab059c4a..230d93b5ee 100644 --- a/arbnode/mel/state.go +++ b/arbnode/mel/state.go @@ -205,6 +205,13 @@ func (s *State) 
ReorgTo(newState *State) error { return nil } +func WasMessageExtracted(preState, postState *State) bool { + return preState.MsgCount != postState.MsgCount || + preState.BatchCount != postState.BatchCount || // TODO: can BatchCount increase without MsgCount increasing? + preState.DelayedMessagesSeen != postState.DelayedMessagesSeen || // TODO: should seen count increment be considered a message extraction? + preState.DelayedMessagesRead != postState.DelayedMessagesRead // TODO: maybe not needed? as MsgCount would anyway increase +} + func ToPtrSlice[T any](list []T) []*T { var ptrs []*T for _, item := range list { diff --git a/staker/mel_validator.go b/staker/mel_validator.go new file mode 100644 index 0000000000..201e7813dd --- /dev/null +++ b/staker/mel_validator.go @@ -0,0 +1,73 @@ +package staker + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/offchainlabs/nitro/arbnode/mel" + "github.com/offchainlabs/nitro/arbnode/mel/extraction" + "github.com/offchainlabs/nitro/arbnode/mel/runner" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/util/stopwaiter" +) + +type MELValidator struct { + stopwaiter.StopWaiter + arbDb ethdb.KeyValueStore + l1client *ethclient.Client + messageExtractor *melrunner.MessageExtractor + dapReaders arbstate.DapReaderSource +} + +func NewMELValdidator(messageExtractor *melrunner.MessageExtractor) *MELValidator { + return &MELValidator{ + messageExtractor: messageExtractor, + } +} + +func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, startPosition uint64) (*validationEntry, error) { + if startPosition == 0 { + return nil, errors.New("trying to create validation entry for zero block number") + } + preState, err := mv.messageExtractor.GetState(ctx, startPosition-1) + if err != nil { + return nil, err + } 
+ delayedMsgRecordingDB := melrunner.NewRecordingDatabase(mv.arbDb) + recordingDAPReaders := melrunner.NewRecordingDAPReaderSource(ctx, mv.dapReaders) + for i := startPosition; ; i++ { + header, err := mv.l1client.HeaderByNumber(ctx, new(big.Int).SetUint64(i)) + if err != nil { + return nil, err + } + // Awaiting recording implementations of logsFetcher and txsFetcher + state, _, _, _, err := melextraction.ExtractMessages(ctx, preState, header, recordingDAPReaders, delayedMsgRecordingDB, nil, nil) + if err != nil { + return nil, fmt.Errorf("error calling melextraction.ExtractMessages in recording mode: %w", err) + } + wantState, err := mv.messageExtractor.GetState(ctx, i) + if err != nil { + return nil, err + } + if state.Hash() != wantState.Hash() { + return nil, fmt.Errorf("calculated MEL state hash in recording mode doesn't match the one computed in native mode, parentchainBlocknumber: %d", i) + } + // Currently we are ending recording as soon + if mel.WasMessageExtracted(preState, state) { + break + } + preState = state + } + preimages := recordingDAPReaders.Preimages() + delayedPreimages := daprovider.PreimagesMap{ + arbutil.Keccak256PreimageType: delayedMsgRecordingDB.Preimages(), + } + daprovider.CopyPreimagesInto(preimages, delayedPreimages) + return nil, nil +} From 37bce6be50e63b858c1f9c63c0af253a05deeb23 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Thu, 18 Dec 2025 11:19:10 +0530 Subject: [PATCH 10/42] refactor --- staker/mel_validator.go | 1 - 1 file changed, 1 deletion(-) diff --git a/staker/mel_validator.go b/staker/mel_validator.go index 201e7813dd..c814e1de74 100644 --- a/staker/mel_validator.go +++ b/staker/mel_validator.go @@ -58,7 +58,6 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, startPosi if state.Hash() != wantState.Hash() { return nil, fmt.Errorf("calculated MEL state hash in recording mode doesn't match the one computed in native mode, parentchainBlocknumber: %d", i) } - // Currently we are ending 
recording as soon if mel.WasMessageExtracted(preState, state) { break } From 8b45fd304d99b878031ebb5e165f291693f4be0e Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 19 Dec 2025 18:07:10 +0530 Subject: [PATCH 11/42] add testing for correctness of preimages --- arbnode/mel/state.go | 7 - cmd/nitro/init.go | 6 +- cmd/pruning/pruning.go | 3 +- staker/bold/bold_staker.go | 94 +------- staker/bold_assertioncreation.go | 105 +++++++++ staker/mel_validator.go | 120 +++++++++- system_tests/message_extraction_layer_test.go | 6 +- ...essage_extraction_layer_validation_test.go | 215 ++++++++++++++++++ 8 files changed, 437 insertions(+), 119 deletions(-) create mode 100644 staker/bold_assertioncreation.go create mode 100644 system_tests/message_extraction_layer_validation_test.go diff --git a/arbnode/mel/state.go b/arbnode/mel/state.go index 230d93b5ee..a4ab059c4a 100644 --- a/arbnode/mel/state.go +++ b/arbnode/mel/state.go @@ -205,13 +205,6 @@ func (s *State) ReorgTo(newState *State) error { return nil } -func WasMessageExtracted(preState, postState *State) bool { - return preState.MsgCount != postState.MsgCount || - preState.BatchCount != postState.BatchCount || // TODO: can BatchCount increase without MsgCount increasing? - preState.DelayedMessagesSeen != postState.DelayedMessagesSeen || // TODO: should seen count increment be considered a message extraction? - preState.DelayedMessagesRead != postState.DelayedMessagesRead // TODO: maybe not needed? 
as MsgCount would anyway increase -} - func ToPtrSlice[T any](list []T) []*T { var ptrs []*T for _, item := range list { diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 3a1bb6eced..d0234ae3c5 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -47,7 +47,7 @@ import ( "github.com/offchainlabs/nitro/cmd/staterecovery" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/offchainlabs/nitro/staker/bold" + "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" @@ -957,7 +957,7 @@ func validateGenesisAssertion(ctx context.Context, rollupAddress common.Address, if err != nil { return err } - genesisAssertionCreationInfo, err := bold.ReadBoldAssertionCreationInfo(ctx, userLogic, l1Client, rollupAddress, genesisAssertionHash) + genesisAssertionCreationInfo, err := staker.ReadBoldAssertionCreationInfo(ctx, userLogic, l1Client, rollupAddress, genesisAssertionHash) if err != nil { // If we can't find the empty genesis assertion, try to compute the assertion for non-empty genesis genesisGlobalState := protocol.GoGlobalState{ @@ -978,7 +978,7 @@ func validateGenesisAssertion(ctx context.Context, rollupAddress common.Address, if err != nil { return err } - genesisAssertionCreationInfo, err = bold.ReadBoldAssertionCreationInfo(ctx, userLogic, l1Client, rollupAddress, genesisAssertionHash) + genesisAssertionCreationInfo, err = staker.ReadBoldAssertionCreationInfo(ctx, userLogic, l1Client, rollupAddress, genesisAssertionHash) if err != nil { return err } diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index dc7802c63d..34a3915ac5 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -32,7 +32,6 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" - 
"github.com/offchainlabs/nitro/staker/bold" legacystaker "github.com/offchainlabs/nitro/staker/legacy" multiprotocolstaker "github.com/offchainlabs/nitro/staker/multi_protocol" ) @@ -258,7 +257,7 @@ func getLatestConfirmedHash(ctx context.Context, rollupAddrs chaininfo.RollupAdd if err != nil { return common.Hash{}, err } - assertion, err := bold.ReadBoldAssertionCreationInfo( + assertion, err := staker.ReadBoldAssertionCreationInfo( ctx, rollupUserLogic, l1Client, diff --git a/staker/bold/bold_staker.go b/staker/bold/bold_staker.go index 983bb107fe..50f3404e0d 100644 --- a/staker/bold/bold_staker.go +++ b/staker/bold/bold_staker.go @@ -6,13 +6,11 @@ import ( "context" "errors" "fmt" - "math/big" "strings" "time" "github.com/spf13/pflag" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -45,20 +43,6 @@ var ( boldStakerAmountStakedGauge = metrics.GetOrRegisterGauge("arb/staker/amount_staked", nil) ) -var assertionCreatedId common.Hash - -func init() { - rollupAbi, err := rollupgen.RollupCoreMetaData.GetAbi() - if err != nil { - panic(err) - } - assertionCreatedEvent, ok := rollupAbi.Events["AssertionCreated"] - if !ok { - panic("RollupCore ABI missing AssertionCreated event") - } - assertionCreatedId = assertionCreatedEvent.ID -} - type BoldConfig struct { // How often to post assertions onchain. AssertionPostingInterval time.Duration `koanf:"assertion-posting-interval"` @@ -290,7 +274,7 @@ func (b *BOLDStaker) Initialize(ctx context.Context) error { } latestStaked = latestConfirmed } - assertion, err := ReadBoldAssertionCreationInfo( + assertion, err := staker.ReadBoldAssertionCreationInfo( ctx, rollupUserLogic, b.client, @@ -629,79 +613,3 @@ func newBOLDChallengeManager( } return manager, nil } - -// Read the creation info for an assertion by looking up its creation -// event from the rollup contracts. 
-func ReadBoldAssertionCreationInfo( - ctx context.Context, - rollup *rollupgen.RollupUserLogic, - client bind.ContractBackend, - rollupAddress common.Address, - assertionHash common.Hash, -) (*protocol.AssertionCreatedInfo, error) { - var creationBlock uint64 - var topics [][]common.Hash - if assertionHash == (common.Hash{}) { - rollupDeploymentBlock, err := rollup.RollupDeploymentBlock(&bind.CallOpts{Context: ctx}) - if err != nil { - return nil, err - } - if !rollupDeploymentBlock.IsUint64() { - return nil, errors.New("rollup deployment block was not a uint64") - } - creationBlock = rollupDeploymentBlock.Uint64() - } else { - var b [32]byte - copy(b[:], assertionHash[:]) - assertionCreationBlock, err := rollup.GetAssertionCreationBlockForLogLookup(&bind.CallOpts{Context: ctx}, b) - if err != nil { - return nil, err - } - if !assertionCreationBlock.IsUint64() { - return nil, errors.New("assertion creation block was not a uint64") - } - creationBlock = assertionCreationBlock.Uint64() - } - topics = [][]common.Hash{{assertionCreatedId}, {assertionHash}} - var query = ethereum.FilterQuery{ - FromBlock: new(big.Int).SetUint64(creationBlock), - ToBlock: new(big.Int).SetUint64(creationBlock), - Addresses: []common.Address{rollupAddress}, - Topics: topics, - } - logs, err := client.FilterLogs(ctx, query) - if err != nil { - return nil, err - } - if len(logs) == 0 { - return nil, errors.New("no assertion creation logs found") - } - if len(logs) > 1 { - return nil, errors.New("found multiple instances of requested node") - } - ethLog := logs[0] - parsedLog, err := rollup.ParseAssertionCreated(ethLog) - if err != nil { - return nil, err - } - afterState := parsedLog.Assertion.AfterState - creationL1Block, err := arbutil.CorrespondingL1BlockNumber(ctx, client, ethLog.BlockNumber) - if err != nil { - return nil, err - } - return &protocol.AssertionCreatedInfo{ - ConfirmPeriodBlocks: parsedLog.ConfirmPeriodBlocks, - RequiredStake: parsedLog.RequiredStake, - 
ParentAssertionHash: protocol.AssertionHash{Hash: parsedLog.ParentAssertionHash}, - BeforeState: parsedLog.Assertion.BeforeState, - AfterState: afterState, - InboxMaxCount: parsedLog.InboxMaxCount, - AfterInboxBatchAcc: parsedLog.AfterInboxBatchAcc, - AssertionHash: protocol.AssertionHash{Hash: parsedLog.AssertionHash}, - WasmModuleRoot: parsedLog.WasmModuleRoot, - ChallengeManager: parsedLog.ChallengeManager, - TransactionHash: ethLog.TxHash, - CreationParentBlock: ethLog.BlockNumber, - CreationL1Block: creationL1Block, - }, nil -} diff --git a/staker/bold_assertioncreation.go b/staker/bold_assertioncreation.go new file mode 100644 index 0000000000..44e7d95220 --- /dev/null +++ b/staker/bold_assertioncreation.go @@ -0,0 +1,105 @@ +package staker + +import ( + "context" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind/v2" + "github.com/ethereum/go-ethereum/common" + + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/bold/chain-abstraction" + "github.com/offchainlabs/nitro/solgen/go/rollupgen" +) + +var assertionCreatedId common.Hash + +func init() { + rollupAbi, err := rollupgen.RollupCoreMetaData.GetAbi() + if err != nil { + panic(err) + } + assertionCreatedEvent, ok := rollupAbi.Events["AssertionCreated"] + if !ok { + panic("RollupCore ABI missing AssertionCreated event") + } + assertionCreatedId = assertionCreatedEvent.ID +} + +// Read the creation info for an assertion by looking up its creation +// event from the rollup contracts. 
+func ReadBoldAssertionCreationInfo( + ctx context.Context, + rollup *rollupgen.RollupUserLogic, + client bind.ContractBackend, + rollupAddress common.Address, + assertionHash common.Hash, +) (*protocol.AssertionCreatedInfo, error) { + var creationBlock uint64 + var topics [][]common.Hash + if assertionHash == (common.Hash{}) { + rollupDeploymentBlock, err := rollup.RollupDeploymentBlock(&bind.CallOpts{Context: ctx}) + if err != nil { + return nil, err + } + if !rollupDeploymentBlock.IsUint64() { + return nil, errors.New("rollup deployment block was not a uint64") + } + creationBlock = rollupDeploymentBlock.Uint64() + } else { + var b [32]byte + copy(b[:], assertionHash[:]) + assertionCreationBlock, err := rollup.GetAssertionCreationBlockForLogLookup(&bind.CallOpts{Context: ctx}, b) + if err != nil { + return nil, err + } + if !assertionCreationBlock.IsUint64() { + return nil, errors.New("assertion creation block was not a uint64") + } + creationBlock = assertionCreationBlock.Uint64() + } + topics = [][]common.Hash{{assertionCreatedId}, {assertionHash}} + var query = ethereum.FilterQuery{ + FromBlock: new(big.Int).SetUint64(creationBlock), + ToBlock: new(big.Int).SetUint64(creationBlock), + Addresses: []common.Address{rollupAddress}, + Topics: topics, + } + logs, err := client.FilterLogs(ctx, query) + if err != nil { + return nil, err + } + if len(logs) == 0 { + return nil, errors.New("no assertion creation logs found") + } + if len(logs) > 1 { + return nil, errors.New("found multiple instances of requested node") + } + ethLog := logs[0] + parsedLog, err := rollup.ParseAssertionCreated(ethLog) + if err != nil { + return nil, err + } + afterState := parsedLog.Assertion.AfterState + creationL1Block, err := arbutil.CorrespondingL1BlockNumber(ctx, client, ethLog.BlockNumber) + if err != nil { + return nil, err + } + return &protocol.AssertionCreatedInfo{ + ConfirmPeriodBlocks: parsedLog.ConfirmPeriodBlocks, + RequiredStake: parsedLog.RequiredStake, + 
ParentAssertionHash: protocol.AssertionHash{Hash: parsedLog.ParentAssertionHash}, + BeforeState: parsedLog.Assertion.BeforeState, + AfterState: afterState, + InboxMaxCount: parsedLog.InboxMaxCount, + AfterInboxBatchAcc: parsedLog.AfterInboxBatchAcc, + AssertionHash: protocol.AssertionHash{Hash: parsedLog.AssertionHash}, + WasmModuleRoot: parsedLog.WasmModuleRoot, + ChallengeManager: parsedLog.ChallengeManager, + TransactionHash: ethLog.TxHash, + CreationParentBlock: ethLog.BlockNumber, + CreationL1Block: creationL1Block, + }, nil +} diff --git a/staker/mel_validator.go b/staker/mel_validator.go index c814e1de74..04a68ca789 100644 --- a/staker/mel_validator.go +++ b/staker/mel_validator.go @@ -5,49 +5,141 @@ import ( "errors" "fmt" "math/big" + "time" + "github.com/ethereum/go-ethereum/accounts/abi/bind/v2" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" - "github.com/offchainlabs/nitro/arbnode/mel" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/arbnode/mel/extraction" "github.com/offchainlabs/nitro/arbnode/mel/runner" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/util/stopwaiter" ) +// dummyTxsAndLogsFetcher is for testing purposes. 
TODO: remove once we have preimages recorder implementations +type DummyTxsAndLogsFetcher struct { + L1client *ethclient.Client + receipts types.Receipts +} + +func (d *DummyTxsAndLogsFetcher) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + receipts, err := d.L1client.BlockReceipts(ctx, rpc.BlockNumberOrHashWithHash(parentChainBlockHash, false)) + if err != nil { + return nil, err + } + var logs []*types.Log + for _, receipt := range receipts { + logs = append(logs, receipt.Logs...) + } + d.receipts = receipts + return logs, nil +} + +func (d *DummyTxsAndLogsFetcher) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { + // #nosec G115 + if d.receipts.Len() < int(txIndex+1) { + return nil, fmt.Errorf("insufficient number of receipts: %d, txIndex: %d", d.receipts.Len(), txIndex) + } + receipt := d.receipts[txIndex] + return receipt.Logs, nil +} + +func (d *DummyTxsAndLogsFetcher) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { + tx, _, err := d.L1client.TransactionByHash(ctx, log.TxHash) + return tx, err +} + type MELValidator struct { stopwaiter.StopWaiter - arbDb ethdb.KeyValueStore - l1client *ethclient.Client + + arbDb ethdb.KeyValueStore + l1client *ethclient.Client + + boldStakerAddr common.Address + rollupAddr common.Address + rollup *rollupgen.RollupUserLogic + messageExtractor *melrunner.MessageExtractor dapReaders arbstate.DapReaderSource + + lastValidatedParentChainBlock uint64 } -func NewMELValdidator(messageExtractor *melrunner.MessageExtractor) *MELValidator { +func NewMELValidator(arbDb ethdb.KeyValueStore, l1client *ethclient.Client, messageExtractor *melrunner.MessageExtractor, dapReaders arbstate.DapReaderSource) *MELValidator { return &MELValidator{ + arbDb: arbDb, + l1client: l1client, messageExtractor: messageExtractor, + dapReaders: dapReaders, } } -func (mv *MELValidator) CreateNextValidationEntry(ctx 
context.Context, startPosition uint64) (*validationEntry, error) { - if startPosition == 0 { +func (mv *MELValidator) Start(ctx context.Context) { + mv.CallIteratively(func(ctx context.Context) time.Duration { + latestStaked, err := mv.rollup.LatestStakedAssertion(&bind.CallOpts{}, mv.boldStakerAddr) + if err != nil { + log.Error("MEL validator: Error fetching latest staked assertion hash", "err", err) + return 0 + } + latestStakedAssertion, err := ReadBoldAssertionCreationInfo(ctx, mv.rollup, mv.l1client, mv.rollupAddr, latestStaked) + if err != nil { + log.Error("MEL validator: Error fetching latest staked assertion creation info", "err", err) + return 0 + } + if latestStakedAssertion.InboxMaxCount == nil || !latestStakedAssertion.InboxMaxCount.IsUint64() { + log.Error("MEL validator: latestStakedAssertion.InboxMaxCount is not uint64") + return 0 + } + + // Create validation entry + entry, err := mv.CreateNextValidationEntry(ctx, mv.lastValidatedParentChainBlock, latestStakedAssertion.InboxMaxCount.Uint64()) + if err != nil { + log.Error("MEL validator: Error creating validation entry", "lastValidatedParentChainBlock", mv.lastValidatedParentChainBlock, "inboxMaxCount", latestStakedAssertion.InboxMaxCount.Uint64(), "err", err) + return time.Minute // wait for latestStakedAssertion to progress by the blockValidator + } + + // Send validation entry to validation nodes + if err := mv.SendValidationEntry(ctx, entry); err != nil { + log.Error("MEL validator: Error sending validation entry", "err", err) + } + + // Advance validations + return 0 + }) +} + +func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValidatedParentChainBlock, toValidateMsgExtractionCount uint64) (*validationEntry, error) { + if lastValidatedParentChainBlock == 0 { // TODO: last validated. 
+ // ending position- bold staker latest posted assertion on chain that it agrees with (l1blockhash)- return nil, errors.New("trying to create validation entry for zero block number") } - preState, err := mv.messageExtractor.GetState(ctx, startPosition-1) + preState, err := mv.messageExtractor.GetState(ctx, lastValidatedParentChainBlock) if err != nil { return nil, err } + // We have already validated message extraction of messages till count toValidateMsgExtractionCount, so can return early + // and wait for block validator to progress the toValidateMsgExtractionCount + if preState.MsgCount >= toValidateMsgExtractionCount { + return nil, nil + } delayedMsgRecordingDB := melrunner.NewRecordingDatabase(mv.arbDb) recordingDAPReaders := melrunner.NewRecordingDAPReaderSource(ctx, mv.dapReaders) - for i := startPosition; ; i++ { + for i := lastValidatedParentChainBlock + 1; ; i++ { header, err := mv.l1client.HeaderByNumber(ctx, new(big.Int).SetUint64(i)) if err != nil { return nil, err } // Awaiting recording implementations of logsFetcher and txsFetcher - state, _, _, _, err := melextraction.ExtractMessages(ctx, preState, header, recordingDAPReaders, delayedMsgRecordingDB, nil, nil) + txsAndLogsFetcher := &DummyTxsAndLogsFetcher{L1client: mv.l1client} + state, _, _, _, err := melextraction.ExtractMessages(ctx, preState, header, recordingDAPReaders, delayedMsgRecordingDB, txsAndLogsFetcher, txsAndLogsFetcher) if err != nil { return nil, fmt.Errorf("error calling melextraction.ExtractMessages in recording mode: %w", err) } @@ -58,7 +150,7 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, startPosi if state.Hash() != wantState.Hash() { return nil, fmt.Errorf("calculated MEL state hash in recording mode doesn't match the one computed in native mode, parentchainBlocknumber: %d", i) } - if mel.WasMessageExtracted(preState, state) { + if state.MsgCount >= toValidateMsgExtractionCount { break } preState = state @@ -68,5 +160,11 @@ func (mv 
*MELValidator) CreateNextValidationEntry(ctx context.Context, startPosi arbutil.Keccak256PreimageType: delayedMsgRecordingDB.Preimages(), } daprovider.CopyPreimagesInto(preimages, delayedPreimages) - return nil, nil + return &validationEntry{ + Preimages: preimages, + }, nil +} + +func (mv *MELValidator) SendValidationEntry(ctx context.Context, entry *validationEntry) error { + return nil } diff --git a/system_tests/message_extraction_layer_test.go b/system_tests/message_extraction_layer_test.go index ed344b436a..339642e25b 100644 --- a/system_tests/message_extraction_layer_test.go +++ b/system_tests/message_extraction_layer_test.go @@ -19,7 +19,7 @@ import ( "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/mel" - melrunner "github.com/offchainlabs/nitro/arbnode/mel/runner" + "github.com/offchainlabs/nitro/arbnode/mel/runner" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -27,7 +27,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/offchainlabs/nitro/staker/bold" + "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/testhelpers" ) @@ -816,7 +816,7 @@ func createInitialMELState( Require(t, err) confirmedHash, err := rollup.LatestConfirmed(&bind.CallOpts{}) Require(t, err) - latestConfirmedAssertion, err := bold.ReadBoldAssertionCreationInfo( + latestConfirmedAssertion, err := staker.ReadBoldAssertionCreationInfo( ctx, rollup, client, diff --git a/system_tests/message_extraction_layer_validation_test.go b/system_tests/message_extraction_layer_validation_test.go new file mode 100644 index 0000000000..09b8676021 --- /dev/null +++ b/system_tests/message_extraction_layer_validation_test.go @@ -0,0 
+1,215 @@ +package arbtest + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/big" + "math/bits" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/rlp" + + "github.com/offchainlabs/nitro/arbnode/mel" + melextraction "github.com/offchainlabs/nitro/arbnode/mel/extraction" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/staker" +) + +func TestMELValidator_Recording_Preimages(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.L2Info.GenerateAccount("User2") + builder.nodeConfig.BatchPoster.Post4844Blobs = true + builder.nodeConfig.BatchPoster.IgnoreBlobPrice = true + builder.nodeConfig.BatchPoster.MaxDelay = time.Hour // set high max-delay so we can test the delay buffer + builder.nodeConfig.BatchPoster.PollInterval = time.Hour // set a high poll interval to avoid continuous polling + cleanup := builder.Build(t) + defer cleanup() + + // Post a blob batch with a bunch of txs + startBlock, err := builder.L1.Client.BlockNumber(ctx) + Require(t, err) + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() + initialBatchCount := GetBatchCount(t, builder) + var txs types.Transactions + for i := 0; i < 20; i++ { + tx, _ := builder.L2.TransferBalance(t, "Faucet", "User2", big.NewInt(1e12), builder.L2Info) + txs = append(txs, tx) + } + builder.nodeConfig.BatchPoster.MaxDelay = 0 + builder.L2.ConsensusConfigFetcher.Set(builder.nodeConfig) + _, err = builder.L2.ConsensusNode.BatchPoster.MaybePostSequencerBatch(ctx) + Require(t, err) + for _, tx := range txs { + _, err := testClientB.EnsureTxSucceeded(tx) + Require(t, err, "tx not found on second node") + } + CheckBatchCount(t, builder, initialBatchCount+1) + + // 
Post delayed messages + forceDelayedBatchPosting(t, ctx, builder, testClientB, 10, 0) + + // MEL Validator: create validation entry + blobReaderRegistry := daprovider.NewDAProviderRegistry() + Require(t, blobReaderRegistry.SetupBlobReader(daprovider.NewReaderForBlobReader(builder.L1.L1BlobReader))) + melValidator := staker.NewMELValidator(builder.L2.ConsensusNode.ArbDB, builder.L1.Client, builder.L2.ConsensusNode.MessageExtractor, blobReaderRegistry) + extractedMsgCount, err := builder.L2.ConsensusNode.TxStreamer.GetMessageCount() + Require(t, err) + entry, err := melValidator.CreateNextValidationEntry(ctx, startBlock, uint64(extractedMsgCount)) + Require(t, err) + + // Represents running of MEL validation using preimages in wasm mode. TODO: remove this once we have validation wired + state, err := builder.L2.ConsensusNode.MessageExtractor.GetState(ctx, startBlock) + Require(t, err) + preimagesBasedDelayedDb := &delayedMessageDatabase{ + preimageResolver: &testPreimageResolver{ + preimages: entry.Preimages[arbutil.Keccak256PreimageType], + }, + } + preimagesBasedDapReaders := daprovider.NewDAProviderRegistry() + Require(t, preimagesBasedDapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(&blobPreimageReader{entry.Preimages}))) + for state.MsgCount < uint64(extractedMsgCount) { + header, err := builder.L1.Client.HeaderByNumber(ctx, new(big.Int).SetUint64(state.ParentChainBlockNumber+1)) + Require(t, err) + // Awaiting recording implementations of logsFetcher and txsFetcher + txsAndLogsFetcher := &staker.DummyTxsAndLogsFetcher{L1client: builder.L1.Client} + postState, _, _, _, err := melextraction.ExtractMessages(ctx, state, header, preimagesBasedDapReaders, preimagesBasedDelayedDb, txsAndLogsFetcher, txsAndLogsFetcher) + Require(t, err) + wantState, err := builder.L2.ConsensusNode.MessageExtractor.GetState(ctx, state.ParentChainBlockNumber+1) + Require(t, err) + if postState.Hash() != wantState.Hash() { + t.Fatalf("MEL state mismatch") + } + state = 
postState + } +} + +// TODO: Code from cmd/mel-replay and cmd/replay packages for verification of preimages, should be deleted once we have validation wired +type blobPreimageReader struct { + preimages daprovider.PreimagesMap +} + +func (r *blobPreimageReader) Initialize(ctx context.Context) error { return nil } + +func (r *blobPreimageReader) GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, +) ([]kzg4844.Blob, error) { + var blobs []kzg4844.Blob + for _, h := range versionedHashes { + var blob kzg4844.Blob + if _, ok := r.preimages[arbutil.EthVersionedHashPreimageType]; !ok { + return nil, errors.New("no blobs found in preimages") + } + preimage, ok := r.preimages[arbutil.EthVersionedHashPreimageType][h] + if !ok { + return nil, errors.New("no blobs found in preimages") + } + if len(preimage) != len(blob) { + return nil, fmt.Errorf("for blob %v got back preimage of length %v but expected blob length %v", h, len(preimage), len(blob)) + } + copy(blob[:], preimage) + blobs = append(blobs, blob) + } + return blobs, nil +} + +type testPreimageResolver struct { + preimages map[common.Hash][]byte +} + +func (r *testPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { + if preimageType != arbutil.Keccak256PreimageType { + return nil, fmt.Errorf("unsupported preimageType: %d", preimageType) + } + if preimage, ok := r.preimages[hash]; ok { + return preimage, nil + } + return nil, fmt.Errorf("preimage not found for hash: %v", hash) +} + +type preimageResolver interface { + ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) +} + +type delayedMessageDatabase struct { + preimageResolver preimageResolver +} + +func (d *delayedMessageDatabase) ReadDelayedMessage( + ctx context.Context, + state *mel.State, + msgIndex uint64, +) (*mel.DelayedInboxMessage, error) { + originalMsgIndex := msgIndex + totalMsgsSeen := state.DelayedMessagesSeen + 
if msgIndex >= totalMsgsSeen { + return nil, fmt.Errorf("index %d out of range, total delayed messages seen: %d", msgIndex, totalMsgsSeen) + } + treeSize := nextPowerOfTwo(totalMsgsSeen) + merkleDepth := bits.TrailingZeros64(treeSize) + + // Start traversal from root, which is the delayed messages seen root. + merkleRoot := state.DelayedMessagesSeenRoot + currentHash := merkleRoot + currentDepth := merkleDepth + + // Traverse down the Merkle tree to find the leaf at the given index. + for currentDepth > 0 { + // Resolve the preimage to get left and right children. + result, err := d.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentHash) + if err != nil { + return nil, err + } + if len(result) != 64 { + return nil, fmt.Errorf("invalid preimage result length: %d, wanted 64", len(result)) + } + // Split result into left and right halves. + mid := len(result) / 2 + left := result[:mid] + right := result[mid:] + + // Calculate which subtree contains our index. + subtreeSize := uint64(1) << (currentDepth - 1) + if msgIndex < subtreeSize { + // Go left. + currentHash = common.BytesToHash(left) + } else { + // Go right. + currentHash = common.BytesToHash(right) + msgIndex -= subtreeSize + } + currentDepth-- + } + // At this point, currentHash should be the hash of the delayed message. 
+ delayedMsgBytes, err := d.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentHash) + if err != nil { + return nil, err + } + delayedMessage := new(mel.DelayedInboxMessage) + if err = rlp.Decode(bytes.NewBuffer(delayedMsgBytes), &delayedMessage); err != nil { + return nil, fmt.Errorf("failed to decode delayed message at index %d: %w", originalMsgIndex, err) + } + return delayedMessage, nil +} + +func nextPowerOfTwo(n uint64) uint64 { + if n == 0 { + return 1 + } + if n&(n-1) == 0 { + return n + } + return 1 << bits.Len64(n) +} From 2cea9a29b601e5775ed039c9c97844d6a6ab6bed Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 11:52:11 +0530 Subject: [PATCH 12/42] add changelog --- changelog/ganeshvanahalli-nit-4142.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 changelog/ganeshvanahalli-nit-4142.md diff --git a/changelog/ganeshvanahalli-nit-4142.md b/changelog/ganeshvanahalli-nit-4142.md new file mode 100644 index 0000000000..b95a880f8e --- /dev/null +++ b/changelog/ganeshvanahalli-nit-4142.md @@ -0,0 +1,2 @@ +### Added + - Introduces MEL validator \ No newline at end of file From 2a6119d2e452622aa7dbdf371f9e0223e4afcf21 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 12:20:34 +0530 Subject: [PATCH 13/42] minor fixes --- arbnode/mel/recording/dap_reader_source.go | 24 +++++++++---------- staker/bold_assertioncreation.go | 2 +- staker/mel_validator.go | 7 +++--- ...essage_extraction_layer_validation_test.go | 6 ++--- 4 files changed, 20 insertions(+), 19 deletions(-) diff --git a/arbnode/mel/recording/dap_reader_source.go b/arbnode/mel/recording/dap_reader_source.go index bc6ad1db29..24c8f05422 100644 --- a/arbnode/mel/recording/dap_reader_source.go +++ b/arbnode/mel/recording/dap_reader_source.go @@ -11,16 +11,16 @@ import ( "github.com/offchainlabs/nitro/validator" ) -// RecordingDAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL 
validator for creation +// DAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL validator for creation // of validation entry. Since ExtractMessages function would use daprovider.Reader interface to fetch the sequencer batch via RecoverPayload // we implement collecting of preimages as well in the same method and record it -type RecordingDAPReader struct { +type DAPReader struct { validatorCtx context.Context reader daprovider.Reader preimages daprovider.PreimagesMap } -func (r *RecordingDAPReader) RecoverPayload(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadResult] { +func (r *DAPReader) RecoverPayload(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadResult] { promise := r.reader.RecoverPayloadAndPreimages(batchNum, batchBlockHash, sequencerMsg) result, err := promise.Await(r.validatorCtx) if err != nil { @@ -30,38 +30,38 @@ func (r *RecordingDAPReader) RecoverPayload(batchNum uint64, batchBlockHash comm return containers.NewReadyPromise(daprovider.PayloadResult{Payload: result.Payload}, nil) } -func (r *RecordingDAPReader) CollectPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PreimagesResult] { +func (r *DAPReader) CollectPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PreimagesResult] { return r.reader.CollectPreimages(batchNum, batchBlockHash, sequencerMsg) } -func (r *RecordingDAPReader) RecoverPayloadAndPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadAndPreimagesResult] { +func (r *DAPReader) RecoverPayloadAndPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadAndPreimagesResult] { return 
r.reader.RecoverPayloadAndPreimages(batchNum, batchBlockHash, sequencerMsg) } -// RecordingDAPReaderSource is used for recording preimages related to sequencer batches stored by da providers, given a +// DAPReaderSource is used for recording preimages related to sequencer batches stored by da providers, given a // DapReaderSource it implements GetReader method to return a daprovider.Reader interface that records preimgaes. It takes // in a context variable (corresponding to creation of validation entry) from the MEL validator -type RecordingDAPReaderSource struct { +type DAPReaderSource struct { validatorCtx context.Context dapReaders arbstate.DapReaderSource preimages daprovider.PreimagesMap } -func NewRecordingDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource) *RecordingDAPReaderSource { - return &RecordingDAPReaderSource{ +func NewDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource) *DAPReaderSource { + return &DAPReaderSource{ validatorCtx: validatorCtx, dapReaders: dapReaders, preimages: make(daprovider.PreimagesMap), } } -func (s *RecordingDAPReaderSource) GetReader(headerByte byte) daprovider.Reader { +func (s *DAPReaderSource) GetReader(headerByte byte) daprovider.Reader { reader := s.dapReaders.GetReader(headerByte) - return &RecordingDAPReader{ + return &DAPReader{ validatorCtx: s.validatorCtx, reader: reader, preimages: s.preimages, } } -func (s *RecordingDAPReaderSource) Preimages() daprovider.PreimagesMap { return s.preimages } +func (s *DAPReaderSource) Preimages() daprovider.PreimagesMap { return s.preimages } diff --git a/staker/bold_assertioncreation.go b/staker/bold_assertioncreation.go index 44e7d95220..a4739daaa5 100644 --- a/staker/bold_assertioncreation.go +++ b/staker/bold_assertioncreation.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/bold/chain-abstraction" + 
"github.com/offchainlabs/nitro/bold/protocol" "github.com/offchainlabs/nitro/solgen/go/rollupgen" ) diff --git a/staker/mel_validator.go b/staker/mel_validator.go index 04a68ca789..f3699a8764 100644 --- a/staker/mel_validator.go +++ b/staker/mel_validator.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/mel/extraction" + "github.com/offchainlabs/nitro/arbnode/mel/recording" "github.com/offchainlabs/nitro/arbnode/mel/runner" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" @@ -130,8 +131,8 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid if preState.MsgCount >= toValidateMsgExtractionCount { return nil, nil } - delayedMsgRecordingDB := melrunner.NewRecordingDatabase(mv.arbDb) - recordingDAPReaders := melrunner.NewRecordingDAPReaderSource(ctx, mv.dapReaders) + delayedMsgRecordingDB := melrecording.NewDelayedMsgDatabase(mv.arbDb) + recordingDAPReaders := melrecording.NewDAPReaderSource(ctx, mv.dapReaders) for i := lastValidatedParentChainBlock + 1; ; i++ { header, err := mv.l1client.HeaderByNumber(ctx, new(big.Int).SetUint64(i)) if err != nil { @@ -139,7 +140,7 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid } // Awaiting recording implementations of logsFetcher and txsFetcher txsAndLogsFetcher := &DummyTxsAndLogsFetcher{L1client: mv.l1client} - state, _, _, _, err := melextraction.ExtractMessages(ctx, preState, header, recordingDAPReaders, delayedMsgRecordingDB, txsAndLogsFetcher, txsAndLogsFetcher) + state, _, _, _, err := melextraction.ExtractMessages(ctx, preState, header, recordingDAPReaders, delayedMsgRecordingDB, txsAndLogsFetcher, txsAndLogsFetcher, nil) if err != nil { return nil, fmt.Errorf("error calling melextraction.ExtractMessages in recording mode: %w", err) } diff --git a/system_tests/message_extraction_layer_validation_test.go b/system_tests/message_extraction_layer_validation_test.go index 
09b8676021..42a653e987 100644 --- a/system_tests/message_extraction_layer_validation_test.go +++ b/system_tests/message_extraction_layer_validation_test.go @@ -16,7 +16,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbnode/mel" - melextraction "github.com/offchainlabs/nitro/arbnode/mel/extraction" + "github.com/offchainlabs/nitro/arbnode/mel/extraction" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/staker" @@ -62,7 +62,7 @@ func TestMELValidator_Recording_Preimages(t *testing.T) { // MEL Validator: create validation entry blobReaderRegistry := daprovider.NewDAProviderRegistry() Require(t, blobReaderRegistry.SetupBlobReader(daprovider.NewReaderForBlobReader(builder.L1.L1BlobReader))) - melValidator := staker.NewMELValidator(builder.L2.ConsensusNode.ArbDB, builder.L1.Client, builder.L2.ConsensusNode.MessageExtractor, blobReaderRegistry) + melValidator := staker.NewMELValidator(builder.L2.ConsensusNode.ConsensusDB, builder.L1.Client, builder.L2.ConsensusNode.MessageExtractor, blobReaderRegistry) extractedMsgCount, err := builder.L2.ConsensusNode.TxStreamer.GetMessageCount() Require(t, err) entry, err := melValidator.CreateNextValidationEntry(ctx, startBlock, uint64(extractedMsgCount)) @@ -83,7 +83,7 @@ func TestMELValidator_Recording_Preimages(t *testing.T) { Require(t, err) // Awaiting recording implementations of logsFetcher and txsFetcher txsAndLogsFetcher := &staker.DummyTxsAndLogsFetcher{L1client: builder.L1.Client} - postState, _, _, _, err := melextraction.ExtractMessages(ctx, state, header, preimagesBasedDapReaders, preimagesBasedDelayedDb, txsAndLogsFetcher, txsAndLogsFetcher) + postState, _, _, _, err := melextraction.ExtractMessages(ctx, state, header, preimagesBasedDapReaders, preimagesBasedDelayedDb, txsAndLogsFetcher, txsAndLogsFetcher, nil) Require(t, err) wantState, err := builder.L2.ConsensusNode.MessageExtractor.GetState(ctx, 
state.ParentChainBlockNumber+1) Require(t, err) From 9ab227c9ebdb96bca1dbbde3236714979893877c Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 12:45:04 +0530 Subject: [PATCH 14/42] remove debug statement --- cmd/mel-replay/receipt_fetcher.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index 64ce26633f..7aa83b39fd 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -83,7 +83,6 @@ func (rf *receiptFetcherForBlock) LogsForBlockHashAllLogs(ctx context.Context, p return nil, err } entries, indices := collectTrieEntries(receiptsTrie) - fmt.Println("indices ", indices, len(entries)) rawReceipts := reconstructOrderedData(entries, indices) receipts, err := decodeReceiptsData(rawReceipts) if err != nil { From 063ce8e5710f5430422fddb338ac52eb2ff0f626 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 13:04:32 +0530 Subject: [PATCH 15/42] refactor --- arbnode/mel/recording/receipt_recorder.go | 46 +++++++++++------------ 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index 5cc15613c1..c9d71554e7 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -40,8 +40,8 @@ func NewReceiptRecorder( } } -func (lr *ReceiptRecorder) Initialize(ctx context.Context) error { - block, err := lr.parentChainReader.BlockByHash(ctx, lr.parentChainBlockHash) +func (rr *ReceiptRecorder) Initialize(ctx context.Context) error { + block, err := rr.parentChainReader.BlockByHash(ctx, rr.parentChainBlockHash) if err != nil { return err } @@ -52,12 +52,12 @@ func (lr *ReceiptRecorder) Initialize(ctx context.Context) error { var receipts []*types.Receipt txs := block.Body().Transactions for i, tx := range txs { - receipt, err := lr.parentChainReader.TransactionReceipt(ctx, tx.Hash()) + receipt, err := 
rr.parentChainReader.TransactionReceipt(ctx, tx.Hash()) if err != nil { return fmt.Errorf("error fetching receipt for tx: %v", tx.Hash()) } receipts = append(receipts, receipt) - lr.logs = append(lr.logs, receipt.Logs...) + rr.logs = append(rr.logs, receipt.Logs...) // #nosec G115 indexBytes, err := rlp.EncodeToBytes(uint64(i)) if err != nil { @@ -84,29 +84,29 @@ func (lr *ReceiptRecorder) Initialize(ctx context.Context) error { if err := tdb.Commit(root, false); err != nil { return fmt.Errorf("failed to commit database: %w", err) } - lr.receipts = receipts - lr.trieDB = tdb - lr.blockReceiptHash = root + rr.receipts = receipts + rr.trieDB = tdb + rr.blockReceiptHash = root return nil } -func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { - if lr.trieDB == nil { +func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { + if rr.trieDB == nil { return nil, errors.New("TransactionRecorder not initialized") } - if lr.parentChainBlockHash != parentChainBlockHash { - return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", lr.parentChainBlockHash, parentChainBlockHash) + if rr.parentChainBlockHash != parentChainBlockHash { + return nil, fmt.Errorf("parentChainBlockHash mismatch. 
expected: %v got: %v", rr.parentChainBlockHash, parentChainBlockHash) } // #nosec G115 - if int(txIndex) >= len(lr.receipts) { + if int(txIndex) >= len(rr.receipts) { return nil, fmt.Errorf("index out of range: %d", txIndex) } recordingDB := &TxsAndReceiptsDatabase{ - underlying: lr.trieDB, - recorder: daprovider.RecordPreimagesTo(lr.preimages), // RecordingDB will record relevant preimages into tr.preimages + underlying: rr.trieDB, + recorder: daprovider.RecordPreimagesTo(rr.preimages), // RecordingDB will record relevant preimages into tr.preimages } recordingTDB := triedb.NewDatabase(recordingDB, nil) - receiptsTrie, err := trie.New(trie.TrieID(lr.blockReceiptHash), recordingTDB) + receiptsTrie, err := trie.New(trie.TrieID(rr.blockReceiptHash), recordingTDB) if err != nil { return nil, fmt.Errorf("failed to create trie: %w", err) } @@ -123,10 +123,10 @@ func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH return nil, fmt.Errorf("failed to unmarshal receipt: %w", err) } // Add the receipt marshaled binary by hash to the preimages map - if _, ok := lr.preimages[arbutil.Keccak256PreimageType]; !ok { - lr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { + rr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) } - lr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(receiptBytes)] = receiptBytes + rr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(receiptBytes)] = receiptBytes // Fill in the TxIndex (give as input to this method) into the logs so that Tx recording // is possible. This field is one of the derived fields of Log hence won't be stored in trie. 
// @@ -137,14 +137,14 @@ func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH return receipt.Logs, nil } -func (lr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { - if lr.trieDB == nil { +func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + if rr.trieDB == nil { return nil, errors.New("TransactionRecorder not initialized") } - if lr.parentChainBlockHash == parentChainBlockHash { - return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", lr.parentChainBlockHash, parentChainBlockHash) + if rr.parentChainBlockHash != parentChainBlockHash { + return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", rr.parentChainBlockHash, parentChainBlockHash) } - return lr.logs, nil + return rr.logs, nil } func (tr *ReceiptRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } From c19f416e37bdaeec63ae33513937750f811d4464 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 13:07:33 +0530 Subject: [PATCH 16/42] code refactor --- arbnode/mel/recording/receipt_recorder.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index c3d7c75eaa..c4a6d34df1 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -138,7 +138,7 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH for _, log := range receipt.Logs { log.TxIndex = txIndex } - lr.relevantLogsTxIndexes = append(lr.relevantLogsTxIndexes, txIndex) + rr.relevantLogsTxIndexes = append(rr.relevantLogsTxIndexes, txIndex) return receipt.Logs, nil } @@ -152,11 +152,17 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc return rr.logs, nil } -func (tr *ReceiptRecorder) 
GetPreimages() (daprovider.PreimagesMap, error) { +func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { + if len(rr.relevantLogsTxIndexes) == 0 { + return nil, nil + } var buf bytes.Buffer - if err := rlp.Encode(&buf, tr.relevantLogsTxIndexes); err != nil { + if err := rlp.Encode(&buf, rr.relevantLogsTxIndexes); err != nil { return nil, err } - tr.preimages[arbutil.Keccak256PreimageType][RELEVANT_LOGS_TXINDEXES_KEY] = buf.Bytes() - return tr.preimages, nil + if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { + rr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + } + rr.preimages[arbutil.Keccak256PreimageType][RELEVANT_LOGS_TXINDEXES_KEY] = buf.Bytes() + return rr.preimages, nil } From 9457d22190635878d71cdcef8bbd7e036986b92e Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 13:24:21 +0530 Subject: [PATCH 17/42] update impl of GetPreimages --- arbnode/mel/recording/receipt_recorder.go | 11 ++++------- cmd/mel-replay/receipt_fetcher.go | 7 ++----- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index c4a6d34df1..38207ebff6 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -19,9 +19,6 @@ import ( "github.com/offchainlabs/nitro/daprovider" ) -// maps to an array of uints representing the relevant txIndexes of receipts needed for message extraction -var RELEVANT_LOGS_TXINDEXES_KEY common.Hash = common.HexToHash("123534") - type ReceiptRecorder struct { parentChainReader BlockReader parentChainBlockHash common.Hash @@ -152,10 +149,10 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc return rr.logs, nil } +// GetPreimages returns the preimages of recorded receipts, and also adds the array of relevant tx indexes +// to the preimages map as a value to the key represented by parentChainBlockHash. 
+// TODO: If we use parentChainBlockHash as the key for header- then we need to modify this implementation func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { - if len(rr.relevantLogsTxIndexes) == 0 { - return nil, nil - } var buf bytes.Buffer if err := rlp.Encode(&buf, rr.relevantLogsTxIndexes); err != nil { return nil, err @@ -163,6 +160,6 @@ func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { rr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) } - rr.preimages[arbutil.Keccak256PreimageType][RELEVANT_LOGS_TXINDEXES_KEY] = buf.Bytes() + rr.preimages[arbutil.Keccak256PreimageType][rr.parentChainBlockHash] = buf.Bytes() return rr.preimages, nil } diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index 7aa83b39fd..d52727df9b 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -18,9 +18,6 @@ import ( "github.com/offchainlabs/nitro/arbutil" ) -// maps to an array of uints representing the relevant txIndexes of receipts needed for message extraction -var RELEVANT_LOGS_TXINDEXES_KEY common.Hash = common.HexToHash("123534") - type receiptFetcherForBlock struct { header *types.Header preimageResolver preimageResolver @@ -45,12 +42,12 @@ func (rf *receiptFetcherForBlock) LogsForTxIndex(ctx context.Context, parentChai } // LogsForBlockHash first gets the txIndexes corresponding to the relevant logs by reading -// RELEVANT_LOGS_TXINDEXES_KEY from the preimages and then fetches logs for each of these txIndexes +// the key `parentChainBlockHash` from the preimages and then fetches logs for each of these txIndexes func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { if rf.header.Hash() != parentChainBlockHash { return nil, errors.New("parentChainBlockHash mismatch") } - txIndexData, err := 
rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, RELEVANT_LOGS_TXINDEXES_KEY) + txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, rf.header.Hash()) if err != nil { return nil, err } From 10aa51df2d0592d437be6dcecef93a66d63b5378 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 21:38:20 +0530 Subject: [PATCH 18/42] correctly record preimages for relevant logs --- arbnode/mel/extraction/batch_lookup.go | 5 + .../mel/extraction/delayed_message_lookup.go | 8 + arbnode/mel/recording/receipt_recorder.go | 38 +- .../mel/runner/logs_and_headers_fetcher.go | 5 + arbnode/mel/state.go | 23 +- cmd/mel-replay/receipt_fetcher.go | 2 + staker/mel_validator.go | 68 ++-- .../message_extraction_layer_utils.go | 337 ++++++++++++++++++ ...essage_extraction_layer_validation_test.go | 141 +------- 9 files changed, 435 insertions(+), 192 deletions(-) create mode 100644 system_tests/message_extraction_layer_utils.go diff --git a/arbnode/mel/extraction/batch_lookup.go b/arbnode/mel/extraction/batch_lookup.go index 06c6692793..4d535317c8 100644 --- a/arbnode/mel/extraction/batch_lookup.go +++ b/arbnode/mel/extraction/batch_lookup.go @@ -58,6 +58,11 @@ func ParseBatchesFromBlock( return nil, nil, fmt.Errorf("error fetching tx by hash: %v in ParseBatchesFromBlock: %w ", log.TxHash, err) } + // Record this log for MEL validation + if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { + return nil, nil, fmt.Errorf("error recording relevant logs: %w", err) + } + batch := &mel.SequencerInboxBatch{ BlockHash: log.BlockHash, ParentChainBlockNumber: log.BlockNumber, diff --git a/arbnode/mel/extraction/delayed_message_lookup.go b/arbnode/mel/extraction/delayed_message_lookup.go index c7bc40cacf..a7da8b16bb 100644 --- a/arbnode/mel/extraction/delayed_message_lookup.go +++ b/arbnode/mel/extraction/delayed_message_lookup.go @@ -37,6 +37,10 @@ func parseDelayedMessagesFromBlock( 
// On Arbitrum One, this is the bridge contract which emits a MessageDelivered event. if log.Address == melState.DelayedMessagePostingTargetAddress { relevantLogs = append(relevantLogs, log) + // Record this log for MEL validation + if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { + return nil, fmt.Errorf("error recording relevant logs: %w", err) + } } } if len(relevantLogs) > 0 { @@ -76,6 +80,10 @@ func parseDelayedMessagesFromBlock( return nil, err } messageData[common.BigToHash(msgNum)] = msg + // Record this log for MEL validation + if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), inboxMsgLog.TxIndex); err != nil { + return nil, fmt.Errorf("error recording relevant logs: %w", err) + } } for i, parsedLog := range messageDeliveredEvents { msgKey := common.BigToHash(parsedLog.MessageIndex) diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index 38207ebff6..4ed1a73162 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -20,14 +20,15 @@ import ( ) type ReceiptRecorder struct { - parentChainReader BlockReader - parentChainBlockHash common.Hash - preimages daprovider.PreimagesMap - receipts []*types.Receipt - logs []*types.Log - relevantLogsTxIndexes []uint - trieDB *triedb.Database - blockReceiptHash common.Hash + parentChainReader BlockReader + parentChainBlockHash common.Hash + parentChainBlockNumber uint64 + preimages daprovider.PreimagesMap + receipts []*types.Receipt + logs []*types.Log + relevantLogsTxIndexes map[uint]struct{} + trieDB *triedb.Database + blockReceiptHash common.Hash } func NewReceiptRecorder( @@ -35,9 +36,10 @@ func NewReceiptRecorder( parentChainBlockHash common.Hash, ) *ReceiptRecorder { return &ReceiptRecorder{ - parentChainReader: parentChainReader, - parentChainBlockHash: parentChainBlockHash, - preimages: make(daprovider.PreimagesMap), + parentChainReader: 
parentChainReader, + parentChainBlockHash: parentChainBlockHash, + preimages: make(daprovider.PreimagesMap), + relevantLogsTxIndexes: make(map[uint]struct{}), } } @@ -88,6 +90,7 @@ func (rr *ReceiptRecorder) Initialize(ctx context.Context) error { rr.receipts = receipts rr.trieDB = tdb rr.blockReceiptHash = root + rr.parentChainBlockNumber = block.NumberU64() return nil } @@ -98,6 +101,9 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH if rr.parentChainBlockHash != parentChainBlockHash { return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", rr.parentChainBlockHash, parentChainBlockHash) } + if _, recorded := rr.relevantLogsTxIndexes[txIndex]; recorded { + return rr.receipts[txIndex].Logs, nil + } // #nosec G115 if int(txIndex) >= len(rr.receipts) { return nil, fmt.Errorf("index out of range: %d", txIndex) @@ -134,8 +140,10 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH // We use this same trick in validation as well in order to link a tx with its logs for _, log := range receipt.Logs { log.TxIndex = txIndex + log.BlockHash = parentChainBlockHash + log.BlockNumber = rr.parentChainBlockNumber } - rr.relevantLogsTxIndexes = append(rr.relevantLogsTxIndexes, txIndex) + rr.relevantLogsTxIndexes[txIndex] = struct{}{} return receipt.Logs, nil } @@ -153,8 +161,12 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc // to the preimages map as a value to the key represented by parentChainBlockHash. 
// TODO: If we use parentChainBlockHash as the key for header- then we need to modify this implementation func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { + var relevantLogsTxIndexes []uint + for k := range rr.relevantLogsTxIndexes { + relevantLogsTxIndexes = append(relevantLogsTxIndexes, k) + } var buf bytes.Buffer - if err := rlp.Encode(&buf, rr.relevantLogsTxIndexes); err != nil { + if err := rlp.Encode(&buf, relevantLogsTxIndexes); err != nil { return nil, err } if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { diff --git a/arbnode/mel/runner/logs_and_headers_fetcher.go b/arbnode/mel/runner/logs_and_headers_fetcher.go index 2fb0a04104..039ccd99b7 100644 --- a/arbnode/mel/runner/logs_and_headers_fetcher.go +++ b/arbnode/mel/runner/logs_and_headers_fetcher.go @@ -149,6 +149,11 @@ func (f *logsAndHeadersFetcher) fetchDelayedMessageLogs(ctx context.Context, fro } for _, log := range logs { f.logsByBlockHash[log.BlockHash] = append(f.logsByBlockHash[log.BlockHash], &log) + // Not necessary in native mode but needed to make the behavior consistent with recording mode + if _, ok := f.logsByTxIndex[log.BlockHash]; !ok { + f.logsByTxIndex[log.BlockHash] = make(map[uint][]*types.Log) + } + f.logsByTxIndex[log.BlockHash][log.TxIndex] = append(f.logsByTxIndex[log.BlockHash][log.TxIndex], &log) } return nil } diff --git a/arbnode/mel/state.go b/arbnode/mel/state.go index a4ab059c4a..7d9c497c06 100644 --- a/arbnode/mel/state.go +++ b/arbnode/mel/state.go @@ -4,10 +4,12 @@ import ( "context" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/merkleAccumulator" + "github.com/offchainlabs/nitro/util/arbmath" ) // State defines the main struct describing the results of processing a single parent @@ -77,7 +79,26 @@ type MessageConsumer interface { } func (s *State) Hash() common.Hash { 
- return common.Hash{} + var delayedMerklePartialsBytes []byte + for _, partials := range s.DelayedMessageMerklePartials { + delayedMerklePartialsBytes = append(delayedMerklePartialsBytes, partials.Bytes()...) + } + hash := crypto.Keccak256( + arbmath.Uint16ToBytes(s.Version), + arbmath.UintToBytes(s.ParentChainId), + arbmath.UintToBytes(s.ParentChainBlockNumber), + s.BatchPostingTargetAddress.Bytes(), + s.DelayedMessagePostingTargetAddress.Bytes(), + s.ParentChainBlockHash.Bytes(), + s.ParentChainPreviousBlockHash.Bytes(), + s.MessageAccumulator.Bytes(), + s.DelayedMessagesSeenRoot.Bytes(), + arbmath.UintToBytes(s.MsgCount), + arbmath.UintToBytes(s.BatchCount), + arbmath.UintToBytes(s.DelayedMessagesRead), + arbmath.UintToBytes(s.DelayedMessagesSeen), + ) + return crypto.Keccak256Hash(hash, delayedMerklePartialsBytes) } // Performs a deep clone of the state struct to prevent any unintended diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index d52727df9b..cb6dd20943 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -37,6 +37,8 @@ func (rf *receiptFetcherForBlock) LogsForTxIndex(ctx context.Context, parentChai // This is needed to enable fetching corresponding tx from the txFetcher for _, log := range receipt.Logs { log.TxIndex = txIndex + log.BlockHash = rf.header.Hash() + log.BlockNumber = rf.header.Number.Uint64() } return receipt.Logs, nil } diff --git a/staker/mel_validator.go b/staker/mel_validator.go index f3699a8764..981f743610 100644 --- a/staker/mel_validator.go +++ b/staker/mel_validator.go @@ -9,11 +9,9 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind/v2" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/mel/extraction" 
"github.com/offchainlabs/nitro/arbnode/mel/recording" @@ -23,46 +21,14 @@ import ( "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/util/stopwaiter" + "github.com/offchainlabs/nitro/validator" ) -// dummyTxsAndLogsFetcher is for testing purposes. TODO: remove once we have preimages recorder implementations -type DummyTxsAndLogsFetcher struct { - L1client *ethclient.Client - receipts types.Receipts -} - -func (d *DummyTxsAndLogsFetcher) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { - receipts, err := d.L1client.BlockReceipts(ctx, rpc.BlockNumberOrHashWithHash(parentChainBlockHash, false)) - if err != nil { - return nil, err - } - var logs []*types.Log - for _, receipt := range receipts { - logs = append(logs, receipt.Logs...) - } - d.receipts = receipts - return logs, nil -} - -func (d *DummyTxsAndLogsFetcher) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { - // #nosec G115 - if d.receipts.Len() < int(txIndex+1) { - return nil, fmt.Errorf("insufficient number of receipts: %d, txIndex: %d", d.receipts.Len(), txIndex) - } - receipt := d.receipts[txIndex] - return receipt.Logs, nil -} - -func (d *DummyTxsAndLogsFetcher) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { - tx, _, err := d.L1client.TransactionByHash(ctx, log.TxHash) - return tx, err -} - type MELValidator struct { stopwaiter.StopWaiter arbDb ethdb.KeyValueStore - l1client *ethclient.Client + l1Client *ethclient.Client boldStakerAddr common.Address rollupAddr common.Address @@ -74,10 +40,10 @@ type MELValidator struct { lastValidatedParentChainBlock uint64 } -func NewMELValidator(arbDb ethdb.KeyValueStore, l1client *ethclient.Client, messageExtractor *melrunner.MessageExtractor, dapReaders arbstate.DapReaderSource) *MELValidator { +func NewMELValidator(arbDb ethdb.KeyValueStore, l1Client 
*ethclient.Client, messageExtractor *melrunner.MessageExtractor, dapReaders arbstate.DapReaderSource) *MELValidator { return &MELValidator{ arbDb: arbDb, - l1client: l1client, + l1Client: l1Client, messageExtractor: messageExtractor, dapReaders: dapReaders, } @@ -90,7 +56,7 @@ func (mv *MELValidator) Start(ctx context.Context) { log.Error("MEL validator: Error fetching latest staked assertion hash", "err", err) return 0 } - latestStakedAssertion, err := ReadBoldAssertionCreationInfo(ctx, mv.rollup, mv.l1client, mv.rollupAddr, latestStaked) + latestStakedAssertion, err := ReadBoldAssertionCreationInfo(ctx, mv.rollup, mv.l1Client, mv.rollupAddr, latestStaked) if err != nil { log.Error("MEL validator: Error fetching latest staked assertion creation info", "err", err) return 0 @@ -133,14 +99,21 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid } delayedMsgRecordingDB := melrecording.NewDelayedMsgDatabase(mv.arbDb) recordingDAPReaders := melrecording.NewDAPReaderSource(ctx, mv.dapReaders) + txsAndReceiptsPreimages := make(daprovider.PreimagesMap) for i := lastValidatedParentChainBlock + 1; ; i++ { - header, err := mv.l1client.HeaderByNumber(ctx, new(big.Int).SetUint64(i)) + header, err := mv.l1Client.HeaderByNumber(ctx, new(big.Int).SetUint64(i)) if err != nil { return nil, err } - // Awaiting recording implementations of logsFetcher and txsFetcher - txsAndLogsFetcher := &DummyTxsAndLogsFetcher{L1client: mv.l1client} - state, _, _, _, err := melextraction.ExtractMessages(ctx, preState, header, recordingDAPReaders, delayedMsgRecordingDB, txsAndLogsFetcher, txsAndLogsFetcher, nil) + txsRecorder := melrecording.NewTransactionRecorder(mv.l1Client, header.Hash()) + if err := txsRecorder.Initialize(ctx); err != nil { + return nil, err + } + receiptsRecorder := melrecording.NewReceiptRecorder(mv.l1Client, header.Hash()) + if err := receiptsRecorder.Initialize(ctx); err != nil { + return nil, err + } + state, _, _, _, err := 
melextraction.ExtractMessages(ctx, preState, header, recordingDAPReaders, delayedMsgRecordingDB, txsRecorder, receiptsRecorder, nil) if err != nil { return nil, fmt.Errorf("error calling melextraction.ExtractMessages in recording mode: %w", err) } @@ -151,6 +124,12 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid if state.Hash() != wantState.Hash() { return nil, fmt.Errorf("calculated MEL state hash in recording mode doesn't match the one computed in native mode, parentchainBlocknumber: %d", i) } + validator.CopyPreimagesInto(txsAndReceiptsPreimages, txsRecorder.GetPreimages()) + receiptsPreimages, err := receiptsRecorder.GetPreimages() + if err != nil { + return nil, err + } + validator.CopyPreimagesInto(txsAndReceiptsPreimages, receiptsPreimages) if state.MsgCount >= toValidateMsgExtractionCount { break } @@ -160,7 +139,8 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid delayedPreimages := daprovider.PreimagesMap{ arbutil.Keccak256PreimageType: delayedMsgRecordingDB.Preimages(), } - daprovider.CopyPreimagesInto(preimages, delayedPreimages) + validator.CopyPreimagesInto(preimages, delayedPreimages) + validator.CopyPreimagesInto(preimages, txsAndReceiptsPreimages) return &validationEntry{ Preimages: preimages, }, nil diff --git a/system_tests/message_extraction_layer_utils.go b/system_tests/message_extraction_layer_utils.go new file mode 100644 index 0000000000..725d7e72db --- /dev/null +++ b/system_tests/message_extraction_layer_utils.go @@ -0,0 +1,337 @@ +package arbtest + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/bits" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/rlp" + + "github.com/offchainlabs/nitro/arbnode/mel" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" +) + +// TODO: Code from cmd/mel-replay and 
cmd/replay packages for verification of preimages, should be deleted once we have validation wired +type blobPreimageReader struct { + preimages daprovider.PreimagesMap +} + +func (r *blobPreimageReader) Initialize(ctx context.Context) error { return nil } + +func (r *blobPreimageReader) GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, +) ([]kzg4844.Blob, error) { + var blobs []kzg4844.Blob + for _, h := range versionedHashes { + var blob kzg4844.Blob + if _, ok := r.preimages[arbutil.EthVersionedHashPreimageType]; !ok { + return nil, errors.New("no blobs found in preimages") + } + preimage, ok := r.preimages[arbutil.EthVersionedHashPreimageType][h] + if !ok { + return nil, errors.New("no blobs found in preimages") + } + if len(preimage) != len(blob) { + return nil, fmt.Errorf("for blob %v got back preimage of length %v but expected blob length %v", h, len(preimage), len(blob)) + } + copy(blob[:], preimage) + blobs = append(blobs, blob) + } + return blobs, nil +} + +type testPreimageResolver struct { + preimages map[common.Hash][]byte +} + +func (r *testPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { + if preimageType != arbutil.Keccak256PreimageType { + return nil, fmt.Errorf("unsupported preimageType: %d", preimageType) + } + if preimage, ok := r.preimages[hash]; ok { + return preimage, nil + } + return nil, fmt.Errorf("preimage not found for hash: %v", hash) +} + +type preimageResolver interface { + ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) +} + +type delayedMessageDatabase struct { + preimageResolver preimageResolver +} + +func (d *delayedMessageDatabase) ReadDelayedMessage( + ctx context.Context, + state *mel.State, + msgIndex uint64, +) (*mel.DelayedInboxMessage, error) { + originalMsgIndex := msgIndex + totalMsgsSeen := state.DelayedMessagesSeen + if msgIndex >= totalMsgsSeen { + return nil, 
fmt.Errorf("index %d out of range, total delayed messages seen: %d", msgIndex, totalMsgsSeen) + } + treeSize := nextPowerOfTwo(totalMsgsSeen) + merkleDepth := bits.TrailingZeros64(treeSize) + + // Start traversal from root, which is the delayed messages seen root. + merkleRoot := state.DelayedMessagesSeenRoot + currentHash := merkleRoot + currentDepth := merkleDepth + + // Traverse down the Merkle tree to find the leaf at the given index. + for currentDepth > 0 { + // Resolve the preimage to get left and right children. + result, err := d.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentHash) + if err != nil { + return nil, err + } + if len(result) != 64 { + return nil, fmt.Errorf("invalid preimage result length: %d, wanted 64", len(result)) + } + // Split result into left and right halves. + mid := len(result) / 2 + left := result[:mid] + right := result[mid:] + + // Calculate which subtree contains our index. + subtreeSize := uint64(1) << (currentDepth - 1) + if msgIndex < subtreeSize { + // Go left. + currentHash = common.BytesToHash(left) + } else { + // Go right. + currentHash = common.BytesToHash(right) + msgIndex -= subtreeSize + } + currentDepth-- + } + // At this point, currentHash should be the hash of the delayed message. 
+ delayedMsgBytes, err := d.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentHash) + if err != nil { + return nil, err + } + delayedMessage := new(mel.DelayedInboxMessage) + if err = rlp.Decode(bytes.NewBuffer(delayedMsgBytes), &delayedMessage); err != nil { + return nil, fmt.Errorf("failed to decode delayed message at index %d: %w", originalMsgIndex, err) + } + return delayedMessage, nil +} + +func nextPowerOfTwo(n uint64) uint64 { + if n == 0 { + return 1 + } + if n&(n-1) == 0 { + return n + } + return 1 << bits.Len64(n) +} + +type txFetcherForBlock struct { + header *types.Header + preimageResolver preimageResolver +} + +// TransactionByLog fetches the tx for a specific transaction index by walking +// the tx trie of the block header. It uses the preimage resolver to fetch the preimages +// of the trie nodes as needed. +func (tf *txFetcherForBlock) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { + tx, err := fetchObjectFromTrie[types.Transaction](tf.header.TxHash, log.TxIndex, tf.preimageResolver) + if err != nil { + return nil, err + } + return tx, err +} + +type receiptFetcherForBlock struct { + header *types.Header + preimageResolver preimageResolver +} + +// LogsForTxIndex fetches logs for a specific transaction index by walking +// the receipt trie of the block header. It uses the preimage resolver to fetch the preimages +// of the trie nodes as needed. 
+func (rf *receiptFetcherForBlock) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { + if rf.header.Hash() != parentChainBlockHash { + return nil, errors.New("parentChainBlockHash mismatch") + } + receipt, err := fetchObjectFromTrie[types.Receipt](rf.header.ReceiptHash, txIndex, rf.preimageResolver) + if err != nil { + return nil, err + } + // This is needed to enable fetching corresponding tx from the txFetcher + for _, log := range receipt.Logs { + log.TxIndex = txIndex + } + return receipt.Logs, nil +} + +// LogsForBlockHash first gets the txIndexes corresponding to the relevant logs by reading +// the key `parentChainBlockHash` from the preimages and then fetches logs for each of these txIndexes +func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + if rf.header.Hash() != parentChainBlockHash { + return nil, errors.New("parentChainBlockHash mismatch") + } + txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, rf.header.Hash()) + if err != nil { + return nil, err + } + var txIndexes []uint + if err := rlp.DecodeBytes(txIndexData, &txIndexes); err != nil { + return nil, err + } + var relevantLogs []*types.Log + for _, txIndex := range txIndexes { + logs, err := rf.LogsForTxIndex(ctx, parentChainBlockHash, txIndex) + if err != nil { + return nil, err + } + relevantLogs = append(relevantLogs, logs...) + } + return relevantLogs, nil +} + +// Fetches a specific object at index from a block's Receipt/Tx trie by navigating its +// Merkle Patricia Trie structure. It uses the preimage resolver to fetch preimages +// of trie nodes as needed, and determines how to navigate depending on the structure of the trie nodes. 
+func fetchObjectFromTrie[T any](root common.Hash, index uint, preimageResolver preimageResolver) (*T, error) { + var empty *T + currentNodeHash := root + currentPath := []byte{} // Track nibbles consumed so far. + receiptKey, err := rlp.EncodeToBytes(index) + if err != nil { + return empty, err + } + targetNibbles := keyToNibbles(receiptKey) + for { + nodeData, err := preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentNodeHash) + if err != nil { + return empty, err + } + var node []any + if err = rlp.DecodeBytes(nodeData, &node); err != nil { + return empty, fmt.Errorf("failed to decode RLP node: %w", err) + } + switch len(node) { + case 17: + // We hit a branch node, which has 16 children and a value. + if len(currentPath) == len(targetNibbles) { + // A branch node's 17th item could be the value, so we check if it contains the receipt. + if valueBytes, ok := node[16].([]byte); ok && len(valueBytes) > 0 { + // This branch node has the actual value as the last item, so we decode the receipt + return decodeBinary[T](valueBytes) + } + return empty, fmt.Errorf("no receipt found at target key") + } + // Get the next nibble to follow. + targetNibble := targetNibbles[len(currentPath)] + childData, ok := node[targetNibble].([]byte) + if !ok || len(childData) == 0 { + return empty, fmt.Errorf("no child at nibble %d", targetNibble) + } + // Move to the child node, which is the next hash we have to navigate. + currentNodeHash = common.BytesToHash(childData) + currentPath = append(currentPath, targetNibble) + case 2: + keyPath, ok := node[0].([]byte) + if !ok { + return empty, fmt.Errorf("invalid key path in node") + } + key := extractKeyNibbles(keyPath) + expectedPath := make([]byte, 0) + expectedPath = append(expectedPath, currentPath...) + expectedPath = append(expectedPath, key...) + + // Check if it is a leaf or extension node. 
+ leaf, err := isLeaf(keyPath) + if err != nil { + return empty, err + } + if leaf { + // Check that the keyPath matches the target nibbles, + // otherwise, the receipt does not exist in the trie. + if !bytes.Equal(expectedPath, targetNibbles) { + return empty, fmt.Errorf("leaf key does not match target nibbles") + } + rawData, ok := node[1].([]byte) + if !ok { + return empty, fmt.Errorf("invalid receipt data in leaf node") + } + return decodeBinary[T](rawData) + } + // If the node is not a leaf node, it is an extension node. + // Check if our target key matches this extension path. + if len(expectedPath) > len(targetNibbles) || !bytes.Equal(expectedPath, targetNibbles[:len(expectedPath)]) { + return empty, fmt.Errorf("extension path mismatch") + } + nextNodeBytes, ok := node[1].([]byte) + if !ok { + return empty, fmt.Errorf("invalid next node in extension") + } + // We navigate to the next node in the trie. + currentNodeHash = common.BytesToHash(nextNodeBytes) + currentPath = expectedPath + default: + return empty, fmt.Errorf("invalid node structure: unexpected length %d", len(node)) + } + } +} + +// Converts a byte slice key into a slice of nibbles (4-bit values). +// Keys are encoded in big endian format, which is required by Ethereum MPTs. +func keyToNibbles(key []byte) []byte { + nibbles := make([]byte, len(key)*2) + for i, b := range key { + nibbles[i*2] = b >> 4 + nibbles[i*2+1] = b & 0x0f + } + return nibbles +} + +// Extracts the key nibbles from a key path, handling odd/even length cases. +func extractKeyNibbles(keyPath []byte) []byte { + if len(keyPath) == 0 { + return nil + } + nibbles := keyToNibbles(keyPath) + if nibbles[0]&1 != 0 { + return nibbles[1:] + } + return nibbles[2:] +} + +func isLeaf(keyPath []byte) (bool, error) { + firstByte := keyPath[0] + firstNibble := firstByte >> 4 + // 2 or 3 indicates leaf, while 0 or 1 indicates extension nodes in the Ethereum MPT specification. 
+ if firstNibble > 3 { + return false, errors.New("first nibble cannot be greater than 3") + } + return firstNibble >= 2, nil +} + +func decodeBinary[T any](data []byte) (*T, error) { + var empty *T + if len(data) == 0 { + return empty, errors.New("empty data cannot be decoded") + } + v := new(T) + u, ok := any(v).(interface{ UnmarshalBinary([]byte) error }) + if !ok { + return empty, errors.New("decodeBinary is called on a type that doesnt implement UnmarshalBinary") + } + if err := u.UnmarshalBinary(data); err != nil { + return empty, err + } + return v, nil +} diff --git a/system_tests/message_extraction_layer_validation_test.go b/system_tests/message_extraction_layer_validation_test.go index 42a653e987..c38b030cf8 100644 --- a/system_tests/message_extraction_layer_validation_test.go +++ b/system_tests/message_extraction_layer_validation_test.go @@ -1,21 +1,13 @@ package arbtest import ( - "bytes" "context" - "errors" - "fmt" "math/big" - "math/bits" "testing" "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/rlp" - "github.com/offchainlabs/nitro/arbnode/mel" "github.com/offchainlabs/nitro/arbnode/mel/extraction" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" @@ -69,21 +61,22 @@ func TestMELValidator_Recording_Preimages(t *testing.T) { Require(t, err) // Represents running of MEL validation using preimages in wasm mode. 
TODO: remove this once we have validation wired + preimageResolver := &testPreimageResolver{ + preimages: entry.Preimages[arbutil.Keccak256PreimageType], + } state, err := builder.L2.ConsensusNode.MessageExtractor.GetState(ctx, startBlock) Require(t, err) preimagesBasedDelayedDb := &delayedMessageDatabase{ - preimageResolver: &testPreimageResolver{ - preimages: entry.Preimages[arbutil.Keccak256PreimageType], - }, + preimageResolver: preimageResolver, } preimagesBasedDapReaders := daprovider.NewDAProviderRegistry() Require(t, preimagesBasedDapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(&blobPreimageReader{entry.Preimages}))) for state.MsgCount < uint64(extractedMsgCount) { header, err := builder.L1.Client.HeaderByNumber(ctx, new(big.Int).SetUint64(state.ParentChainBlockNumber+1)) Require(t, err) - // Awaiting recording implementations of logsFetcher and txsFetcher - txsAndLogsFetcher := &staker.DummyTxsAndLogsFetcher{L1client: builder.L1.Client} - postState, _, _, _, err := melextraction.ExtractMessages(ctx, state, header, preimagesBasedDapReaders, preimagesBasedDelayedDb, txsAndLogsFetcher, txsAndLogsFetcher, nil) + preimagesBasedTxsFetcher := &txFetcherForBlock{header, preimageResolver} + preimagesBasedReceiptsFetcher := &receiptFetcherForBlock{header, preimageResolver} + postState, _, _, _, err := melextraction.ExtractMessages(ctx, state, header, preimagesBasedDapReaders, preimagesBasedDelayedDb, preimagesBasedTxsFetcher, preimagesBasedReceiptsFetcher, nil) Require(t, err) wantState, err := builder.L2.ConsensusNode.MessageExtractor.GetState(ctx, state.ParentChainBlockNumber+1) Require(t, err) @@ -93,123 +86,3 @@ func TestMELValidator_Recording_Preimages(t *testing.T) { state = postState } } - -// TODO: Code from cmd/mel-replay and cmd/replay packages for verification of preimages, should be deleted once we have validation wired -type blobPreimageReader struct { - preimages daprovider.PreimagesMap -} - -func (r *blobPreimageReader) Initialize(ctx 
context.Context) error { return nil } - -func (r *blobPreimageReader) GetBlobs( - ctx context.Context, - batchBlockHash common.Hash, - versionedHashes []common.Hash, -) ([]kzg4844.Blob, error) { - var blobs []kzg4844.Blob - for _, h := range versionedHashes { - var blob kzg4844.Blob - if _, ok := r.preimages[arbutil.EthVersionedHashPreimageType]; !ok { - return nil, errors.New("no blobs found in preimages") - } - preimage, ok := r.preimages[arbutil.EthVersionedHashPreimageType][h] - if !ok { - return nil, errors.New("no blobs found in preimages") - } - if len(preimage) != len(blob) { - return nil, fmt.Errorf("for blob %v got back preimage of length %v but expected blob length %v", h, len(preimage), len(blob)) - } - copy(blob[:], preimage) - blobs = append(blobs, blob) - } - return blobs, nil -} - -type testPreimageResolver struct { - preimages map[common.Hash][]byte -} - -func (r *testPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { - if preimageType != arbutil.Keccak256PreimageType { - return nil, fmt.Errorf("unsupported preimageType: %d", preimageType) - } - if preimage, ok := r.preimages[hash]; ok { - return preimage, nil - } - return nil, fmt.Errorf("preimage not found for hash: %v", hash) -} - -type preimageResolver interface { - ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) -} - -type delayedMessageDatabase struct { - preimageResolver preimageResolver -} - -func (d *delayedMessageDatabase) ReadDelayedMessage( - ctx context.Context, - state *mel.State, - msgIndex uint64, -) (*mel.DelayedInboxMessage, error) { - originalMsgIndex := msgIndex - totalMsgsSeen := state.DelayedMessagesSeen - if msgIndex >= totalMsgsSeen { - return nil, fmt.Errorf("index %d out of range, total delayed messages seen: %d", msgIndex, totalMsgsSeen) - } - treeSize := nextPowerOfTwo(totalMsgsSeen) - merkleDepth := bits.TrailingZeros64(treeSize) - - // Start traversal from root, which 
is the delayed messages seen root. - merkleRoot := state.DelayedMessagesSeenRoot - currentHash := merkleRoot - currentDepth := merkleDepth - - // Traverse down the Merkle tree to find the leaf at the given index. - for currentDepth > 0 { - // Resolve the preimage to get left and right children. - result, err := d.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentHash) - if err != nil { - return nil, err - } - if len(result) != 64 { - return nil, fmt.Errorf("invalid preimage result length: %d, wanted 64", len(result)) - } - // Split result into left and right halves. - mid := len(result) / 2 - left := result[:mid] - right := result[mid:] - - // Calculate which subtree contains our index. - subtreeSize := uint64(1) << (currentDepth - 1) - if msgIndex < subtreeSize { - // Go left. - currentHash = common.BytesToHash(left) - } else { - // Go right. - currentHash = common.BytesToHash(right) - msgIndex -= subtreeSize - } - currentDepth-- - } - // At this point, currentHash should be the hash of the delayed message. 
- delayedMsgBytes, err := d.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentHash) - if err != nil { - return nil, err - } - delayedMessage := new(mel.DelayedInboxMessage) - if err = rlp.Decode(bytes.NewBuffer(delayedMsgBytes), &delayedMessage); err != nil { - return nil, fmt.Errorf("failed to decode delayed message at index %d: %w", originalMsgIndex, err) - } - return delayedMessage, nil -} - -func nextPowerOfTwo(n uint64) uint64 { - if n == 0 { - return 1 - } - if n&(n-1) == 0 { - return n - } - return 1 << bits.Len64(n) -} From cec5783dd2fd727c748244fe0d24a24b4041be0d Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 21:44:47 +0530 Subject: [PATCH 19/42] reduce code diff --- arbnode/mel/extraction/batch_lookup.go | 5 +++ .../mel/extraction/delayed_message_lookup.go | 8 ++++ arbnode/mel/recording/dap_reader_source.go | 24 ++++++------ arbnode/mel/recording/receipt_recorder.go | 38 ++++++++++++------- cmd/mel-replay/receipt_fetcher.go | 2 + 5 files changed, 52 insertions(+), 25 deletions(-) diff --git a/arbnode/mel/extraction/batch_lookup.go b/arbnode/mel/extraction/batch_lookup.go index 06c6692793..4d535317c8 100644 --- a/arbnode/mel/extraction/batch_lookup.go +++ b/arbnode/mel/extraction/batch_lookup.go @@ -58,6 +58,11 @@ func ParseBatchesFromBlock( return nil, nil, fmt.Errorf("error fetching tx by hash: %v in ParseBatchesFromBlock: %w ", log.TxHash, err) } + // Record this log for MEL validation + if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { + return nil, nil, fmt.Errorf("error recording relevant logs: %w", err) + } + batch := &mel.SequencerInboxBatch{ BlockHash: log.BlockHash, ParentChainBlockNumber: log.BlockNumber, diff --git a/arbnode/mel/extraction/delayed_message_lookup.go b/arbnode/mel/extraction/delayed_message_lookup.go index c7bc40cacf..a7da8b16bb 100644 --- a/arbnode/mel/extraction/delayed_message_lookup.go +++ 
b/arbnode/mel/extraction/delayed_message_lookup.go @@ -37,6 +37,10 @@ func parseDelayedMessagesFromBlock( // On Arbitrum One, this is the bridge contract which emits a MessageDelivered event. if log.Address == melState.DelayedMessagePostingTargetAddress { relevantLogs = append(relevantLogs, log) + // Record this log for MEL validation + if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { + return nil, fmt.Errorf("error recording relevant logs: %w", err) + } } } if len(relevantLogs) > 0 { @@ -76,6 +80,10 @@ func parseDelayedMessagesFromBlock( return nil, err } messageData[common.BigToHash(msgNum)] = msg + // Record this log for MEL validation + if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), inboxMsgLog.TxIndex); err != nil { + return nil, fmt.Errorf("error recording relevant logs: %w", err) + } } for i, parsedLog := range messageDeliveredEvents { msgKey := common.BigToHash(parsedLog.MessageIndex) diff --git a/arbnode/mel/recording/dap_reader_source.go b/arbnode/mel/recording/dap_reader_source.go index bc6ad1db29..24c8f05422 100644 --- a/arbnode/mel/recording/dap_reader_source.go +++ b/arbnode/mel/recording/dap_reader_source.go @@ -11,16 +11,16 @@ import ( "github.com/offchainlabs/nitro/validator" ) -// RecordingDAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL validator for creation +// DAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL validator for creation // of validation entry. 
Since ExtractMessages function would use daprovider.Reader interface to fetch the sequencer batch via RecoverPayload // we implement collecting of preimages as well in the same method and record it -type RecordingDAPReader struct { +type DAPReader struct { validatorCtx context.Context reader daprovider.Reader preimages daprovider.PreimagesMap } -func (r *RecordingDAPReader) RecoverPayload(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadResult] { +func (r *DAPReader) RecoverPayload(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadResult] { promise := r.reader.RecoverPayloadAndPreimages(batchNum, batchBlockHash, sequencerMsg) result, err := promise.Await(r.validatorCtx) if err != nil { @@ -30,38 +30,38 @@ func (r *RecordingDAPReader) RecoverPayload(batchNum uint64, batchBlockHash comm return containers.NewReadyPromise(daprovider.PayloadResult{Payload: result.Payload}, nil) } -func (r *RecordingDAPReader) CollectPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PreimagesResult] { +func (r *DAPReader) CollectPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PreimagesResult] { return r.reader.CollectPreimages(batchNum, batchBlockHash, sequencerMsg) } -func (r *RecordingDAPReader) RecoverPayloadAndPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadAndPreimagesResult] { +func (r *DAPReader) RecoverPayloadAndPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadAndPreimagesResult] { return r.reader.RecoverPayloadAndPreimages(batchNum, batchBlockHash, sequencerMsg) } -// RecordingDAPReaderSource is used for recording preimages related to sequencer batches stored by da providers, given 
a +// DAPReaderSource is used for recording preimages related to sequencer batches stored by da providers, given a // DapReaderSource it implements GetReader method to return a daprovider.Reader interface that records preimgaes. It takes // in a context variable (corresponding to creation of validation entry) from the MEL validator -type RecordingDAPReaderSource struct { +type DAPReaderSource struct { validatorCtx context.Context dapReaders arbstate.DapReaderSource preimages daprovider.PreimagesMap } -func NewRecordingDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource) *RecordingDAPReaderSource { - return &RecordingDAPReaderSource{ +func NewDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource) *DAPReaderSource { + return &DAPReaderSource{ validatorCtx: validatorCtx, dapReaders: dapReaders, preimages: make(daprovider.PreimagesMap), } } -func (s *RecordingDAPReaderSource) GetReader(headerByte byte) daprovider.Reader { +func (s *DAPReaderSource) GetReader(headerByte byte) daprovider.Reader { reader := s.dapReaders.GetReader(headerByte) - return &RecordingDAPReader{ + return &DAPReader{ validatorCtx: s.validatorCtx, reader: reader, preimages: s.preimages, } } -func (s *RecordingDAPReaderSource) Preimages() daprovider.PreimagesMap { return s.preimages } +func (s *DAPReaderSource) Preimages() daprovider.PreimagesMap { return s.preimages } diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index 38207ebff6..4ed1a73162 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -20,14 +20,15 @@ import ( ) type ReceiptRecorder struct { - parentChainReader BlockReader - parentChainBlockHash common.Hash - preimages daprovider.PreimagesMap - receipts []*types.Receipt - logs []*types.Log - relevantLogsTxIndexes []uint - trieDB *triedb.Database - blockReceiptHash common.Hash + parentChainReader BlockReader + 
parentChainBlockHash common.Hash + parentChainBlockNumber uint64 + preimages daprovider.PreimagesMap + receipts []*types.Receipt + logs []*types.Log + relevantLogsTxIndexes map[uint]struct{} + trieDB *triedb.Database + blockReceiptHash common.Hash } func NewReceiptRecorder( @@ -35,9 +36,10 @@ func NewReceiptRecorder( parentChainBlockHash common.Hash, ) *ReceiptRecorder { return &ReceiptRecorder{ - parentChainReader: parentChainReader, - parentChainBlockHash: parentChainBlockHash, - preimages: make(daprovider.PreimagesMap), + parentChainReader: parentChainReader, + parentChainBlockHash: parentChainBlockHash, + preimages: make(daprovider.PreimagesMap), + relevantLogsTxIndexes: make(map[uint]struct{}), } } @@ -88,6 +90,7 @@ func (rr *ReceiptRecorder) Initialize(ctx context.Context) error { rr.receipts = receipts rr.trieDB = tdb rr.blockReceiptHash = root + rr.parentChainBlockNumber = block.NumberU64() return nil } @@ -98,6 +101,9 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH if rr.parentChainBlockHash != parentChainBlockHash { return nil, fmt.Errorf("parentChainBlockHash mismatch. 
expected: %v got: %v", rr.parentChainBlockHash, parentChainBlockHash) } + if _, recorded := rr.relevantLogsTxIndexes[txIndex]; recorded { + return rr.receipts[txIndex].Logs, nil + } // #nosec G115 if int(txIndex) >= len(rr.receipts) { return nil, fmt.Errorf("index out of range: %d", txIndex) @@ -134,8 +140,10 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH // We use this same trick in validation as well in order to link a tx with its logs for _, log := range receipt.Logs { log.TxIndex = txIndex + log.BlockHash = parentChainBlockHash + log.BlockNumber = rr.parentChainBlockNumber } - rr.relevantLogsTxIndexes = append(rr.relevantLogsTxIndexes, txIndex) + rr.relevantLogsTxIndexes[txIndex] = struct{}{} return receipt.Logs, nil } @@ -153,8 +161,12 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc // to the preimages map as a value to the key represented by parentChainBlockHash. // TODO: If we use parentChainBlockHash as the key for header- then we need to modify this implementation func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { + var relevantLogsTxIndexes []uint + for k := range rr.relevantLogsTxIndexes { + relevantLogsTxIndexes = append(relevantLogsTxIndexes, k) + } var buf bytes.Buffer - if err := rlp.Encode(&buf, rr.relevantLogsTxIndexes); err != nil { + if err := rlp.Encode(&buf, relevantLogsTxIndexes); err != nil { return nil, err } if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index d52727df9b..cb6dd20943 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -37,6 +37,8 @@ func (rf *receiptFetcherForBlock) LogsForTxIndex(ctx context.Context, parentChai // This is needed to enable fetching corresponding tx from the txFetcher for _, log := range receipt.Logs { log.TxIndex = txIndex + log.BlockHash = rf.header.Hash() + log.BlockNumber = 
rf.header.Number.Uint64() } return receipt.Logs, nil } From 5aa9a517403de208a9273e498cbec760a76bc9f4 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Wed, 10 Dec 2025 20:21:25 -0600 Subject: [PATCH 20/42] begin tx recorder for mel --- arbnode/mel/recording/tx_recorder.go | 149 +++++++++++++++++++++++++++ 1 file changed, 149 insertions(+) create mode 100644 arbnode/mel/recording/tx_recorder.go diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go new file mode 100644 index 0000000000..07964c9d19 --- /dev/null +++ b/arbnode/mel/recording/tx_recorder.go @@ -0,0 +1,149 @@ +package recording + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/triedb" + melrunner "github.com/offchainlabs/nitro/arbnode/mel/runner" +) + +type PreimageRecorder struct { + preimages map[common.Hash][]byte +} + +func NewPreimageRecorder() *PreimageRecorder { + return &PreimageRecorder{ + preimages: make(map[common.Hash][]byte), + } +} + +func (pr *PreimageRecorder) GetPreimages() map[common.Hash][]byte { + return pr.preimages +} + +type RecordingDB struct { + underlying triedb.Database + recorder *PreimageRecorder +} + +func (rdb *RecordingDB) Get(key []byte) ([]byte, error) { + hash := common.BytesToHash(key) + value, err := rdb.underlying.Node(hash) + if err != nil { + return nil, err + } + if rdb.recorder != nil { + rdb.recorder.preimages[hash] = value + } + + return value, nil +} + +func (rdb *RecordingDB) Has(key []byte) (bool, error) { + hash := common.BytesToHash(key) + _, err := rdb.underlying.Reader(hash).Node(common.Hash{}, key) + return err == nil, nil +} + +func (rdb *RecordingDB) Put(key []byte, value []byte) error { + return fmt.Errorf("Put not supported on recording DB") +} + +func (rdb *RecordingDB) 
Delete(key []byte) error { + return fmt.Errorf("Delete not supported on recording DB") +} + +type TransactionRecorder struct { + parentChainReader melrunner.ParentChainReader + parentChainBlockHash common.Hash + preimages map[common.Hash][]byte + txs []*types.Transaction +} + +func NewTransactionRecorder( + parentChainReader melrunner.ParentChainReader, + parentChainBlockHash common.Hash, + preimages map[common.Hash][]byte, +) *TransactionRecorder { + return &TransactionRecorder{ + parentChainReader: parentChainReader, + parentChainBlockHash: parentChainBlockHash, + preimages: preimages, + } +} + +func (tr *TransactionRecorder) Initialize(ctx context.Context) error { + block, err := tr.parentChainReader.BlockByHash(ctx, tr.parentChainBlockHash) + if err != nil { + return err + } + tdb := triedb.NewDatabase(nil, &triedb.Config{ + Preimages: true, + }) + txsTrie := trie.NewEmpty(tdb) + txs := block.Body().Transactions + for i, tx := range txs { + indexBytes, err := rlp.EncodeToBytes(uint64(i)) + if err != nil { + return fmt.Errorf("failed to encode index %d: %w", i, err) + } + txBytes, err := tx.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to marshal transaction %d: %w", i, err) + } + if err := txsTrie.Update(indexBytes, txBytes); err != nil { + return fmt.Errorf("failed to update trie at index %d: %w", i, err) + } + } + root, nodes := txsTrie.Commit(false) + if root != block.TxHash() { + return fmt.Errorf("computed root %s doesn't match header root %s", + root.Hex(), block.TxHash().Hex()) + } + if nodes != nil { + if err := tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + return fmt.Errorf("failed to commit trie nodes: %w", err) + } + } + if err := tdb.Commit(root, false); err != nil { + return fmt.Errorf("failed to commit database: %w", err) + } + tr.txs = txs + return nil +} + +func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { + if log == 
nil { + return nil, errors.New("transactionByLog got nil log value") + } + if int(log.TxIndex) >= len(tr.txs) { + return nil, fmt.Errorf("index out of range: %d", log.TxIndex) + } + recorder := NewPreimageRecorder() + recordingDB := &RecordingDB{ + underlying: tl.tdb, + recorder: recorder, + } + recordingTDB := triedb.NewDatabase(recordingDB, nil) + txsTrie, err := trie.New(trie.TrieID(log.TxHash), recordingTDB) + if err != nil { + return nil, fmt.Errorf("failed to create trie: %w", err) + } + indexBytes, err := rlp.EncodeToBytes(log.TxIndex) + if err != nil { + return nil, fmt.Errorf("failed to encode index: %w", err) + } + if _, err = tr.Get(indexBytes); err != nil { + return nil, fmt.Errorf("failed to get transaction from trie: %w", err) + } + // TODO: Return the tx itself instead of nil, but also add the + // tx marshaled binary by hash to the preimages map. + return nil, nil +} From 4065abcc3c4d6c3e5187cd17d3161c2d7f495e9b Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Fri, 12 Dec 2025 14:18:06 -0600 Subject: [PATCH 21/42] fix recorder --- arbnode/mel/recording/tx_recorder.go | 179 ++++++++++++++++------ arbnode/mel/recording/tx_recorder_test.go | 7 + 2 files changed, 136 insertions(+), 50 deletions(-) create mode 100644 arbnode/mel/recording/tx_recorder_test.go diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 07964c9d19..0881c0f56a 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -7,6 +7,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" @@ -14,57 +16,12 @@ import ( melrunner "github.com/offchainlabs/nitro/arbnode/mel/runner" ) -type PreimageRecorder struct { - preimages map[common.Hash][]byte -} - -func 
NewPreimageRecorder() *PreimageRecorder { - return &PreimageRecorder{ - preimages: make(map[common.Hash][]byte), - } -} - -func (pr *PreimageRecorder) GetPreimages() map[common.Hash][]byte { - return pr.preimages -} - -type RecordingDB struct { - underlying triedb.Database - recorder *PreimageRecorder -} - -func (rdb *RecordingDB) Get(key []byte) ([]byte, error) { - hash := common.BytesToHash(key) - value, err := rdb.underlying.Node(hash) - if err != nil { - return nil, err - } - if rdb.recorder != nil { - rdb.recorder.preimages[hash] = value - } - - return value, nil -} - -func (rdb *RecordingDB) Has(key []byte) (bool, error) { - hash := common.BytesToHash(key) - _, err := rdb.underlying.Reader(hash).Node(common.Hash{}, key) - return err == nil, nil -} - -func (rdb *RecordingDB) Put(key []byte, value []byte) error { - return fmt.Errorf("Put not supported on recording DB") -} - -func (rdb *RecordingDB) Delete(key []byte) error { - return fmt.Errorf("Delete not supported on recording DB") -} - type TransactionRecorder struct { parentChainReader melrunner.ParentChainReader parentChainBlockHash common.Hash preimages map[common.Hash][]byte txs []*types.Transaction + trieDB *triedb.Database } func NewTransactionRecorder( @@ -116,10 +73,14 @@ func (tr *TransactionRecorder) Initialize(ctx context.Context) error { return fmt.Errorf("failed to commit database: %w", err) } tr.txs = txs + tr.trieDB = tdb return nil } func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { + if tr.trieDB == nil { + return nil, errors.New("TransactionRecorder not initialized") + } if log == nil { return nil, errors.New("transactionByLog got nil log value") } @@ -128,7 +89,7 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. 
} recorder := NewPreimageRecorder() recordingDB := &RecordingDB{ - underlying: tl.tdb, + underlying: tr.trieDB, recorder: recorder, } recordingTDB := triedb.NewDatabase(recordingDB, nil) @@ -140,10 +101,128 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. if err != nil { return nil, fmt.Errorf("failed to encode index: %w", err) } - if _, err = tr.Get(indexBytes); err != nil { + txBytes, err := txsTrie.Get(indexBytes) + if err != nil { return nil, fmt.Errorf("failed to get transaction from trie: %w", err) } - // TODO: Return the tx itself instead of nil, but also add the + // Return the tx itself instead of nil, but also add the // tx marshaled binary by hash to the preimages map. - return nil, nil + tr.preimages[crypto.Keccak256Hash(txBytes)] = txBytes + tx := new(types.Transaction) + if err = tx.UnmarshalBinary(txBytes); err != nil { + return nil, fmt.Errorf("failed to unmarshal transaction: %w", err) + } + return tx, nil +} + +type PreimageRecorder struct { + preimages map[common.Hash][]byte +} + +func NewPreimageRecorder() *PreimageRecorder { + return &PreimageRecorder{ + preimages: make(map[common.Hash][]byte), + } +} + +func (pr *PreimageRecorder) GetPreimages() map[common.Hash][]byte { + return pr.preimages +} + +type RecordingDB struct { + underlying *triedb.Database + recorder *PreimageRecorder +} + +func (rdb *RecordingDB) Get(key []byte) ([]byte, error) { + hash := common.BytesToHash(key) + value, err := rdb.underlying.Node(hash) + if err != nil { + return nil, err + } + if rdb.recorder != nil { + rdb.recorder.preimages[hash] = value + } + + return value, nil +} +func (rdb *RecordingDB) Has(key []byte) (bool, error) { + hash := common.BytesToHash(key) + _, err := rdb.underlying.Node(hash) + return err == nil, nil +} +func (rdb *RecordingDB) Put(key []byte, value []byte) error { + return fmt.Errorf("Put not supported on recording DB") +} +func (rdb *RecordingDB) Delete(key []byte) error { + return fmt.Errorf("Delete not 
supported on recording DB") +} +func (rdb *RecordingDB) DeleteRange(start, end []byte) error { + return fmt.Errorf("DeleteRange not supported on recording DB") +} +func (rdb *RecordingDB) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { + return fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *RecordingDB) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { + return 0, fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *RecordingDB) SyncAncient() error { + return fmt.Errorf("SyncAncient not supported on recording DB") +} +func (rdb *RecordingDB) TruncateHead(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateHead not supported on recording DB") +} +func (rdb *RecordingDB) TruncateTail(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateTail not supported on recording DB") +} +func (rdb *RecordingDB) Append(kind string, number uint64, item interface{}) error { + return fmt.Errorf("Append not supported on recording DB") +} +func (rdb *RecordingDB) AppendRaw(kind string, number uint64, item []byte) error { + return fmt.Errorf("AppendRaw not supported on recording DB") +} +func (rdb *RecordingDB) AncientDatadir() (string, error) { + return "", fmt.Errorf("AncientDatadir not supported on recording DB") +} +func (rdb *RecordingDB) Ancient(kind string, number uint64) ([]byte, error) { + return nil, fmt.Errorf("Ancient not supported on recording DB") +} +func (rdb *RecordingDB) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + return nil, fmt.Errorf("AncientRange not supported on recording DB") +} +func (rdb *RecordingDB) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + return nil, fmt.Errorf("AncientBytes not supported on recording DB") +} +func (rdb *RecordingDB) Ancients() (uint64, error) { + return 0, fmt.Errorf("Ancients not supported on recording DB") +} +func (rdb *RecordingDB) Tail() (uint64, error) { + return 0, 
fmt.Errorf("Tail not supported on recording DB") +} +func (rdb *RecordingDB) AncientSize(kind string) (uint64, error) { + return 0, fmt.Errorf("AncientSize not supported on recording DB") +} +func (rdb *RecordingDB) Compact(start []byte, limit []byte) error { + return nil +} +func (rdb *RecordingDB) SyncKeyValue() error { + return nil +} +func (rdb *RecordingDB) Stat() (string, error) { + return "", nil +} +func (rdb *RecordingDB) WasmDataBase() ethdb.KeyValueStore { + return nil +} +func (rdb *RecordingDB) NewBatch() ethdb.Batch { + return nil +} +func (rdb *RecordingDB) NewBatchWithSize(size int) ethdb.Batch { + return nil +} +func (rdb *RecordingDB) NewIterator(prefix []byte, start []byte) ethdb.Iterator { + return nil +} +func (rdb *RecordingDB) Close() error { + return nil } diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go new file mode 100644 index 0000000000..4430b729c0 --- /dev/null +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -0,0 +1,7 @@ +package recording + +import "testing" + +func TestTransactionByLog(t *testing.T) { + +} From 3cbd6a48a590d7ce82bbf63cbd24d716f8863777 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Fri, 12 Dec 2025 14:25:38 -0600 Subject: [PATCH 22/42] add unit test for tx recorder --- arbnode/mel/recording/tx_recorder.go | 9 ++-- arbnode/mel/recording/tx_recorder_test.go | 64 ++++++++++++++++++++++- 2 files changed, 69 insertions(+), 4 deletions(-) diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 0881c0f56a..2ed62bb91e 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -13,11 +13,14 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/triedb" - melrunner "github.com/offchainlabs/nitro/arbnode/mel/runner" ) +type BlockReader interface { + BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) +} 
+ type TransactionRecorder struct { - parentChainReader melrunner.ParentChainReader + parentChainReader BlockReader parentChainBlockHash common.Hash preimages map[common.Hash][]byte txs []*types.Transaction @@ -25,7 +28,7 @@ type TransactionRecorder struct { } func NewTransactionRecorder( - parentChainReader melrunner.ParentChainReader, + parentChainReader BlockReader, parentChainBlockHash common.Hash, preimages map[common.Hash][]byte, ) *TransactionRecorder { diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go index 4430b729c0..0d3182815d 100644 --- a/arbnode/mel/recording/tx_recorder_test.go +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -1,7 +1,69 @@ package recording -import "testing" +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie" + "github.com/stretchr/testify/require" +) + +type mockBlockReader struct { + blocks map[common.Hash]*types.Block +} + +func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + block, exists := mbr.blocks[hash] + if !exists { + return nil, nil + } + return block, nil +} func TestTransactionByLog(t *testing.T) { + ctx := context.Background() + toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") + blockHeader := &types.Header{} + txs := make([]*types.Transaction, 0) + for i := 1; i < 10; i++ { + txData := &types.DynamicFeeTx{ + To: &toAddr, + Nonce: 1, + GasFeeCap: big.NewInt(1), + GasTipCap: big.NewInt(1), + Gas: 1, + Value: big.NewInt(0), + Data: nil, + } + tx := types.NewTx(txData) + txs = append(txs, tx) + } + blockBody := &types.Body{ + Transactions: txs, + } + receipts := []*types.Receipt{} + block := types.NewBlock( + blockHeader, + blockBody, + receipts, + trie.NewStackTrie(nil), + ) + blockReader := &mockBlockReader{ + blocks: map[common.Hash]*types.Block{ + block.Hash(): 
block, + }, + } + preimages := make(map[common.Hash][]byte) + recorder := NewTransactionRecorder(blockReader, block.Hash(), preimages) + require.NoError(t, recorder.Initialize(ctx)) + log := &types.Log{ + TxIndex: 5, + } + tx, err := recorder.TransactionByLog(ctx, log) + require.NoError(t, err) + require.Equal(t, txs[5], tx) } From 337bf3c90ad499dae3621f34df892a315b8283f7 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 23 Dec 2025 18:58:41 +0530 Subject: [PATCH 23/42] fix tx recorder --- arbnode/mel/recording/tx_and_logs_database.go | 109 ++++++++++++++ arbnode/mel/recording/tx_recorder.go | 142 +++--------------- arbnode/mel/recording/tx_recorder_test.go | 15 +- 3 files changed, 138 insertions(+), 128 deletions(-) create mode 100644 arbnode/mel/recording/tx_and_logs_database.go diff --git a/arbnode/mel/recording/tx_and_logs_database.go b/arbnode/mel/recording/tx_and_logs_database.go new file mode 100644 index 0000000000..4d683bcfbf --- /dev/null +++ b/arbnode/mel/recording/tx_and_logs_database.go @@ -0,0 +1,109 @@ +package melrecording + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/triedb" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" +) + +type TxAndLogsDatabase struct { + underlying *triedb.Database + recorder daprovider.PreimageRecorder +} + +func (rdb *TxAndLogsDatabase) Get(key []byte) ([]byte, error) { + hash := common.BytesToHash(key) + value, err := rdb.underlying.Node(hash) + if err != nil { + return nil, err + } + if rdb.recorder != nil { + rdb.recorder(hash, value, arbutil.Keccak256PreimageType) + } + + return value, nil +} +func (rdb *TxAndLogsDatabase) Has(key []byte) (bool, error) { + hash := common.BytesToHash(key) + _, err := rdb.underlying.Node(hash) + return err == nil, nil +} +func (rdb *TxAndLogsDatabase) Put(key []byte, value []byte) error { + return fmt.Errorf("Put not supported on recording DB") +} 
+func (rdb *TxAndLogsDatabase) Delete(key []byte) error {
+	return fmt.Errorf("Delete not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) DeleteRange(start, end []byte) error {
+	return fmt.Errorf("DeleteRange not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
+	return fmt.Errorf("ReadAncients not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
+	return 0, fmt.Errorf("ModifyAncients not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) SyncAncient() error {
+	return fmt.Errorf("SyncAncient not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) TruncateHead(n uint64) (uint64, error) {
+	return 0, fmt.Errorf("TruncateHead not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) TruncateTail(n uint64) (uint64, error) {
+	return 0, fmt.Errorf("TruncateTail not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) Append(kind string, number uint64, item interface{}) error {
+	return fmt.Errorf("Append not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) AppendRaw(kind string, number uint64, item []byte) error {
+	return fmt.Errorf("AppendRaw not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) AncientDatadir() (string, error) {
+	return "", fmt.Errorf("AncientDatadir not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) Ancient(kind string, number uint64) ([]byte, error) {
+	return nil, fmt.Errorf("Ancient not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+	return nil, fmt.Errorf("AncientRange not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) {
+	return nil, fmt.Errorf("AncientBytes not supported on recording DB")
+}
+func (rdb *TxAndLogsDatabase) 
Ancients() (uint64, error) { + return 0, fmt.Errorf("Ancients not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) Tail() (uint64, error) { + return 0, fmt.Errorf("Tail not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) AncientSize(kind string) (uint64, error) { + return 0, fmt.Errorf("AncientSize not supported on recording DB") +} +func (rdb *TxAndLogsDatabase) Compact(start []byte, limit []byte) error { + return nil +} +func (rdb *TxAndLogsDatabase) SyncKeyValue() error { + return nil +} +func (rdb *TxAndLogsDatabase) Stat() (string, error) { + return "", nil +} +func (rdb *TxAndLogsDatabase) WasmDataBase() ethdb.KeyValueStore { + return nil +} +func (rdb *TxAndLogsDatabase) NewBatch() ethdb.Batch { + return nil +} +func (rdb *TxAndLogsDatabase) NewBatchWithSize(size int) ethdb.Batch { + return nil +} +func (rdb *TxAndLogsDatabase) NewIterator(prefix []byte, start []byte) ethdb.Iterator { + return nil +} +func (rdb *TxAndLogsDatabase) Close() error { + return nil +} diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 2ed62bb91e..d02fa78867 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -1,4 +1,4 @@ -package recording +package melrecording import ( "context" @@ -6,13 +6,15 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/triedb" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" ) type BlockReader interface { @@ -22,15 +24,16 @@ type BlockReader interface { type TransactionRecorder struct { parentChainReader BlockReader parentChainBlockHash common.Hash - preimages 
map[common.Hash][]byte + preimages daprovider.PreimagesMap txs []*types.Transaction trieDB *triedb.Database + blockTxHash common.Hash } func NewTransactionRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, - preimages map[common.Hash][]byte, + preimages daprovider.PreimagesMap, ) *TransactionRecorder { return &TransactionRecorder{ parentChainReader: parentChainReader, @@ -44,7 +47,7 @@ func (tr *TransactionRecorder) Initialize(ctx context.Context) error { if err != nil { return err } - tdb := triedb.NewDatabase(nil, &triedb.Config{ + tdb := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{ Preimages: true, }) txsTrie := trie.NewEmpty(tdb) @@ -77,6 +80,7 @@ func (tr *TransactionRecorder) Initialize(ctx context.Context) error { } tr.txs = txs tr.trieDB = tdb + tr.blockTxHash = root return nil } @@ -90,13 +94,12 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. if int(log.TxIndex) >= len(tr.txs) { return nil, fmt.Errorf("index out of range: %d", log.TxIndex) } - recorder := NewPreimageRecorder() - recordingDB := &RecordingDB{ + recordingDB := &TxAndLogsDatabase{ underlying: tr.trieDB, - recorder: recorder, + recorder: daprovider.RecordPreimagesTo(tr.preimages), // RecordingDB will record relevant preimages into tr.preimages } recordingTDB := triedb.NewDatabase(recordingDB, nil) - txsTrie, err := trie.New(trie.TrieID(log.TxHash), recordingTDB) + txsTrie, err := trie.New(trie.TrieID(tr.blockTxHash), recordingTDB) if err != nil { return nil, fmt.Errorf("failed to create trie: %w", err) } @@ -108,124 +111,17 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. if err != nil { return nil, fmt.Errorf("failed to get transaction from trie: %w", err) } - // Return the tx itself instead of nil, but also add the - // tx marshaled binary by hash to the preimages map. 
- tr.preimages[crypto.Keccak256Hash(txBytes)] = txBytes + // Return the tx itself instead of nil tx := new(types.Transaction) if err = tx.UnmarshalBinary(txBytes); err != nil { return nil, fmt.Errorf("failed to unmarshal transaction: %w", err) } - return tx, nil -} - -type PreimageRecorder struct { - preimages map[common.Hash][]byte -} - -func NewPreimageRecorder() *PreimageRecorder { - return &PreimageRecorder{ - preimages: make(map[common.Hash][]byte), + // Add the tx marshaled binary by hash to the preimages map + if _, ok := tr.preimages[arbutil.Keccak256PreimageType]; !ok { + tr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) } + tr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(txBytes)] = txBytes + return tx, nil } -func (pr *PreimageRecorder) GetPreimages() map[common.Hash][]byte { - return pr.preimages -} - -type RecordingDB struct { - underlying *triedb.Database - recorder *PreimageRecorder -} - -func (rdb *RecordingDB) Get(key []byte) ([]byte, error) { - hash := common.BytesToHash(key) - value, err := rdb.underlying.Node(hash) - if err != nil { - return nil, err - } - if rdb.recorder != nil { - rdb.recorder.preimages[hash] = value - } - - return value, nil -} -func (rdb *RecordingDB) Has(key []byte) (bool, error) { - hash := common.BytesToHash(key) - _, err := rdb.underlying.Node(hash) - return err == nil, nil -} -func (rdb *RecordingDB) Put(key []byte, value []byte) error { - return fmt.Errorf("Put not supported on recording DB") -} -func (rdb *RecordingDB) Delete(key []byte) error { - return fmt.Errorf("Delete not supported on recording DB") -} -func (rdb *RecordingDB) DeleteRange(start, end []byte) error { - return fmt.Errorf("DeleteRange not supported on recording DB") -} -func (rdb *RecordingDB) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { - return fmt.Errorf("ReadAncients not supported on recording DB") -} -func (rdb *RecordingDB) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, 
error) { - return 0, fmt.Errorf("ReadAncients not supported on recording DB") -} -func (rdb *RecordingDB) SyncAncient() error { - return fmt.Errorf("SyncAncient not supported on recording DB") -} -func (rdb *RecordingDB) TruncateHead(n uint64) (uint64, error) { - return 0, fmt.Errorf("TruncateHead not supported on recording DB") -} -func (rdb *RecordingDB) TruncateTail(n uint64) (uint64, error) { - return 0, fmt.Errorf("TruncateTail not supported on recording DB") -} -func (rdb *RecordingDB) Append(kind string, number uint64, item interface{}) error { - return fmt.Errorf("Append not supported on recording DB") -} -func (rdb *RecordingDB) AppendRaw(kind string, number uint64, item []byte) error { - return fmt.Errorf("AppendRaw not supported on recording DB") -} -func (rdb *RecordingDB) AncientDatadir() (string, error) { - return "", fmt.Errorf("AncientDatadir not supported on recording DB") -} -func (rdb *RecordingDB) Ancient(kind string, number uint64) ([]byte, error) { - return nil, fmt.Errorf("Ancient not supported on recording DB") -} -func (rdb *RecordingDB) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { - return nil, fmt.Errorf("AncientRange not supported on recording DB") -} -func (rdb *RecordingDB) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { - return nil, fmt.Errorf("AncientBytes not supported on recording DB") -} -func (rdb *RecordingDB) Ancients() (uint64, error) { - return 0, fmt.Errorf("Ancients not supported on recording DB") -} -func (rdb *RecordingDB) Tail() (uint64, error) { - return 0, fmt.Errorf("Tail not supported on recording DB") -} -func (rdb *RecordingDB) AncientSize(kind string) (uint64, error) { - return 0, fmt.Errorf("AncientSize not supported on recording DB") -} -func (rdb *RecordingDB) Compact(start []byte, limit []byte) error { - return nil -} -func (rdb *RecordingDB) SyncKeyValue() error { - return nil -} -func (rdb *RecordingDB) Stat() (string, error) { - return "", nil -} -func 
(rdb *RecordingDB) WasmDataBase() ethdb.KeyValueStore { - return nil -} -func (rdb *RecordingDB) NewBatch() ethdb.Batch { - return nil -} -func (rdb *RecordingDB) NewBatchWithSize(size int) ethdb.Batch { - return nil -} -func (rdb *RecordingDB) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - return nil -} -func (rdb *RecordingDB) Close() error { - return nil -} +func (tr *TransactionRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go index 0d3182815d..9acc84e506 100644 --- a/arbnode/mel/recording/tx_recorder_test.go +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -1,4 +1,4 @@ -package recording +package melrecording import ( "context" @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/daprovider" "github.com/stretchr/testify/require" ) @@ -28,10 +29,10 @@ func TestTransactionByLog(t *testing.T) { toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") blockHeader := &types.Header{} txs := make([]*types.Transaction, 0) - for i := 1; i < 10; i++ { + for i := uint64(1); i < 10; i++ { txData := &types.DynamicFeeTx{ To: &toAddr, - Nonce: 1, + Nonce: i, GasFeeCap: big.NewInt(1), GasTipCap: big.NewInt(1), Gas: 1, @@ -56,7 +57,7 @@ func TestTransactionByLog(t *testing.T) { block.Hash(): block, }, } - preimages := make(map[common.Hash][]byte) + preimages := make(daprovider.PreimagesMap) recorder := NewTransactionRecorder(blockReader, block.Hash(), preimages) require.NoError(t, recorder.Initialize(ctx)) @@ -65,5 +66,9 @@ func TestTransactionByLog(t *testing.T) { } tx, err := recorder.TransactionByLog(ctx, log) require.NoError(t, err) - require.Equal(t, txs[5], tx) + have, err := tx.MarshalJSON() + require.NoError(t, err) + want, err := block.Transactions()[5].MarshalJSON() + 
require.NoError(t, err) + require.Equal(t, want, have) } From 6f38c637cc8cda883b5391a2c37d18d64c2c23a9 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 23 Dec 2025 22:46:21 +0530 Subject: [PATCH 24/42] add changelog and fix lint --- arbnode/mel/recording/tx_and_logs_database.go | 1 + arbnode/mel/recording/tx_recorder.go | 3 +++ arbnode/mel/recording/tx_recorder_test.go | 4 +++- changelog/rauljordan-nit-4254.md | 2 ++ 4 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog/rauljordan-nit-4254.md diff --git a/arbnode/mel/recording/tx_and_logs_database.go b/arbnode/mel/recording/tx_and_logs_database.go index 4d683bcfbf..e6752d928c 100644 --- a/arbnode/mel/recording/tx_and_logs_database.go +++ b/arbnode/mel/recording/tx_and_logs_database.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/triedb" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" ) diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index d02fa78867..ebe53c3b3b 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/triedb" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" ) @@ -53,6 +54,7 @@ func (tr *TransactionRecorder) Initialize(ctx context.Context) error { txsTrie := trie.NewEmpty(tdb) txs := block.Body().Transactions for i, tx := range txs { + // #nosec G115 indexBytes, err := rlp.EncodeToBytes(uint64(i)) if err != nil { return fmt.Errorf("failed to encode index %d: %w", i, err) @@ -91,6 +93,7 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. 
if log == nil { return nil, errors.New("transactionByLog got nil log value") } + // #nosec G115 if int(log.TxIndex) >= len(tr.txs) { return nil, fmt.Errorf("index out of range: %d", log.TxIndex) } diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go index 9acc84e506..88824bb859 100644 --- a/arbnode/mel/recording/tx_recorder_test.go +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -5,11 +5,13 @@ import ( "math/big" "testing" + "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/daprovider" - "github.com/stretchr/testify/require" ) type mockBlockReader struct { diff --git a/changelog/rauljordan-nit-4254.md b/changelog/rauljordan-nit-4254.md new file mode 100644 index 0000000000..702ec9b01e --- /dev/null +++ b/changelog/rauljordan-nit-4254.md @@ -0,0 +1,2 @@ +### Added + - Implement recording of txs for MEL validation \ No newline at end of file From 790e83f9f311ece25da967e4cb0e6677094edd4d Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 30 Dec 2025 17:41:36 +0530 Subject: [PATCH 25/42] Implement receipt recorder for mel validation --- arbnode/mel/recording/receipt_recorder.go | 150 ++++++++++++++++++ .../mel/recording/receipt_recorder_test.go | 81 ++++++++++ arbnode/mel/recording/tx_recorder.go | 1 + arbnode/mel/recording/tx_recorder_test.go | 11 +- 4 files changed, 242 insertions(+), 1 deletion(-) create mode 100644 arbnode/mel/recording/receipt_recorder.go create mode 100644 arbnode/mel/recording/receipt_recorder_test.go diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go new file mode 100644 index 0000000000..08bfb3b23b --- /dev/null +++ b/arbnode/mel/recording/receipt_recorder.go @@ -0,0 +1,150 @@ +package melrecording + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
+	"github.com/ethereum/go-ethereum/trie/trienode"
+	"github.com/ethereum/go-ethereum/triedb"
+
+	"github.com/offchainlabs/nitro/arbutil"
+	"github.com/offchainlabs/nitro/daprovider"
+)
+
+type ReceiptRecorder struct {
+	parentChainReader    BlockReader
+	parentChainBlockHash common.Hash
+	preimages            daprovider.PreimagesMap
+	receipts             []*types.Receipt
+	logs                 []*types.Log
+	trieDB               *triedb.Database
+	blockReceiptHash     common.Hash
+}
+
+func NewReceiptRecorder(
+	parentChainReader BlockReader,
+	parentChainBlockHash common.Hash,
+	preimages daprovider.PreimagesMap,
+) *ReceiptRecorder {
+	return &ReceiptRecorder{
+		parentChainReader:    parentChainReader,
+		parentChainBlockHash: parentChainBlockHash,
+		preimages:            preimages,
+	}
+}
+
+func (lr *ReceiptRecorder) Initialize(ctx context.Context) error {
+	block, err := lr.parentChainReader.BlockByHash(ctx, lr.parentChainBlockHash)
+	if err != nil {
+		return err
+	}
+	tdb := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{
+		Preimages: true,
+	})
+	receiptsTrie := trie.NewEmpty(tdb)
+	var receipts []*types.Receipt
+	txs := block.Body().Transactions
+	for i, tx := range txs {
+		receipt, err := lr.parentChainReader.TransactionReceipt(ctx, tx.Hash())
+		if err != nil {
+			return fmt.Errorf("error fetching receipt for tx %v: %w", tx.Hash(), err)
+		}
+		receipts = append(receipts, receipt)
+		lr.logs = append(lr.logs, receipt.Logs...)
+		// #nosec G115
+		indexBytes, err := rlp.EncodeToBytes(uint64(i))
+		if err != nil {
+			return fmt.Errorf("failed to encode index %d: %w", i, err)
+		}
+		receiptBytes, err := receipt.MarshalBinary()
+		if err != nil {
+			return fmt.Errorf("failed to marshal receipt %d: %w", i, err)
+		}
+		if err := receiptsTrie.Update(indexBytes, receiptBytes); err != nil {
+			return fmt.Errorf("failed to update trie at index %d: %w", i, err)
+		}
+	}
+	root, nodes := receiptsTrie.Commit(false)
+	if root != block.ReceiptHash() {
+		return fmt.Errorf("computed root %s doesn't match header root %s",
+			root.Hex(), block.ReceiptHash().Hex())
+	}
+	if nodes != nil {
+		if err := tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
+			return fmt.Errorf("failed to commit trie nodes: %w", err)
+		}
+	}
+	if err := tdb.Commit(root, false); err != nil {
+		return fmt.Errorf("failed to commit database: %w", err)
+	}
+	lr.receipts = receipts
+	lr.trieDB = tdb
+	lr.blockReceiptHash = root
+	return nil
+}
+
+func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) {
+	if lr.trieDB == nil {
+		return nil, errors.New("ReceiptRecorder not initialized")
+	}
+	if lr.parentChainBlockHash != parentChainBlockHash {
+		return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", lr.parentChainBlockHash, parentChainBlockHash)
+	}
+	// #nosec G115
+	if int(txIndex) >= len(lr.receipts) {
+		return nil, fmt.Errorf("index out of range: %d", txIndex)
+	}
+	recordingDB := &TxAndLogsDatabase{
+		underlying: lr.trieDB,
+		recorder:   daprovider.RecordPreimagesTo(lr.preimages), // RecordingDB will record relevant preimages into lr.preimages
+	}
+	recordingTDB := triedb.NewDatabase(recordingDB, nil)
+	receiptsTrie, err := trie.New(trie.TrieID(lr.blockReceiptHash), recordingTDB)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create trie: %w", err)
+	}
+	indexBytes, err := rlp.EncodeToBytes(txIndex)
+	if err != nil {
+		return nil, fmt.Errorf("failed to encode index: %w", err)
+	}
+	receiptBytes, err := receiptsTrie.Get(indexBytes)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get receipt from trie: %w", err)
+	}
+	receipt := new(types.Receipt)
+	if err = receipt.UnmarshalBinary(receiptBytes); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal receipt: %w", err)
+	}
+	// Add the receipt marshaled binary by hash to the preimages map
+	if _, ok := lr.preimages[arbutil.Keccak256PreimageType]; !ok {
+		lr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte)
+	}
+	lr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(receiptBytes)] = receiptBytes
+	// Fill in the TxIndex (given as input to this method) into the logs so that Tx recording
+	// is possible. This field is one of the derived fields of Log hence won't be stored in trie.
+ // + // We use this same trick in validation as well in order to link a tx with its logs + for _, log := range receipt.Logs { + log.TxIndex = txIndex + } + return receipt.Logs, nil +} + +func (lr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + if lr.trieDB == nil { + return nil, errors.New("TransactionRecorder not initialized") + } + if lr.parentChainBlockHash == parentChainBlockHash { + return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", lr.parentChainBlockHash, parentChainBlockHash) + } + return lr.logs, nil +} + +func (tr *ReceiptRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } diff --git a/arbnode/mel/recording/receipt_recorder_test.go b/arbnode/mel/recording/receipt_recorder_test.go new file mode 100644 index 0000000000..6fefb8ebc9 --- /dev/null +++ b/arbnode/mel/recording/receipt_recorder_test.go @@ -0,0 +1,81 @@ +package melrecording + +import ( + "context" + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie" + + "github.com/offchainlabs/nitro/daprovider" +) + +func TestLogsForTxIndex(t *testing.T) { + ctx := context.Background() + blockReader := &mockBlockReader{ + blocks: make(map[common.Hash]*types.Block), + receiptByTxHash: map[common.Hash]*types.Receipt{}, + } + toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") + blockHeader := &types.Header{} + receipts := []*types.Receipt{} + txs := make([]*types.Transaction, 0) + for i := uint64(1); i < 10; i++ { + txData := &types.DynamicFeeTx{ + To: &toAddr, + Nonce: i, + GasFeeCap: big.NewInt(1), + GasTipCap: big.NewInt(1), + Gas: 1, + Value: big.NewInt(0), + Data: nil, + } + tx := types.NewTx(txData) + txs = append(txs, tx) + receipt := &types.Receipt{ + TxHash: tx.Hash(), + TransactionIndex: uint(i - 1), + Type: 
types.DynamicFeeTxType, + Logs: []*types.Log{ + { + // Consensus fields: + Address: common.HexToAddress("sample"), + Topics: []common.Hash{common.HexToHash("topic1"), common.HexToHash("topic2")}, + Data: common.Hex2Bytes(fmt.Sprintf("data:%d", i)), + + // Derived Fields: + TxIndex: uint(i - 1), + }, + }, + } + receipts = append(receipts, receipt) + blockReader.receiptByTxHash[tx.Hash()] = receipt + } + blockBody := &types.Body{ + Transactions: txs, + } + block := types.NewBlock( + blockHeader, + blockBody, + receipts, + trie.NewStackTrie(nil), + ) + blockReader.blocks[block.Hash()] = block + preimages := make(daprovider.PreimagesMap) + recorder := NewReceiptRecorder(blockReader, block.Hash(), preimages) + require.NoError(t, recorder.Initialize(ctx)) + + txIndex := uint(3) + logs, err := recorder.LogsForTxIndex(ctx, block.Hash(), txIndex) + require.NoError(t, err) + have, err := logs[0].MarshalJSON() + require.NoError(t, err) + want, err := receipts[txIndex].Logs[0].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) +} diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index ebe53c3b3b..3d14eb1da4 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -20,6 +20,7 @@ import ( type BlockReader interface { BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) + TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) } type TransactionRecorder struct { diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go index 88824bb859..83582aed48 100644 --- a/arbnode/mel/recording/tx_recorder_test.go +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -15,7 +15,8 @@ import ( ) type mockBlockReader struct { - blocks map[common.Hash]*types.Block + blocks map[common.Hash]*types.Block + receiptByTxHash map[common.Hash]*types.Receipt } func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash 
common.Hash) (*types.Block, error) { @@ -26,6 +27,14 @@ func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash common.Hash) ( return block, nil } +func (mbr *mockBlockReader) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + receipt, exists := mbr.receiptByTxHash[txHash] + if !exists { + return nil, nil + } + return receipt, nil +} + func TestTransactionByLog(t *testing.T) { ctx := context.Background() toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") From 014f26734bb9c7ad81e724f1c8356b4459a868e2 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 30 Dec 2025 17:50:46 +0530 Subject: [PATCH 26/42] code refactor --- arbnode/mel/recording/receipt_recorder.go | 2 +- arbnode/mel/recording/tx_and_logs_database.go | 110 ------------------ arbnode/mel/recording/tx_recorder.go | 2 +- .../recording/txs_and_receipts_database.go | 110 ++++++++++++++++++ 4 files changed, 112 insertions(+), 112 deletions(-) delete mode 100644 arbnode/mel/recording/tx_and_logs_database.go create mode 100644 arbnode/mel/recording/txs_and_receipts_database.go diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index 08bfb3b23b..5cc15613c1 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -101,7 +101,7 @@ func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH if int(txIndex) >= len(lr.receipts) { return nil, fmt.Errorf("index out of range: %d", txIndex) } - recordingDB := &TxAndLogsDatabase{ + recordingDB := &TxsAndReceiptsDatabase{ underlying: lr.trieDB, recorder: daprovider.RecordPreimagesTo(lr.preimages), // RecordingDB will record relevant preimages into tr.preimages } diff --git a/arbnode/mel/recording/tx_and_logs_database.go b/arbnode/mel/recording/tx_and_logs_database.go deleted file mode 100644 index e6752d928c..0000000000 --- a/arbnode/mel/recording/tx_and_logs_database.go +++ 
/dev/null @@ -1,110 +0,0 @@ -package melrecording - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/triedb" - - "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/daprovider" -) - -type TxAndLogsDatabase struct { - underlying *triedb.Database - recorder daprovider.PreimageRecorder -} - -func (rdb *TxAndLogsDatabase) Get(key []byte) ([]byte, error) { - hash := common.BytesToHash(key) - value, err := rdb.underlying.Node(hash) - if err != nil { - return nil, err - } - if rdb.recorder != nil { - rdb.recorder(hash, value, arbutil.Keccak256PreimageType) - } - - return value, nil -} -func (rdb *TxAndLogsDatabase) Has(key []byte) (bool, error) { - hash := common.BytesToHash(key) - _, err := rdb.underlying.Node(hash) - return err == nil, nil -} -func (rdb *TxAndLogsDatabase) Put(key []byte, value []byte) error { - return fmt.Errorf("Put not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Delete(key []byte) error { - return fmt.Errorf("Delete not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) DeleteRange(start, end []byte) error { - return fmt.Errorf("DeleteRange not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { - return fmt.Errorf("ReadAncients not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { - return 0, fmt.Errorf("ReadAncients not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) SyncAncient() error { - return fmt.Errorf("SyncAncient not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) TruncateHead(n uint64) (uint64, error) { - return 0, fmt.Errorf("TruncateHead not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) TruncateTail(n uint64) (uint64, error) { - return 0, fmt.Errorf("TruncateTail not supported on recording DB") -} -func 
(rdb *TxAndLogsDatabase) Append(kind string, number uint64, item interface{}) error { - return fmt.Errorf("Append not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AppendRaw(kind string, number uint64, item []byte) error { - return fmt.Errorf("AppendRaw not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AncientDatadir() (string, error) { - return "", fmt.Errorf("AncientDatadir not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Ancient(kind string, number uint64) ([]byte, error) { - return nil, fmt.Errorf("Ancient not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { - return nil, fmt.Errorf("AncientRange not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { - return nil, fmt.Errorf("AncientBytes not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Ancients() (uint64, error) { - return 0, fmt.Errorf("Ancients not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Tail() (uint64, error) { - return 0, fmt.Errorf("Tail not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) AncientSize(kind string) (uint64, error) { - return 0, fmt.Errorf("AncientSize not supported on recording DB") -} -func (rdb *TxAndLogsDatabase) Compact(start []byte, limit []byte) error { - return nil -} -func (rdb *TxAndLogsDatabase) SyncKeyValue() error { - return nil -} -func (rdb *TxAndLogsDatabase) Stat() (string, error) { - return "", nil -} -func (rdb *TxAndLogsDatabase) WasmDataBase() ethdb.KeyValueStore { - return nil -} -func (rdb *TxAndLogsDatabase) NewBatch() ethdb.Batch { - return nil -} -func (rdb *TxAndLogsDatabase) NewBatchWithSize(size int) ethdb.Batch { - return nil -} -func (rdb *TxAndLogsDatabase) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - return nil -} -func (rdb *TxAndLogsDatabase) Close() error { - return nil -} 
diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 3d14eb1da4..56472cc457 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -98,7 +98,7 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. if int(log.TxIndex) >= len(tr.txs) { return nil, fmt.Errorf("index out of range: %d", log.TxIndex) } - recordingDB := &TxAndLogsDatabase{ + recordingDB := &TxsAndReceiptsDatabase{ underlying: tr.trieDB, recorder: daprovider.RecordPreimagesTo(tr.preimages), // RecordingDB will record relevant preimages into tr.preimages } diff --git a/arbnode/mel/recording/txs_and_receipts_database.go b/arbnode/mel/recording/txs_and_receipts_database.go new file mode 100644 index 0000000000..1c5aaaef2b --- /dev/null +++ b/arbnode/mel/recording/txs_and_receipts_database.go @@ -0,0 +1,110 @@ +package melrecording + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/triedb" + + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" +) + +type TxsAndReceiptsDatabase struct { + underlying *triedb.Database + recorder daprovider.PreimageRecorder +} + +func (rdb *TxsAndReceiptsDatabase) Get(key []byte) ([]byte, error) { + hash := common.BytesToHash(key) + value, err := rdb.underlying.Node(hash) + if err != nil { + return nil, err + } + if rdb.recorder != nil { + rdb.recorder(hash, value, arbutil.Keccak256PreimageType) + } + + return value, nil +} +func (rdb *TxsAndReceiptsDatabase) Has(key []byte) (bool, error) { + hash := common.BytesToHash(key) + _, err := rdb.underlying.Node(hash) + return err == nil, nil +} +func (rdb *TxsAndReceiptsDatabase) Put(key []byte, value []byte) error { + return fmt.Errorf("Put not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Delete(key []byte) error { + return fmt.Errorf("Delete not supported on recording DB") +} 
+func (rdb *TxsAndReceiptsDatabase) DeleteRange(start, end []byte) error { + return fmt.Errorf("DeleteRange not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { + return fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { + return 0, fmt.Errorf("ReadAncients not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) SyncAncient() error { + return fmt.Errorf("SyncAncient not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) TruncateHead(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateHead not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) TruncateTail(n uint64) (uint64, error) { + return 0, fmt.Errorf("TruncateTail not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Append(kind string, number uint64, item interface{}) error { + return fmt.Errorf("Append not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AppendRaw(kind string, number uint64, item []byte) error { + return fmt.Errorf("AppendRaw not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AncientDatadir() (string, error) { + return "", fmt.Errorf("AncientDatadir not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Ancient(kind string, number uint64) ([]byte, error) { + return nil, fmt.Errorf("Ancient not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + return nil, fmt.Errorf("AncientRange not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + return nil, fmt.Errorf("AncientBytes not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Ancients() (uint64, error) { + return 0, 
fmt.Errorf("Ancients not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Tail() (uint64, error) { + return 0, fmt.Errorf("Tail not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) AncientSize(kind string) (uint64, error) { + return 0, fmt.Errorf("AncientSize not supported on recording DB") +} +func (rdb *TxsAndReceiptsDatabase) Compact(start []byte, limit []byte) error { + return nil +} +func (rdb *TxsAndReceiptsDatabase) SyncKeyValue() error { + return nil +} +func (rdb *TxsAndReceiptsDatabase) Stat() (string, error) { + return "", nil +} +func (rdb *TxsAndReceiptsDatabase) WasmDataBase() ethdb.KeyValueStore { + return nil +} +func (rdb *TxsAndReceiptsDatabase) NewBatch() ethdb.Batch { + return nil +} +func (rdb *TxsAndReceiptsDatabase) NewBatchWithSize(size int) ethdb.Batch { + return nil +} +func (rdb *TxsAndReceiptsDatabase) NewIterator(prefix []byte, start []byte) ethdb.Iterator { + return nil +} +func (rdb *TxsAndReceiptsDatabase) Close() error { + return nil +} From d7aa9fcef11f28abe396f321d9e139901dd246e7 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 13:04:32 +0530 Subject: [PATCH 27/42] refactor --- arbnode/mel/recording/receipt_recorder.go | 46 +++++++++++------------ 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index 5cc15613c1..c9d71554e7 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -40,8 +40,8 @@ func NewReceiptRecorder( } } -func (lr *ReceiptRecorder) Initialize(ctx context.Context) error { - block, err := lr.parentChainReader.BlockByHash(ctx, lr.parentChainBlockHash) +func (rr *ReceiptRecorder) Initialize(ctx context.Context) error { + block, err := rr.parentChainReader.BlockByHash(ctx, rr.parentChainBlockHash) if err != nil { return err } @@ -52,12 +52,12 @@ func (lr *ReceiptRecorder) Initialize(ctx context.Context) error { var 
receipts []*types.Receipt txs := block.Body().Transactions for i, tx := range txs { - receipt, err := lr.parentChainReader.TransactionReceipt(ctx, tx.Hash()) + receipt, err := rr.parentChainReader.TransactionReceipt(ctx, tx.Hash()) if err != nil { return fmt.Errorf("error fetching receipt for tx: %v", tx.Hash()) } receipts = append(receipts, receipt) - lr.logs = append(lr.logs, receipt.Logs...) + rr.logs = append(rr.logs, receipt.Logs...) // #nosec G115 indexBytes, err := rlp.EncodeToBytes(uint64(i)) if err != nil { @@ -84,29 +84,29 @@ func (lr *ReceiptRecorder) Initialize(ctx context.Context) error { if err := tdb.Commit(root, false); err != nil { return fmt.Errorf("failed to commit database: %w", err) } - lr.receipts = receipts - lr.trieDB = tdb - lr.blockReceiptHash = root + rr.receipts = receipts + rr.trieDB = tdb + rr.blockReceiptHash = root return nil } -func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { - if lr.trieDB == nil { +func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { + if rr.trieDB == nil { return nil, errors.New("TransactionRecorder not initialized") } - if lr.parentChainBlockHash != parentChainBlockHash { - return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", lr.parentChainBlockHash, parentChainBlockHash) + if rr.parentChainBlockHash != parentChainBlockHash { + return nil, fmt.Errorf("parentChainBlockHash mismatch. 
expected: %v got: %v", rr.parentChainBlockHash, parentChainBlockHash) } // #nosec G115 - if int(txIndex) >= len(lr.receipts) { + if int(txIndex) >= len(rr.receipts) { return nil, fmt.Errorf("index out of range: %d", txIndex) } recordingDB := &TxsAndReceiptsDatabase{ - underlying: lr.trieDB, - recorder: daprovider.RecordPreimagesTo(lr.preimages), // RecordingDB will record relevant preimages into tr.preimages + underlying: rr.trieDB, + recorder: daprovider.RecordPreimagesTo(rr.preimages), // RecordingDB will record relevant preimages into tr.preimages } recordingTDB := triedb.NewDatabase(recordingDB, nil) - receiptsTrie, err := trie.New(trie.TrieID(lr.blockReceiptHash), recordingTDB) + receiptsTrie, err := trie.New(trie.TrieID(rr.blockReceiptHash), recordingTDB) if err != nil { return nil, fmt.Errorf("failed to create trie: %w", err) } @@ -123,10 +123,10 @@ func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH return nil, fmt.Errorf("failed to unmarshal receipt: %w", err) } // Add the receipt marshaled binary by hash to the preimages map - if _, ok := lr.preimages[arbutil.Keccak256PreimageType]; !ok { - lr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { + rr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) } - lr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(receiptBytes)] = receiptBytes + rr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(receiptBytes)] = receiptBytes // Fill in the TxIndex (give as input to this method) into the logs so that Tx recording // is possible. This field is one of the derived fields of Log hence won't be stored in trie. 
// @@ -137,14 +137,14 @@ func (lr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH return receipt.Logs, nil } -func (lr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { - if lr.trieDB == nil { +func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + if rr.trieDB == nil { return nil, errors.New("TransactionRecorder not initialized") } - if lr.parentChainBlockHash == parentChainBlockHash { - return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", lr.parentChainBlockHash, parentChainBlockHash) + if rr.parentChainBlockHash != parentChainBlockHash { + return nil, fmt.Errorf("parentChainBlockHash mismatch. expected: %v got: %v", rr.parentChainBlockHash, parentChainBlockHash) } - return lr.logs, nil + return rr.logs, nil } func (tr *ReceiptRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } From 59b75cadf7f1b7e0815a1125786290dcc3f145f7 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 2 Jan 2026 19:22:00 +0530 Subject: [PATCH 28/42] Make tx and receipt fetcher in mel-replay to work with recorded preimages --- arbnode/mel/recording/receipt_recorder.go | 32 ++- .../mel/recording/receipt_recorder_test.go | 81 ------- arbnode/mel/recording/tx_recorder.go | 3 +- arbnode/mel/recording/tx_recorder_test.go | 85 ------- changelog/ganeshvanahalli-nit-4276.md | 3 + cmd/mel-replay/receipt_fetcher.go | 223 +++++++++--------- cmd/mel-replay/receipt_fetcher_test.go | 123 ---------- ...ceipt_recorder_and_receipt_fetcher_test.go | 133 +++++++++++ cmd/mel-replay/trie_fetcher.go | 147 ++++++++++++ cmd/mel-replay/tx_fetcher.go | 23 ++ .../tx_recorder_and_tx_fetcher_test.go | 105 +++++++++ cmd/mel-replay/txs_fetcher.go | 90 ------- cmd/mel-replay/txs_fetcher_test.go | 77 ------ 13 files changed, 539 insertions(+), 586 deletions(-) delete mode 100644 
arbnode/mel/recording/receipt_recorder_test.go delete mode 100644 arbnode/mel/recording/tx_recorder_test.go create mode 100644 changelog/ganeshvanahalli-nit-4276.md delete mode 100644 cmd/mel-replay/receipt_fetcher_test.go create mode 100644 cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go create mode 100644 cmd/mel-replay/trie_fetcher.go create mode 100644 cmd/mel-replay/tx_fetcher.go create mode 100644 cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go delete mode 100644 cmd/mel-replay/txs_fetcher.go delete mode 100644 cmd/mel-replay/txs_fetcher_test.go diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index c9d71554e7..c3d7c75eaa 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -1,6 +1,7 @@ package melrecording import ( + "bytes" "context" "errors" "fmt" @@ -18,25 +19,28 @@ import ( "github.com/offchainlabs/nitro/daprovider" ) +// maps to an array of uints representing the relevant txIndexes of receipts needed for message extraction +var RELEVANT_LOGS_TXINDEXES_KEY common.Hash = common.HexToHash("123534") + type ReceiptRecorder struct { - parentChainReader BlockReader - parentChainBlockHash common.Hash - preimages daprovider.PreimagesMap - receipts []*types.Receipt - logs []*types.Log - trieDB *triedb.Database - blockReceiptHash common.Hash + parentChainReader BlockReader + parentChainBlockHash common.Hash + preimages daprovider.PreimagesMap + receipts []*types.Receipt + logs []*types.Log + relevantLogsTxIndexes []uint + trieDB *triedb.Database + blockReceiptHash common.Hash } func NewReceiptRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, - preimages daprovider.PreimagesMap, ) *ReceiptRecorder { return &ReceiptRecorder{ parentChainReader: parentChainReader, parentChainBlockHash: parentChainBlockHash, - preimages: preimages, + preimages: make(daprovider.PreimagesMap), } } @@ -134,6 +138,7 @@ func (rr *ReceiptRecorder) 
LogsForTxIndex(ctx context.Context, parentChainBlockH for _, log := range receipt.Logs { log.TxIndex = txIndex } + lr.relevantLogsTxIndexes = append(lr.relevantLogsTxIndexes, txIndex) return receipt.Logs, nil } @@ -147,4 +152,11 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc return rr.logs, nil } -func (tr *ReceiptRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } +func (tr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { + var buf bytes.Buffer + if err := rlp.Encode(&buf, tr.relevantLogsTxIndexes); err != nil { + return nil, err + } + tr.preimages[arbutil.Keccak256PreimageType][RELEVANT_LOGS_TXINDEXES_KEY] = buf.Bytes() + return tr.preimages, nil +} diff --git a/arbnode/mel/recording/receipt_recorder_test.go b/arbnode/mel/recording/receipt_recorder_test.go deleted file mode 100644 index 6fefb8ebc9..0000000000 --- a/arbnode/mel/recording/receipt_recorder_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package melrecording - -import ( - "context" - "fmt" - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/trie" - - "github.com/offchainlabs/nitro/daprovider" -) - -func TestLogsForTxIndex(t *testing.T) { - ctx := context.Background() - blockReader := &mockBlockReader{ - blocks: make(map[common.Hash]*types.Block), - receiptByTxHash: map[common.Hash]*types.Receipt{}, - } - toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") - blockHeader := &types.Header{} - receipts := []*types.Receipt{} - txs := make([]*types.Transaction, 0) - for i := uint64(1); i < 10; i++ { - txData := &types.DynamicFeeTx{ - To: &toAddr, - Nonce: i, - GasFeeCap: big.NewInt(1), - GasTipCap: big.NewInt(1), - Gas: 1, - Value: big.NewInt(0), - Data: nil, - } - tx := types.NewTx(txData) - txs = append(txs, tx) - receipt := &types.Receipt{ - TxHash: tx.Hash(), - 
TransactionIndex: uint(i - 1), - Type: types.DynamicFeeTxType, - Logs: []*types.Log{ - { - // Consensus fields: - Address: common.HexToAddress("sample"), - Topics: []common.Hash{common.HexToHash("topic1"), common.HexToHash("topic2")}, - Data: common.Hex2Bytes(fmt.Sprintf("data:%d", i)), - - // Derived Fields: - TxIndex: uint(i - 1), - }, - }, - } - receipts = append(receipts, receipt) - blockReader.receiptByTxHash[tx.Hash()] = receipt - } - blockBody := &types.Body{ - Transactions: txs, - } - block := types.NewBlock( - blockHeader, - blockBody, - receipts, - trie.NewStackTrie(nil), - ) - blockReader.blocks[block.Hash()] = block - preimages := make(daprovider.PreimagesMap) - recorder := NewReceiptRecorder(blockReader, block.Hash(), preimages) - require.NoError(t, recorder.Initialize(ctx)) - - txIndex := uint(3) - logs, err := recorder.LogsForTxIndex(ctx, block.Hash(), txIndex) - require.NoError(t, err) - have, err := logs[0].MarshalJSON() - require.NoError(t, err) - want, err := receipts[txIndex].Logs[0].MarshalJSON() - require.NoError(t, err) - require.Equal(t, want, have) -} diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 56472cc457..5688b3a5af 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -35,12 +35,11 @@ type TransactionRecorder struct { func NewTransactionRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, - preimages daprovider.PreimagesMap, ) *TransactionRecorder { return &TransactionRecorder{ parentChainReader: parentChainReader, parentChainBlockHash: parentChainBlockHash, - preimages: preimages, + preimages: make(daprovider.PreimagesMap), } } diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go deleted file mode 100644 index 83582aed48..0000000000 --- a/arbnode/mel/recording/tx_recorder_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package melrecording - -import ( - "context" - "math/big" - "testing" 
- - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/trie" - - "github.com/offchainlabs/nitro/daprovider" -) - -type mockBlockReader struct { - blocks map[common.Hash]*types.Block - receiptByTxHash map[common.Hash]*types.Receipt -} - -func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - block, exists := mbr.blocks[hash] - if !exists { - return nil, nil - } - return block, nil -} - -func (mbr *mockBlockReader) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { - receipt, exists := mbr.receiptByTxHash[txHash] - if !exists { - return nil, nil - } - return receipt, nil -} - -func TestTransactionByLog(t *testing.T) { - ctx := context.Background() - toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") - blockHeader := &types.Header{} - txs := make([]*types.Transaction, 0) - for i := uint64(1); i < 10; i++ { - txData := &types.DynamicFeeTx{ - To: &toAddr, - Nonce: i, - GasFeeCap: big.NewInt(1), - GasTipCap: big.NewInt(1), - Gas: 1, - Value: big.NewInt(0), - Data: nil, - } - tx := types.NewTx(txData) - txs = append(txs, tx) - } - blockBody := &types.Body{ - Transactions: txs, - } - receipts := []*types.Receipt{} - block := types.NewBlock( - blockHeader, - blockBody, - receipts, - trie.NewStackTrie(nil), - ) - blockReader := &mockBlockReader{ - blocks: map[common.Hash]*types.Block{ - block.Hash(): block, - }, - } - preimages := make(daprovider.PreimagesMap) - recorder := NewTransactionRecorder(blockReader, block.Hash(), preimages) - require.NoError(t, recorder.Initialize(ctx)) - - log := &types.Log{ - TxIndex: 5, - } - tx, err := recorder.TransactionByLog(ctx, log) - require.NoError(t, err) - have, err := tx.MarshalJSON() - require.NoError(t, err) - want, err := block.Transactions()[5].MarshalJSON() - require.NoError(t, err) - require.Equal(t, want, 
have) -} diff --git a/changelog/ganeshvanahalli-nit-4276.md b/changelog/ganeshvanahalli-nit-4276.md new file mode 100644 index 0000000000..ae0080656a --- /dev/null +++ b/changelog/ganeshvanahalli-nit-4276.md @@ -0,0 +1,3 @@ +### Fixed +- Update implementation of receipts and txs fetching in mel-replay +- Added testing for recording and fetching of logs and txs needed for MEL validation \ No newline at end of file diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index d8c499ea36..64ce26633f 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -4,162 +4,149 @@ package main import ( - "bytes" "context" "errors" "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/triedb" "github.com/offchainlabs/nitro/arbutil" ) +// maps to an array of uints representing the relevant txIndexes of receipts needed for message extraction +var RELEVANT_LOGS_TXINDEXES_KEY common.Hash = common.HexToHash("123534") + type receiptFetcherForBlock struct { header *types.Header preimageResolver preimageResolver } -// ReceiptForTransactionIndex fetches a receipt for a specific transaction index by walking +// LogsForTxIndex fetches logs for a specific transaction index by walking // the receipt trie of the block header. It uses the preimage resolver to fetch the preimages // of the trie nodes as needed. 
-func (rf *receiptFetcherForBlock) ReceiptForTransactionIndex( - ctx context.Context, - txIndex uint, -) (*types.Receipt, error) { - return fetchReceiptFromBlock(rf.header.ReceiptHash, txIndex, rf.preimageResolver) +func (rf *receiptFetcherForBlock) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { + if rf.header.Hash() != parentChainBlockHash { + return nil, errors.New("parentChainBlockHash mismatch") + } + receipt, err := fetchObjectFromTrie[types.Receipt](rf.header.ReceiptHash, txIndex, rf.preimageResolver) + if err != nil { + return nil, err + } + // This is needed to enable fetching corresponding tx from the txFetcher + for _, log := range receipt.Logs { + log.TxIndex = txIndex + } + return receipt.Logs, nil } -// Fetches a specific receipt index from a block's receipt trie by navigating its -// Merkle Patricia Trie structure. It uses the preimage resolver to fetch preimages -// of trie nodes as needed, and determines how to navigate depending on the structure of the trie nodes. -func fetchReceiptFromBlock( - receiptsRoot common.Hash, - receiptIndex uint, - preimageResolver preimageResolver, -) (*types.Receipt, error) { - currentNodeHash := receiptsRoot - currentPath := []byte{} // Track nibbles consumed so far. 
- receiptKey, err := rlp.EncodeToBytes(receiptIndex) +// LogsForBlockHash first gets the txIndexes corresponding to the relevant logs by reading +// RELEVANT_LOGS_TXINDEXES_KEY from the preimages and then fetches logs for each of these txIndexes +func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + if rf.header.Hash() != parentChainBlockHash { + return nil, errors.New("parentChainBlockHash mismatch") + } + txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, RELEVANT_LOGS_TXINDEXES_KEY) if err != nil { return nil, err } - targetNibbles := keyToNibbles(receiptKey) - for { - nodeData, err := preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentNodeHash) + var txIndexes []uint + if err := rlp.DecodeBytes(txIndexData, &txIndexes); err != nil { + return nil, err + } + var relevantLogs []*types.Log + for _, txIndex := range txIndexes { + logs, err := rf.LogsForTxIndex(ctx, parentChainBlockHash, txIndex) if err != nil { return nil, err } - var node []any - if err = rlp.DecodeBytes(nodeData, &node); err != nil { - return nil, fmt.Errorf("failed to decode RLP node: %w", err) - } - switch len(node) { - case 17: - // We hit a branch node, which has 16 children and a value. - if len(currentPath) == len(targetNibbles) { - // A branch node's 17th item could be the value, so we check if it contains the receipt. - if valueBytes, ok := node[16].([]byte); ok && len(valueBytes) > 0 { - // This branch node has the actual value as the last item, so we decode the receipt - return decodeReceipt(valueBytes) - } - return nil, fmt.Errorf("no receipt found at target key") - } - // Get the next nibble to follow. 
- targetNibble := targetNibbles[len(currentPath)] - childData, ok := node[targetNibble].([]byte) - if !ok || len(childData) == 0 { - return nil, fmt.Errorf("no child at nibble %d", targetNibble) - } - // Move to the child node, which is the next hash we have to navigate. - currentNodeHash = common.BytesToHash(childData) - currentPath = append(currentPath, targetNibble) - case 2: - keyPath, ok := node[0].([]byte) - if !ok { - return nil, fmt.Errorf("invalid key path in node") - } - key := extractKeyNibbles(keyPath) - expectedPath := make([]byte, 0) - expectedPath = append(expectedPath, currentPath...) - expectedPath = append(expectedPath, key...) - - // Check if it is a leaf or extension node. - leaf, err := isLeaf(keyPath) - if err != nil { - return nil, err - } - if leaf { - // Check that the keyPath matches the target nibbles, - // otherwise, the receipt does not exist in the trie. - if !bytes.Equal(expectedPath, targetNibbles) { - return nil, fmt.Errorf("leaf key does not match target nibbles") - } - rawData, ok := node[1].([]byte) - if !ok { - return nil, fmt.Errorf("invalid receipt data in leaf node") - } - return decodeReceipt(rawData) - } - // If the node is not a leaf node, it is an extension node. - // Check if our target key matches this extension path. - if len(expectedPath) > len(targetNibbles) || !bytes.Equal(expectedPath, targetNibbles[:len(expectedPath)]) { - return nil, fmt.Errorf("extension path mismatch") - } - nextNodeBytes, ok := node[1].([]byte) - if !ok { - return nil, fmt.Errorf("invalid next node in extension") - } - // We navigate to the next node in the trie. - currentNodeHash = common.BytesToHash(nextNodeBytes) - currentPath = expectedPath - default: - return nil, fmt.Errorf("invalid node structure: unexpected length %d", len(node)) - } + relevantLogs = append(relevantLogs, logs...) } + return relevantLogs, nil } -// Converts a byte slice key into a slice of nibbles (4-bit values). 
-// Keys are encoded in big endian format, which is required by Ethereum MPTs. -func keyToNibbles(key []byte) []byte { - nibbles := make([]byte, len(key)*2) - for i, b := range key { - nibbles[i*2] = b >> 4 - nibbles[i*2+1] = b & 0x0f +// LogsForBlockHashAllLogs is kept, in case we go with an implementation of returning all logs present in a block +func (rf *receiptFetcherForBlock) LogsForBlockHashAllLogs(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { + if rf.header.Hash() != parentChainBlockHash { + return nil, errors.New("parentChainBlockHash mismatch") + } + preimageDB := &DB{ + resolver: rf.preimageResolver, + } + tdb := triedb.NewDatabase(preimageDB, nil) + receiptsTrie, err := trie.New(trie.TrieID(rf.header.ReceiptHash), tdb) + if err != nil { + return nil, err + } + entries, indices := collectTrieEntries(receiptsTrie) + fmt.Println("indices ", indices, len(entries)) + rawReceipts := reconstructOrderedData(entries, indices) + receipts, err := decodeReceiptsData(rawReceipts) + if err != nil { + return nil, err } - return nibbles + var relevantLogs []*types.Log + for _, receipt := range receipts { + relevantLogs = append(relevantLogs, receipt.Logs...) + } + return relevantLogs, nil } -// Extracts the key nibbles from a key path, handling odd/even length cases. 
-func extractKeyNibbles(keyPath []byte) []byte { - if len(keyPath) == 0 { - return nil +func collectTrieEntries(txTrie *trie.Trie) ([][]byte, []uint64) { + nodeIterator, iterErr := txTrie.NodeIterator(nil) + if iterErr != nil { + panic(iterErr) } - nibbles := keyToNibbles(keyPath) - if nibbles[0]&1 != 0 { - return nibbles[1:] + + var rawValues [][]byte + var indexKeys []uint64 + + for nodeIterator.Next(true) { + if !nodeIterator.Leaf() { + continue + } + + leafKey := nodeIterator.LeafKey() + var decodedIndex uint64 + + decodeErr := rlp.DecodeBytes(leafKey, &decodedIndex) + if decodeErr != nil { + panic(fmt.Errorf("key decoding error: %w", decodeErr)) + } + + indexKeys = append(indexKeys, decodedIndex) + rawValues = append(rawValues, nodeIterator.LeafBlob()) } - return nibbles[2:] + + return rawValues, indexKeys } -func isLeaf(keyPath []byte) (bool, error) { - firstByte := keyPath[0] - firstNibble := firstByte >> 4 - // 2 or 3 indicates leaf, while 0 or 1 indicates extension nodes in the Ethereum MPT specification. 
- if firstNibble > 3 { - return false, errors.New("first nibble cannot be greater than 3") +func reconstructOrderedData(rawValues [][]byte, indices []uint64) []hexutil.Bytes { + orderedData := make([]hexutil.Bytes, len(rawValues)) + for position, index := range indices { + if index >= uint64(len(rawValues)) { + panic(fmt.Sprintf("index out of bounds: %d", index-1)) + } + if orderedData[index] != nil { + panic(fmt.Sprintf("index collision detected: %d", index-1)) + } + orderedData[index] = rawValues[position] } - return firstNibble >= 2, nil + return orderedData } -func decodeReceipt(data []byte) (*types.Receipt, error) { - if len(data) == 0 { - return nil, errors.New("empty data cannot be decoded into receipt") - } - rpt := new(types.Receipt) - if err := rpt.UnmarshalBinary(data); err != nil { - return nil, err +func decodeReceiptsData(encodedData []hexutil.Bytes) (types.Receipts, error) { + receiptList := make(types.Receipts, 0, len(encodedData)) + for _, encodedReceipt := range encodedData { + decodedReceipt := new(types.Receipt) + if decodeErr := decodedReceipt.UnmarshalBinary(encodedReceipt); decodeErr != nil { + return nil, fmt.Errorf("receipt decoding failed: %w", decodeErr) + } + receiptList = append(receiptList, decodedReceipt) } - return rpt, nil + return receiptList, nil } diff --git a/cmd/mel-replay/receipt_fetcher_test.go b/cmd/mel-replay/receipt_fetcher_test.go deleted file mode 100644 index bec77c9ea9..0000000000 --- a/cmd/mel-replay/receipt_fetcher_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2025-2026, Offchain Labs, Inc. 
-// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md - -package main - -import ( - "context" - "fmt" - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/trie" - - "github.com/offchainlabs/nitro/arbutil" -) - -func TestFetchReceiptFromBlock_Multiple(t *testing.T) { - ctx := context.Background() - // Creates a block with 42 transactions and receipts. - numReceipts := 42 - receipts := createTestReceipts(numReceipts) - hasher := newRecordingHasher() - receiptsRoot := types.DeriveSha(types.Receipts(receipts), hasher) - header := &types.Header{} - txes := make([]*types.Transaction, numReceipts) - for i := 0; i < numReceipts; i++ { - txes[i] = types.NewTransaction(uint64(i), common.Address{}, big.NewInt(0), 21000, big.NewInt(1), nil) // #nosec G115 - } - body := &types.Body{ - Transactions: txes, - } - blk := types.NewBlock(header, body, receipts, hasher) - require.Equal(t, blk.ReceiptHash(), receiptsRoot) - preimages := hasher.GetPreimages() - mockPreimageResolver := &mockPreimageResolver{ - preimages: preimages, - } - receiptFetcher := &receiptFetcherForBlock{ - header: blk.Header(), - preimageResolver: mockPreimageResolver, - } - for i := 0; i < numReceipts; i++ { - receipt, err := receiptFetcher.ReceiptForTransactionIndex(ctx, uint(i)) // #nosec G115 - require.NoError(t, err) - require.Equal(t, receipts[i].CumulativeGasUsed, receipt.CumulativeGasUsed) - } -} - -type mockPreimageResolver struct { - preimages map[common.Hash][]byte -} - -func (m *mockPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { - if preimage, exists := m.preimages[hash]; exists { - return preimage, nil - } - return nil, fmt.Errorf("preimage not found for hash: %s", hash.Hex()) -} - -// Implements a hasher that 
captures preimages of hashes as it computes them. -type preimageRecordingHasher struct { - trie *trie.StackTrie - preimages map[common.Hash][]byte -} - -func newRecordingHasher() *preimageRecordingHasher { - h := &preimageRecordingHasher{ - preimages: make(map[common.Hash][]byte), - } - // OnTrieNode callback captures all trie nodes. - onTrieNode := func(path []byte, hash common.Hash, blob []byte) { - // Deep copy the blob since the callback warns contents may change, so this is required. - h.preimages[hash] = common.CopyBytes(blob) - } - - h.trie = trie.NewStackTrie(onTrieNode) - return h -} - -func (h *preimageRecordingHasher) Reset() { - onTrieNode := func(path []byte, hash common.Hash, blob []byte) { - h.preimages[hash] = common.CopyBytes(blob) - } - h.trie = trie.NewStackTrie(onTrieNode) -} - -func (h *preimageRecordingHasher) Update(key, value []byte) error { - valueHash := crypto.Keccak256Hash(value) - h.preimages[valueHash] = common.CopyBytes(value) - return h.trie.Update(key, value) -} - -func (h *preimageRecordingHasher) Hash() common.Hash { - return h.trie.Hash() -} - -func (h *preimageRecordingHasher) GetPreimages() map[common.Hash][]byte { - return h.preimages -} - -func createTestReceipts(count int) types.Receipts { - receipts := make(types.Receipts, count) - for i := 0; i < count; i++ { - receipt := &types.Receipt{ - Status: 1, - CumulativeGasUsed: 50_000 + uint64(i), // #nosec G115 - TxHash: common.Hash{}, - ContractAddress: common.Address{}, - Logs: []*types.Log{}, - BlockHash: common.BytesToHash([]byte("foobar")), - BlockNumber: big.NewInt(100), - TransactionIndex: uint(i), // #nosec G115 - } - receipt.Bloom = types.BytesToBloom(make([]byte, 256)) - receipts[i] = receipt - } - return receipts -} diff --git a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go new file mode 100644 index 0000000000..b85c419ee4 --- /dev/null +++ 
b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go @@ -0,0 +1,133 @@ +// Copyright 2025-2026, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package main + +import ( + "context" + "fmt" + "math/big" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie" + + melrecording "github.com/offchainlabs/nitro/arbnode/mel/recording" + "github.com/offchainlabs/nitro/arbutil" +) + +type mockPreimageResolver struct { + preimages map[common.Hash][]byte +} + +func (m *mockPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { + if preimage, exists := m.preimages[hash]; exists { + return preimage, nil + } + return nil, fmt.Errorf("preimage not found for hash: %s", hash.Hex()) +} + +func TestRecordingOfReceiptPreimagesAndFetchingLogsFromPreimages(t *testing.T) { + ctx := context.Background() + blockReader := &mockBlockReader{ + blocks: make(map[common.Hash]*types.Block), + receiptByTxHash: map[common.Hash]*types.Receipt{}, + } + toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") + blockHeader := &types.Header{} + receipts := []*types.Receipt{} + txs := make([]*types.Transaction, 0) + for i := range uint64(50) { + txData := &types.DynamicFeeTx{ + To: &toAddr, + Nonce: i, + GasFeeCap: big.NewInt(1), + GasTipCap: big.NewInt(1), + Gas: 1, + Value: big.NewInt(0), + Data: nil, + } + tx := types.NewTx(txData) + txs = append(txs, tx) + receipt := &types.Receipt{ + TxHash: tx.Hash(), + TransactionIndex: uint(i), + Type: types.DynamicFeeTxType, + Logs: []*types.Log{ + { + // Consensus fields: + Address: common.HexToAddress(fmt.Sprintf("%d", i)), + Topics: []common.Hash{common.HexToHash("topic1"), common.HexToHash("topic2")}, + Data: common.Hex2Bytes(fmt.Sprintf("%d", i)), + + // Derived 
Fields: + TxIndex: uint(i), + }, + }, + } + receipts = append(receipts, receipt) + blockReader.receiptByTxHash[tx.Hash()] = receipt + } + blockBody := &types.Body{Transactions: txs} + block := types.NewBlock(blockHeader, blockBody, receipts, trie.NewStackTrie(nil)) + blockReader.blocks[block.Hash()] = block + recorder := melrecording.NewReceiptRecorder(blockReader, block.Hash()) + require.NoError(t, recorder.Initialize(ctx)) + + // Test recording of preimages + recordStart := uint(6) + recordEnd := uint(20) + for i := recordStart; i <= recordEnd; i++ { + logs, err := recorder.LogsForTxIndex(ctx, block.Hash(), i) + require.NoError(t, err) + have, err := logs[0].MarshalJSON() + require.NoError(t, err) + want, err := receipts[i].Logs[0].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + + // Test reading of logs from the recorded preimages + preimages, err := recorder.GetPreimages() + require.NoError(t, err) + receiptFetcher := &receiptFetcherForBlock{ + header: block.Header(), + preimageResolver: &testPreimageResolver{ + preimages: preimages[arbutil.Keccak256PreimageType], + }, + } + // Test LogsForBlockHash + logs, err := receiptFetcher.LogsForBlockHash(ctx, block.Hash()) + require.NoError(t, err) + // #nosec G115 + if len(logs) != int(recordEnd-recordStart+1) { + t.Fatalf("number of logs from LogsForBlockHash mismatch. 
Want: %d, Got: %d", recordEnd-recordStart+1, len(logs)) + } + for _, log := range logs { + have, err := log.MarshalJSON() + require.NoError(t, err) + want, err := receipts[log.TxIndex].Logs[0].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + // Test LogsForTxIndex + for i := recordStart; i <= recordEnd; i++ { + logs, err := receiptFetcher.LogsForTxIndex(ctx, block.Hash(), i) + require.NoError(t, err) + have, err := logs[0].MarshalJSON() + require.NoError(t, err) + want, err := receipts[i].Logs[0].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + + // Logs fetching should fail for not recorded ones + _, err = receiptFetcher.LogsForTxIndex(ctx, block.Hash(), recordStart-1) + if err == nil || !strings.Contains(err.Error(), "preimage not found for hash") { + t.Fatalf("failed with unexpected error: %v", err) + } +} diff --git a/cmd/mel-replay/trie_fetcher.go b/cmd/mel-replay/trie_fetcher.go new file mode 100644 index 0000000000..277ab65e0a --- /dev/null +++ b/cmd/mel-replay/trie_fetcher.go @@ -0,0 +1,147 @@ +package main + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" + + "github.com/offchainlabs/nitro/arbutil" +) + +// Fetches a specific object at index from a block's Receipt/Tx trie by navigating its +// Merkle Patricia Trie structure. It uses the preimage resolver to fetch preimages +// of trie nodes as needed, and determines how to navigate depending on the structure of the trie nodes. +func fetchObjectFromTrie[T any](root common.Hash, index uint, preimageResolver preimageResolver) (*T, error) { + var empty *T + currentNodeHash := root + currentPath := []byte{} // Track nibbles consumed so far. 
+ receiptKey, err := rlp.EncodeToBytes(index) + if err != nil { + return empty, err + } + targetNibbles := keyToNibbles(receiptKey) + for { + nodeData, err := preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentNodeHash) + if err != nil { + return empty, err + } + var node []any + if err = rlp.DecodeBytes(nodeData, &node); err != nil { + return empty, fmt.Errorf("failed to decode RLP node: %w", err) + } + switch len(node) { + case 17: + // We hit a branch node, which has 16 children and a value. + if len(currentPath) == len(targetNibbles) { + // A branch node's 17th item could be the value, so we check if it contains the receipt. + if valueBytes, ok := node[16].([]byte); ok && len(valueBytes) > 0 { + // This branch node has the actual value as the last item, so we decode the receipt + return decodeBinary[T](valueBytes) + } + return empty, fmt.Errorf("no receipt found at target key") + } + // Get the next nibble to follow. + targetNibble := targetNibbles[len(currentPath)] + childData, ok := node[targetNibble].([]byte) + if !ok || len(childData) == 0 { + return empty, fmt.Errorf("no child at nibble %d", targetNibble) + } + // Move to the child node, which is the next hash we have to navigate. + currentNodeHash = common.BytesToHash(childData) + currentPath = append(currentPath, targetNibble) + case 2: + keyPath, ok := node[0].([]byte) + if !ok { + return empty, fmt.Errorf("invalid key path in node") + } + key := extractKeyNibbles(keyPath) + expectedPath := make([]byte, 0) + expectedPath = append(expectedPath, currentPath...) + expectedPath = append(expectedPath, key...) + + // Check if it is a leaf or extension node. + leaf, err := isLeaf(keyPath) + if err != nil { + return empty, err + } + if leaf { + // Check that the keyPath matches the target nibbles, + // otherwise, the receipt does not exist in the trie. 
+ if !bytes.Equal(expectedPath, targetNibbles) { + return empty, fmt.Errorf("leaf key does not match target nibbles") + } + rawData, ok := node[1].([]byte) + if !ok { + return empty, fmt.Errorf("invalid receipt data in leaf node") + } + return decodeBinary[T](rawData) + } + // If the node is not a leaf node, it is an extension node. + // Check if our target key matches this extension path. + if len(expectedPath) > len(targetNibbles) || !bytes.Equal(expectedPath, targetNibbles[:len(expectedPath)]) { + return empty, fmt.Errorf("extension path mismatch") + } + nextNodeBytes, ok := node[1].([]byte) + if !ok { + return empty, fmt.Errorf("invalid next node in extension") + } + // We navigate to the next node in the trie. + currentNodeHash = common.BytesToHash(nextNodeBytes) + currentPath = expectedPath + default: + return empty, fmt.Errorf("invalid node structure: unexpected length %d", len(node)) + } + } +} + +// Converts a byte slice key into a slice of nibbles (4-bit values). +// Keys are encoded in big endian format, which is required by Ethereum MPTs. +func keyToNibbles(key []byte) []byte { + nibbles := make([]byte, len(key)*2) + for i, b := range key { + nibbles[i*2] = b >> 4 + nibbles[i*2+1] = b & 0x0f + } + return nibbles +} + +// Extracts the key nibbles from a key path, handling odd/even length cases. +func extractKeyNibbles(keyPath []byte) []byte { + if len(keyPath) == 0 { + return nil + } + nibbles := keyToNibbles(keyPath) + if nibbles[0]&1 != 0 { + return nibbles[1:] + } + return nibbles[2:] +} + +func isLeaf(keyPath []byte) (bool, error) { + firstByte := keyPath[0] + firstNibble := firstByte >> 4 + // 2 or 3 indicates leaf, while 0 or 1 indicates extension nodes in the Ethereum MPT specification. 
+ if firstNibble > 3 { + return false, errors.New("first nibble cannot be greater than 3") + } + return firstNibble >= 2, nil +} + +func decodeBinary[T any](data []byte) (*T, error) { + var empty *T + if len(data) == 0 { + return empty, errors.New("empty data cannot be decoded") + } + v := new(T) + u, ok := any(v).(interface{ UnmarshalBinary([]byte) error }) + if !ok { + return empty, errors.New("decodeBinary is called on a type that doesnt implement UnmarshalBinary") + } + if err := u.UnmarshalBinary(data); err != nil { + return empty, err + } + return v, nil +} diff --git a/cmd/mel-replay/tx_fetcher.go b/cmd/mel-replay/tx_fetcher.go new file mode 100644 index 0000000000..be7290b0c3 --- /dev/null +++ b/cmd/mel-replay/tx_fetcher.go @@ -0,0 +1,23 @@ +package main + +import ( + "context" + + "github.com/ethereum/go-ethereum/core/types" +) + +type txFetcherForBlock struct { + header *types.Header + preimageResolver preimageResolver +} + +// TransactionByLog fetches the tx for a specific transaction index by walking +// the tx trie of the block header. It uses the preimage resolver to fetch the preimages +// of the trie nodes as needed. 
+func (tf *txFetcherForBlock) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { + tx, err := fetchObjectFromTrie[types.Transaction](tf.header.TxHash, log.TxIndex, tf.preimageResolver) + if err != nil { + return nil, err + } + return tx, err +} diff --git a/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go b/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go new file mode 100644 index 0000000000..b0e441a85a --- /dev/null +++ b/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go @@ -0,0 +1,105 @@ +package main + +import ( + "context" + "math/big" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie" + + "github.com/offchainlabs/nitro/arbnode/mel/recording" + "github.com/offchainlabs/nitro/arbutil" +) + +type mockBlockReader struct { + blocks map[common.Hash]*types.Block + receiptByTxHash map[common.Hash]*types.Receipt +} + +func (mbr *mockBlockReader) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + block, exists := mbr.blocks[hash] + if !exists { + return nil, nil + } + return block, nil +} + +func (mbr *mockBlockReader) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + receipt, exists := mbr.receiptByTxHash[txHash] + if !exists { + return nil, nil + } + return receipt, nil +} + +func TestRecordingOfTxPreimagesAndFetchingTxsFromPreimages(t *testing.T) { + ctx := context.Background() + toAddr := common.HexToAddress("0x0000000000000000000000000000000000DeaDBeef") + blockHeader := &types.Header{} + txs := make([]*types.Transaction, 0) + for i := range uint64(50) { + txData := &types.DynamicFeeTx{ + To: &toAddr, + Nonce: i, + GasFeeCap: big.NewInt(1), + GasTipCap: big.NewInt(1), + Gas: 1, + Value: big.NewInt(0), + Data: nil, + } + tx := types.NewTx(txData) + txs = append(txs, tx) + } + blockBody := &types.Body{Transactions: txs} + 
receipts := []*types.Receipt{} + block := types.NewBlock(blockHeader, blockBody, receipts, trie.NewStackTrie(nil)) + blockReader := &mockBlockReader{ + blocks: map[common.Hash]*types.Block{ + block.Hash(): block, + }, + } + recorder := melrecording.NewTransactionRecorder(blockReader, block.Hash()) + require.NoError(t, recorder.Initialize(ctx)) + + // Test recording of preimages + recordStart := uint(9) + recordEnd := uint(27) + for i := recordStart; i <= recordEnd; i++ { + tx, err := recorder.TransactionByLog(ctx, &types.Log{TxIndex: i}) + require.NoError(t, err) + have, err := tx.MarshalJSON() + require.NoError(t, err) + want, err := block.Transactions()[i].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + + // Test reading of txs from the recorded preimages + preimages := recorder.GetPreimages() + txsFetcher := &txFetcherForBlock{ + header: block.Header(), + preimageResolver: &testPreimageResolver{ + preimages: preimages[arbutil.Keccak256PreimageType], + }, + } + for i := recordStart; i <= recordEnd; i++ { + tx, err := txsFetcher.TransactionByLog(ctx, &types.Log{TxIndex: i}) + require.NoError(t, err) + have, err := tx.MarshalJSON() + require.NoError(t, err) + want, err := block.Transactions()[i].MarshalJSON() + require.NoError(t, err) + require.Equal(t, want, have) + } + + // Tx fetching should fail for not recorded ones + _, err := txsFetcher.TransactionByLog(ctx, &types.Log{TxIndex: recordStart - 1}) + if err == nil || !strings.Contains(err.Error(), "preimage not found for hash") { + t.Fatalf("failed with unexpected error: %v", err) + } +} diff --git a/cmd/mel-replay/txs_fetcher.go b/cmd/mel-replay/txs_fetcher.go deleted file mode 100644 index 2307ba0792..0000000000 --- a/cmd/mel-replay/txs_fetcher.go +++ /dev/null @@ -1,90 +0,0 @@ -package main - -import ( - "context" - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - 
"github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/triedb" -) - -type txsFetcherForBlock struct { - header *types.Header - preimageResolver preimageResolver -} - -func (tf *txsFetcherForBlock) TransactionsByHeader( - ctx context.Context, - parentChainHeaderHash common.Hash, -) (types.Transactions, error) { - preimageDB := &DB{ - resolver: tf.preimageResolver, - } - tdb := triedb.NewDatabase(preimageDB, nil) - tr, err := trie.New(trie.TrieID(tf.header.TxHash), tdb) - if err != nil { - panic(err) - } - entries, indices := tf.collectTrieEntries(tr) - rawTxs := tf.reconstructOrderedData(entries, indices) - return tf.decodeTransactionData(rawTxs) -} - -func (btr *txsFetcherForBlock) collectTrieEntries(txTrie *trie.Trie) ([][]byte, []uint64) { - nodeIterator, iterErr := txTrie.NodeIterator(nil) - if iterErr != nil { - panic(iterErr) - } - - var rawValues [][]byte - var indexKeys []uint64 - - for nodeIterator.Next(true) { - if !nodeIterator.Leaf() { - continue - } - - leafKey := nodeIterator.LeafKey() - var decodedIndex uint64 - - decodeErr := rlp.DecodeBytes(leafKey, &decodedIndex) - if decodeErr != nil { - panic(fmt.Errorf("key decoding error: %w", decodeErr)) - } - - indexKeys = append(indexKeys, decodedIndex) - rawValues = append(rawValues, nodeIterator.LeafBlob()) - } - - return rawValues, indexKeys -} - -func (btr *txsFetcherForBlock) reconstructOrderedData(rawValues [][]byte, indices []uint64) []hexutil.Bytes { - orderedData := make([]hexutil.Bytes, len(rawValues)) - for position, index := range indices { - if index >= uint64(len(rawValues)) { - panic(fmt.Sprintf("index out of bounds: %d", index)) - } - if orderedData[index] != nil { - panic(fmt.Sprintf("index collision detected: %d", index)) - } - orderedData[index] = rawValues[position] - } - return orderedData -} - -func (btr *txsFetcherForBlock) decodeTransactionData(encodedData []hexutil.Bytes) (types.Transactions, error) { - transactionList := 
make(types.Transactions, 0, len(encodedData)) - for _, encodedTx := range encodedData { - decodedTx := new(types.Transaction) - if decodeErr := decodedTx.UnmarshalBinary(encodedTx); decodeErr != nil { - return nil, fmt.Errorf("transaction decoding failed: %w", decodeErr) - } - transactionList = append(transactionList, decodedTx) - } - return transactionList, nil -} diff --git a/cmd/mel-replay/txs_fetcher_test.go b/cmd/mel-replay/txs_fetcher_test.go deleted file mode 100644 index ec8c651516..0000000000 --- a/cmd/mel-replay/txs_fetcher_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package main - -import ( - "context" - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -func TestFetchTransactionsForBlockHeader_DynamicFeeTxs(t *testing.T) { - ctx := context.Background() - total := uint64(42) - txes := make([]*types.Transaction, total) - for i := uint64(0); i < total; i++ { - txData := types.DynamicFeeTx{ - Nonce: i, - To: nil, - Gas: 21000, - GasTipCap: big.NewInt(1), - GasFeeCap: big.NewInt(1), - } - txes[i] = types.NewTx(&txData) - } - hasher := newRecordingHasher() - txsRoot := types.DeriveSha(types.Transactions(txes), hasher) - header := &types.Header{ - TxHash: txsRoot, - } - preimages := hasher.GetPreimages() - mockPreimageResolver := &mockPreimageResolver{ - preimages: preimages, - } - txsFetcher := &txsFetcherForBlock{ - header: header, - preimageResolver: mockPreimageResolver, - } - fetched, err := txsFetcher.TransactionsByHeader(ctx, header.Hash()) - require.NoError(t, err) - require.True(t, uint64(len(fetched)) == total) // #nosec G115 - for i, tx := range fetched { - require.Equal(t, txes[i].Hash(), tx.Hash()) - require.Equal(t, uint64(i), tx.Nonce()) // #nosec G115 - } -} - -func TestFetchTransactionsForBlockHeader_LegacyTxs(t *testing.T) { - ctx := context.Background() - total := uint64(42) - txes := make([]*types.Transaction, total) - for i := 
uint64(0); i < total; i++ { - txes[i] = types.NewTransaction(i, common.Address{}, big.NewInt(0), 21000, big.NewInt(1), nil) - } - hasher := newRecordingHasher() - txsRoot := types.DeriveSha(types.Transactions(txes), hasher) - header := &types.Header{ - TxHash: txsRoot, - } - preimages := hasher.GetPreimages() - mockPreimageResolver := &mockPreimageResolver{ - preimages: preimages, - } - txsFetcher := &txsFetcherForBlock{ - header: header, - preimageResolver: mockPreimageResolver, - } - fetched, err := txsFetcher.TransactionsByHeader(ctx, header.Hash()) - require.NoError(t, err) - require.True(t, uint64(len(fetched)) == total) // #nosec G115 - for i, tx := range fetched { - require.Equal(t, txes[i].Hash(), tx.Hash()) - require.Equal(t, uint64(i), tx.Nonce()) // #nosec G115 - } -} From ccf0e22172e6941a0bcf0cb495540a181f45ff88 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 12:45:04 +0530 Subject: [PATCH 29/42] remove debug statement --- cmd/mel-replay/receipt_fetcher.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index 64ce26633f..7aa83b39fd 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -83,7 +83,6 @@ func (rf *receiptFetcherForBlock) LogsForBlockHashAllLogs(ctx context.Context, p return nil, err } entries, indices := collectTrieEntries(receiptsTrie) - fmt.Println("indices ", indices, len(entries)) rawReceipts := reconstructOrderedData(entries, indices) receipts, err := decodeReceiptsData(rawReceipts) if err != nil { From 439c59d788d03d5653ce0ad53ded68f6d8593a09 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 13:07:33 +0530 Subject: [PATCH 30/42] code refactor --- arbnode/mel/recording/receipt_recorder.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index c3d7c75eaa..c4a6d34df1 100644 
--- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -138,7 +138,7 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH for _, log := range receipt.Logs { log.TxIndex = txIndex } - lr.relevantLogsTxIndexes = append(lr.relevantLogsTxIndexes, txIndex) + rr.relevantLogsTxIndexes = append(rr.relevantLogsTxIndexes, txIndex) return receipt.Logs, nil } @@ -152,11 +152,17 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc return rr.logs, nil } -func (tr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { +func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { + if len(rr.relevantLogsTxIndexes) == 0 { + return nil, nil + } var buf bytes.Buffer - if err := rlp.Encode(&buf, tr.relevantLogsTxIndexes); err != nil { + if err := rlp.Encode(&buf, rr.relevantLogsTxIndexes); err != nil { return nil, err } - tr.preimages[arbutil.Keccak256PreimageType][RELEVANT_LOGS_TXINDEXES_KEY] = buf.Bytes() - return tr.preimages, nil + if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { + rr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + } + rr.preimages[arbutil.Keccak256PreimageType][RELEVANT_LOGS_TXINDEXES_KEY] = buf.Bytes() + return rr.preimages, nil } From 36e255f42ce0ab3a58fb979822a82f742b26f363 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 13:24:21 +0530 Subject: [PATCH 31/42] update impl of GetPreimages --- arbnode/mel/recording/receipt_recorder.go | 11 ++++------- cmd/mel-replay/receipt_fetcher.go | 7 ++----- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index c4a6d34df1..38207ebff6 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -19,9 +19,6 @@ import ( "github.com/offchainlabs/nitro/daprovider" ) -// maps to an array of uints 
representing the relevant txIndexes of receipts needed for message extraction -var RELEVANT_LOGS_TXINDEXES_KEY common.Hash = common.HexToHash("123534") - type ReceiptRecorder struct { parentChainReader BlockReader parentChainBlockHash common.Hash @@ -152,10 +149,10 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc return rr.logs, nil } +// GetPreimages returns the preimages of recorded receipts, and also adds the array of relevant tx indexes +// to the preimages map as a value to the key represented by parentChainBlockHash. +// TODO: If we use parentChainBlockHash as the key for header- then we need to modify this implementation func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { - if len(rr.relevantLogsTxIndexes) == 0 { - return nil, nil - } var buf bytes.Buffer if err := rlp.Encode(&buf, rr.relevantLogsTxIndexes); err != nil { return nil, err @@ -163,6 +160,6 @@ func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { rr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) } - rr.preimages[arbutil.Keccak256PreimageType][RELEVANT_LOGS_TXINDEXES_KEY] = buf.Bytes() + rr.preimages[arbutil.Keccak256PreimageType][rr.parentChainBlockHash] = buf.Bytes() return rr.preimages, nil } diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index 7aa83b39fd..d52727df9b 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -18,9 +18,6 @@ import ( "github.com/offchainlabs/nitro/arbutil" ) -// maps to an array of uints representing the relevant txIndexes of receipts needed for message extraction -var RELEVANT_LOGS_TXINDEXES_KEY common.Hash = common.HexToHash("123534") - type receiptFetcherForBlock struct { header *types.Header preimageResolver preimageResolver @@ -45,12 +42,12 @@ func (rf *receiptFetcherForBlock) LogsForTxIndex(ctx context.Context, parentChai 
} // LogsForBlockHash first gets the txIndexes corresponding to the relevant logs by reading -// RELEVANT_LOGS_TXINDEXES_KEY from the preimages and then fetches logs for each of these txIndexes +// the key `parentChainBlockHash` from the preimages and then fetches logs for each of these txIndexes func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { if rf.header.Hash() != parentChainBlockHash { return nil, errors.New("parentChainBlockHash mismatch") } - txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, RELEVANT_LOGS_TXINDEXES_KEY) + txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, rf.header.Hash()) if err != nil { return nil, err } From 472150c572f1cb4d7e621e4ff57f522eca134900 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 21:44:47 +0530 Subject: [PATCH 32/42] reduce code diff --- arbnode/mel/extraction/batch_lookup.go | 5 +++ .../mel/extraction/delayed_message_lookup.go | 8 ++++ arbnode/mel/recording/dap_reader_source.go | 24 ++++++------ arbnode/mel/recording/receipt_recorder.go | 38 ++++++++++++------- cmd/mel-replay/receipt_fetcher.go | 2 + 5 files changed, 52 insertions(+), 25 deletions(-) diff --git a/arbnode/mel/extraction/batch_lookup.go b/arbnode/mel/extraction/batch_lookup.go index 06c6692793..4d535317c8 100644 --- a/arbnode/mel/extraction/batch_lookup.go +++ b/arbnode/mel/extraction/batch_lookup.go @@ -58,6 +58,11 @@ func ParseBatchesFromBlock( return nil, nil, fmt.Errorf("error fetching tx by hash: %v in ParseBatchesFromBlock: %w ", log.TxHash, err) } + // Record this log for MEL validation + if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { + return nil, nil, fmt.Errorf("error recording relevant logs: %w", err) + } + batch := &mel.SequencerInboxBatch{ BlockHash: log.BlockHash, ParentChainBlockNumber: log.BlockNumber, diff 
--git a/arbnode/mel/extraction/delayed_message_lookup.go b/arbnode/mel/extraction/delayed_message_lookup.go index c7bc40cacf..a7da8b16bb 100644 --- a/arbnode/mel/extraction/delayed_message_lookup.go +++ b/arbnode/mel/extraction/delayed_message_lookup.go @@ -37,6 +37,10 @@ func parseDelayedMessagesFromBlock( // On Arbitrum One, this is the bridge contract which emits a MessageDelivered event. if log.Address == melState.DelayedMessagePostingTargetAddress { relevantLogs = append(relevantLogs, log) + // Record this log for MEL validation + if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { + return nil, fmt.Errorf("error recording relevant logs: %w", err) + } } } if len(relevantLogs) > 0 { @@ -76,6 +80,10 @@ func parseDelayedMessagesFromBlock( return nil, err } messageData[common.BigToHash(msgNum)] = msg + // Record this log for MEL validation + if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), inboxMsgLog.TxIndex); err != nil { + return nil, fmt.Errorf("error recording relevant logs: %w", err) + } } for i, parsedLog := range messageDeliveredEvents { msgKey := common.BigToHash(parsedLog.MessageIndex) diff --git a/arbnode/mel/recording/dap_reader_source.go b/arbnode/mel/recording/dap_reader_source.go index bc6ad1db29..24c8f05422 100644 --- a/arbnode/mel/recording/dap_reader_source.go +++ b/arbnode/mel/recording/dap_reader_source.go @@ -11,16 +11,16 @@ import ( "github.com/offchainlabs/nitro/validator" ) -// RecordingDAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL validator for creation +// DAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL validator for creation // of validation entry. 
Since ExtractMessages function would use daprovider.Reader interface to fetch the sequencer batch via RecoverPayload // we implement collecting of preimages as well in the same method and record it -type RecordingDAPReader struct { +type DAPReader struct { validatorCtx context.Context reader daprovider.Reader preimages daprovider.PreimagesMap } -func (r *RecordingDAPReader) RecoverPayload(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadResult] { +func (r *DAPReader) RecoverPayload(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadResult] { promise := r.reader.RecoverPayloadAndPreimages(batchNum, batchBlockHash, sequencerMsg) result, err := promise.Await(r.validatorCtx) if err != nil { @@ -30,38 +30,38 @@ func (r *RecordingDAPReader) RecoverPayload(batchNum uint64, batchBlockHash comm return containers.NewReadyPromise(daprovider.PayloadResult{Payload: result.Payload}, nil) } -func (r *RecordingDAPReader) CollectPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PreimagesResult] { +func (r *DAPReader) CollectPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PreimagesResult] { return r.reader.CollectPreimages(batchNum, batchBlockHash, sequencerMsg) } -func (r *RecordingDAPReader) RecoverPayloadAndPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadAndPreimagesResult] { +func (r *DAPReader) RecoverPayloadAndPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadAndPreimagesResult] { return r.reader.RecoverPayloadAndPreimages(batchNum, batchBlockHash, sequencerMsg) } -// RecordingDAPReaderSource is used for recording preimages related to sequencer batches stored by da providers, given 
a +// DAPReaderSource is used for recording preimages related to sequencer batches stored by da providers, given a // DapReaderSource it implements GetReader method to return a daprovider.Reader interface that records preimgaes. It takes // in a context variable (corresponding to creation of validation entry) from the MEL validator -type RecordingDAPReaderSource struct { +type DAPReaderSource struct { validatorCtx context.Context dapReaders arbstate.DapReaderSource preimages daprovider.PreimagesMap } -func NewRecordingDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource) *RecordingDAPReaderSource { - return &RecordingDAPReaderSource{ +func NewDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource) *DAPReaderSource { + return &DAPReaderSource{ validatorCtx: validatorCtx, dapReaders: dapReaders, preimages: make(daprovider.PreimagesMap), } } -func (s *RecordingDAPReaderSource) GetReader(headerByte byte) daprovider.Reader { +func (s *DAPReaderSource) GetReader(headerByte byte) daprovider.Reader { reader := s.dapReaders.GetReader(headerByte) - return &RecordingDAPReader{ + return &DAPReader{ validatorCtx: s.validatorCtx, reader: reader, preimages: s.preimages, } } -func (s *RecordingDAPReaderSource) Preimages() daprovider.PreimagesMap { return s.preimages } +func (s *DAPReaderSource) Preimages() daprovider.PreimagesMap { return s.preimages } diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index 38207ebff6..4ed1a73162 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -20,14 +20,15 @@ import ( ) type ReceiptRecorder struct { - parentChainReader BlockReader - parentChainBlockHash common.Hash - preimages daprovider.PreimagesMap - receipts []*types.Receipt - logs []*types.Log - relevantLogsTxIndexes []uint - trieDB *triedb.Database - blockReceiptHash common.Hash + parentChainReader BlockReader + 
parentChainBlockHash common.Hash + parentChainBlockNumber uint64 + preimages daprovider.PreimagesMap + receipts []*types.Receipt + logs []*types.Log + relevantLogsTxIndexes map[uint]struct{} + trieDB *triedb.Database + blockReceiptHash common.Hash } func NewReceiptRecorder( @@ -35,9 +36,10 @@ func NewReceiptRecorder( parentChainBlockHash common.Hash, ) *ReceiptRecorder { return &ReceiptRecorder{ - parentChainReader: parentChainReader, - parentChainBlockHash: parentChainBlockHash, - preimages: make(daprovider.PreimagesMap), + parentChainReader: parentChainReader, + parentChainBlockHash: parentChainBlockHash, + preimages: make(daprovider.PreimagesMap), + relevantLogsTxIndexes: make(map[uint]struct{}), } } @@ -88,6 +90,7 @@ func (rr *ReceiptRecorder) Initialize(ctx context.Context) error { rr.receipts = receipts rr.trieDB = tdb rr.blockReceiptHash = root + rr.parentChainBlockNumber = block.NumberU64() return nil } @@ -98,6 +101,9 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH if rr.parentChainBlockHash != parentChainBlockHash { return nil, fmt.Errorf("parentChainBlockHash mismatch. 
expected: %v got: %v", rr.parentChainBlockHash, parentChainBlockHash) } + if _, recorded := rr.relevantLogsTxIndexes[txIndex]; recorded { + return rr.receipts[txIndex].Logs, nil + } // #nosec G115 if int(txIndex) >= len(rr.receipts) { return nil, fmt.Errorf("index out of range: %d", txIndex) @@ -134,8 +140,10 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH // We use this same trick in validation as well in order to link a tx with its logs for _, log := range receipt.Logs { log.TxIndex = txIndex + log.BlockHash = parentChainBlockHash + log.BlockNumber = rr.parentChainBlockNumber } - rr.relevantLogsTxIndexes = append(rr.relevantLogsTxIndexes, txIndex) + rr.relevantLogsTxIndexes[txIndex] = struct{}{} return receipt.Logs, nil } @@ -153,8 +161,12 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc // to the preimages map as a value to the key represented by parentChainBlockHash. // TODO: If we use parentChainBlockHash as the key for header- then we need to modify this implementation func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { + var relevantLogsTxIndexes []uint + for k := range rr.relevantLogsTxIndexes { + relevantLogsTxIndexes = append(relevantLogsTxIndexes, k) + } var buf bytes.Buffer - if err := rlp.Encode(&buf, rr.relevantLogsTxIndexes); err != nil { + if err := rlp.Encode(&buf, relevantLogsTxIndexes); err != nil { return nil, err } if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index d52727df9b..cb6dd20943 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -37,6 +37,8 @@ func (rf *receiptFetcherForBlock) LogsForTxIndex(ctx context.Context, parentChai // This is needed to enable fetching corresponding tx from the txFetcher for _, log := range receipt.Logs { log.TxIndex = txIndex + log.BlockHash = rf.header.Hash() + log.BlockNumber = 
rf.header.Number.Uint64() } return receipt.Logs, nil } From ddbd9f43651e3164d2bc0d8af00c483a2a5239d0 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 23:07:33 +0530 Subject: [PATCH 33/42] fix test --- .../receipt_recorder_and_receipt_fetcher_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go index b85c419ee4..57f5c64720 100644 --- a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go +++ b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go @@ -75,6 +75,13 @@ func TestRecordingOfReceiptPreimagesAndFetchingLogsFromPreimages(t *testing.T) { blockBody := &types.Body{Transactions: txs} block := types.NewBlock(blockHeader, blockBody, receipts, trie.NewStackTrie(nil)) blockReader.blocks[block.Hash()] = block + // Fill in blockHash and BlockNumber fields of the logs + for _, receipt := range receipts { + for _, log := range receipt.Logs { + log.BlockHash = block.Hash() + log.BlockNumber = block.NumberU64() + } + } recorder := melrecording.NewReceiptRecorder(blockReader, block.Hash()) require.NoError(t, recorder.Initialize(ctx)) From c9c2421d4d13c0010e5d15728ab84412f67b500b Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 5 Jan 2026 23:07:33 +0530 Subject: [PATCH 34/42] fix test --- .../receipt_recorder_and_receipt_fetcher_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go index b85c419ee4..57f5c64720 100644 --- a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go +++ b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go @@ -75,6 +75,13 @@ func TestRecordingOfReceiptPreimagesAndFetchingLogsFromPreimages(t *testing.T) { blockBody := &types.Body{Transactions: txs} block := types.NewBlock(blockHeader, blockBody, receipts, trie.NewStackTrie(nil)) 
blockReader.blocks[block.Hash()] = block + // Fill in blockHash and BlockNumber fields of the logs + for _, receipt := range receipts { + for _, log := range receipt.Logs { + log.BlockHash = block.Hash() + log.BlockNumber = block.NumberU64() + } + } recorder := melrecording.NewReceiptRecorder(blockReader, block.Hash()) require.NoError(t, recorder.Initialize(ctx)) From a015068d9806dc5fefed5f254b83ddb0102b0e1d Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 6 Jan 2026 12:25:37 +0530 Subject: [PATCH 35/42] address PR comments --- arbnode/mel/recording/dap_reader_source.go | 14 +++++---- arbnode/mel/recording/delayed_msg_database.go | 24 +++++++++++---- arbnode/mel/recording/receipt_recorder.go | 30 +++++++++++-------- .../mel/recording/receipt_recorder_test.go | 3 +- arbnode/mel/recording/tx_recorder.go | 28 ++++++++++------- arbnode/mel/recording/tx_recorder_test.go | 3 +- cmd/mel-replay/delayed_message_db_test.go | 7 +++-- 7 files changed, 71 insertions(+), 38 deletions(-) diff --git a/arbnode/mel/recording/dap_reader_source.go b/arbnode/mel/recording/dap_reader_source.go index bc6ad1db29..590b24b630 100644 --- a/arbnode/mel/recording/dap_reader_source.go +++ b/arbnode/mel/recording/dap_reader_source.go @@ -2,6 +2,7 @@ package melrecording import ( "context" + "errors" "github.com/ethereum/go-ethereum/common" @@ -47,12 +48,17 @@ type RecordingDAPReaderSource struct { preimages daprovider.PreimagesMap } -func NewRecordingDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource) *RecordingDAPReaderSource { +// NewRecordingDAPReaderSource returns RecordingDAPReaderSource that records +// preimages related to sequencer batches posted to DA into the given preimages map +func NewRecordingDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource, preimages daprovider.PreimagesMap) (*RecordingDAPReaderSource, error) { + if preimages == nil { + return nil, errors.New("preimages recording destination cannot be nil") + 
} return &RecordingDAPReaderSource{ validatorCtx: validatorCtx, dapReaders: dapReaders, - preimages: make(daprovider.PreimagesMap), - } + preimages: preimages, + }, nil } func (s *RecordingDAPReaderSource) GetReader(headerByte byte) daprovider.Reader { @@ -63,5 +69,3 @@ func (s *RecordingDAPReaderSource) GetReader(headerByte byte) daprovider.Reader preimages: s.preimages, } } - -func (s *RecordingDAPReaderSource) Preimages() daprovider.PreimagesMap { return s.preimages } diff --git a/arbnode/mel/recording/delayed_msg_database.go b/arbnode/mel/recording/delayed_msg_database.go index c6852b50d9..6e69267f99 100644 --- a/arbnode/mel/recording/delayed_msg_database.go +++ b/arbnode/mel/recording/delayed_msg_database.go @@ -14,6 +14,8 @@ import ( "github.com/offchainlabs/nitro/arbnode/db/schema" "github.com/offchainlabs/nitro/arbnode/mel" "github.com/offchainlabs/nitro/arbos/merkleAccumulator" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" ) // DelayedMsgDatabase holds an ethdb.KeyValueStore that contains delayed messages stored by native MEL and implements DelayedMessageDatabase @@ -24,14 +26,26 @@ type DelayedMsgDatabase struct { initialized bool } -func NewDelayedMsgDatabase(db ethdb.KeyValueStore) *DelayedMsgDatabase { - return &DelayedMsgDatabase{db, make(map[common.Hash][]byte), false} +// NewDelayedMsgDatabase returns DelayedMsgDatabase that records preimages related +// to the delayed messages needed for MEL validation into the given preimages map +func NewDelayedMsgDatabase(db ethdb.KeyValueStore, preimages daprovider.PreimagesMap) (*DelayedMsgDatabase, error) { + if preimages == nil { + return nil, errors.New("preimages recording destination cannot be nil") + } + if _, ok := preimages[arbutil.Keccak256PreimageType]; !ok { + preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + } + return &DelayedMsgDatabase{ + db: db, + preimages: preimages[arbutil.Keccak256PreimageType], + initialized: false, + }, nil 
} func (r *DelayedMsgDatabase) initialize(ctx context.Context, state *mel.State) error { var acc *merkleAccumulator.MerkleAccumulator for i := state.ParentChainBlockNumber; i > 0; i-- { - seenState, err := getState(ctx, r.db, i) + seenState, err := getState(r.db, i) if err != nil { return err } @@ -83,8 +97,6 @@ func (r *DelayedMsgDatabase) initialize(ctx context.Context, state *mel.State) e return nil } -func (r *DelayedMsgDatabase) Preimages() map[common.Hash][]byte { return r.preimages } - func (r *DelayedMsgDatabase) ReadDelayedMessage(ctx context.Context, state *mel.State, index uint64) (*mel.DelayedInboxMessage, error) { if index == 0 { // Init message // This message cannot be found in the database as it is supposed to be seen and read in the same block, so we persist that in DelayedMessageBacklog @@ -117,7 +129,7 @@ func fetchDelayedMessage(db ethdb.KeyValueStore, index uint64) (*mel.DelayedInbo return &delayed, nil } -func getState(ctx context.Context, db ethdb.KeyValueStore, parentChainBlockNumber uint64) (*mel.State, error) { +func getState(db ethdb.KeyValueStore, parentChainBlockNumber uint64) (*mel.State, error) { state, err := read.Value[mel.State](db, read.Key(schema.MelStatePrefix, parentChainBlockNumber)) if err != nil { return nil, err diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index c9d71554e7..cdbaed3a91 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -18,28 +18,39 @@ import ( "github.com/offchainlabs/nitro/daprovider" ) +// ReceiptRecorder records preimages corresponding to the receipts of a parent chain block +// needed during the message extraction. 
These preimages are needed for MEL validation and +// is used in creation of the validation entries by the MEL validator type ReceiptRecorder struct { parentChainReader BlockReader parentChainBlockHash common.Hash - preimages daprovider.PreimagesMap + recordPreimages daprovider.PreimageRecorder receipts []*types.Receipt logs []*types.Log trieDB *triedb.Database blockReceiptHash common.Hash } +// NewReceiptRecorder returns ReceiptRecorder that records +// the receipt preimages into the given preimages map func NewReceiptRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, preimages daprovider.PreimagesMap, -) *ReceiptRecorder { +) (*ReceiptRecorder, error) { + if preimages == nil { + return nil, errors.New("preimages recording destination cannot be nil") + } return &ReceiptRecorder{ parentChainReader: parentChainReader, parentChainBlockHash: parentChainBlockHash, - preimages: preimages, - } + recordPreimages: daprovider.RecordPreimagesTo(preimages), + }, nil } +// Initialize must be called first to setup the recording trie database and store all the +// block receipts into the triedb. Without this, preimage recording is not possible and +// the other functions will error out if called beforehand func (rr *ReceiptRecorder) Initialize(ctx context.Context) error { block, err := rr.parentChainReader.BlockByHash(ctx, rr.parentChainBlockHash) if err != nil { @@ -54,7 +65,7 @@ func (rr *ReceiptRecorder) Initialize(ctx context.Context) error { for i, tx := range txs { receipt, err := rr.parentChainReader.TransactionReceipt(ctx, tx.Hash()) if err != nil { - return fmt.Errorf("error fetching receipt for tx: %v", tx.Hash()) + return fmt.Errorf("error fetching receipt for tx: %v, blockHash: %v", tx.Hash(), block.Hash()) } receipts = append(receipts, receipt) rr.logs = append(rr.logs, receipt.Logs...) 
@@ -103,7 +114,7 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH } recordingDB := &TxsAndReceiptsDatabase{ underlying: rr.trieDB, - recorder: daprovider.RecordPreimagesTo(rr.preimages), // RecordingDB will record relevant preimages into tr.preimages + recorder: rr.recordPreimages, // RecordingDB will record relevant preimages into the given preimagesmap } recordingTDB := triedb.NewDatabase(recordingDB, nil) receiptsTrie, err := trie.New(trie.TrieID(rr.blockReceiptHash), recordingTDB) @@ -123,10 +134,7 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH return nil, fmt.Errorf("failed to unmarshal receipt: %w", err) } // Add the receipt marshaled binary by hash to the preimages map - if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { - rr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) - } - rr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(receiptBytes)] = receiptBytes + rr.recordPreimages(crypto.Keccak256Hash(receiptBytes), receiptBytes, arbutil.Keccak256PreimageType) // Fill in the TxIndex (give as input to this method) into the logs so that Tx recording // is possible. This field is one of the derived fields of Log hence won't be stored in trie. 
// @@ -146,5 +154,3 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc } return rr.logs, nil } - -func (tr *ReceiptRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } diff --git a/arbnode/mel/recording/receipt_recorder_test.go b/arbnode/mel/recording/receipt_recorder_test.go index 6fefb8ebc9..2e7c92bde2 100644 --- a/arbnode/mel/recording/receipt_recorder_test.go +++ b/arbnode/mel/recording/receipt_recorder_test.go @@ -67,7 +67,8 @@ func TestLogsForTxIndex(t *testing.T) { ) blockReader.blocks[block.Hash()] = block preimages := make(daprovider.PreimagesMap) - recorder := NewReceiptRecorder(blockReader, block.Hash(), preimages) + recorder, err := NewReceiptRecorder(blockReader, block.Hash(), preimages) + require.NoError(t, err) require.NoError(t, recorder.Initialize(ctx)) txIndex := uint(3) diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 56472cc457..d86786a54c 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -23,27 +23,38 @@ type BlockReader interface { TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) } +// TransactionRecorder records preimages corresponding to the transactions of a parent chain block +// needed during the message extraction. 
These preimages are needed for MEL validation and +// is used in creation of the validation entries by the MEL validator type TransactionRecorder struct { parentChainReader BlockReader parentChainBlockHash common.Hash - preimages daprovider.PreimagesMap + recordPreimages daprovider.PreimageRecorder txs []*types.Transaction trieDB *triedb.Database blockTxHash common.Hash } +// NewTransactionRecorder returns TransactionRecorder that records +// the transaction preimages into the given preimages map func NewTransactionRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, preimages daprovider.PreimagesMap, -) *TransactionRecorder { +) (*TransactionRecorder, error) { + if preimages == nil { + return nil, errors.New("preimages recording destination cannot be nil") + } return &TransactionRecorder{ parentChainReader: parentChainReader, parentChainBlockHash: parentChainBlockHash, - preimages: preimages, - } + recordPreimages: daprovider.RecordPreimagesTo(preimages), + }, nil } +// Initialize must be called first to setup the recording trie database and store all the +// transactions into the triedb. Without this, preimage recording is not possible and +// the other functions will error out if called beforehand func (tr *TransactionRecorder) Initialize(ctx context.Context) error { block, err := tr.parentChainReader.BlockByHash(ctx, tr.parentChainBlockHash) if err != nil { @@ -100,7 +111,7 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. 
} recordingDB := &TxsAndReceiptsDatabase{ underlying: tr.trieDB, - recorder: daprovider.RecordPreimagesTo(tr.preimages), // RecordingDB will record relevant preimages into tr.preimages + recorder: tr.recordPreimages, // RecordingDB will record relevant preimages into the given preimagesmap } recordingTDB := triedb.NewDatabase(recordingDB, nil) txsTrie, err := trie.New(trie.TrieID(tr.blockTxHash), recordingTDB) @@ -121,11 +132,6 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. return nil, fmt.Errorf("failed to unmarshal transaction: %w", err) } // Add the tx marshaled binary by hash to the preimages map - if _, ok := tr.preimages[arbutil.Keccak256PreimageType]; !ok { - tr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) - } - tr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(txBytes)] = txBytes + tr.recordPreimages(crypto.Keccak256Hash(txBytes), txBytes, arbutil.Keccak256PreimageType) return tx, nil } - -func (tr *TransactionRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } diff --git a/arbnode/mel/recording/tx_recorder_test.go b/arbnode/mel/recording/tx_recorder_test.go index 83582aed48..e681c6a132 100644 --- a/arbnode/mel/recording/tx_recorder_test.go +++ b/arbnode/mel/recording/tx_recorder_test.go @@ -69,7 +69,8 @@ func TestTransactionByLog(t *testing.T) { }, } preimages := make(daprovider.PreimagesMap) - recorder := NewTransactionRecorder(blockReader, block.Hash(), preimages) + recorder, err := NewTransactionRecorder(blockReader, block.Hash(), preimages) + require.NoError(t, err) require.NoError(t, recorder.Initialize(ctx)) log := &types.Log{ diff --git a/cmd/mel-replay/delayed_message_db_test.go b/cmd/mel-replay/delayed_message_db_test.go index 995ae45d00..69ce681cd2 100644 --- a/cmd/mel-replay/delayed_message_db_test.go +++ b/cmd/mel-replay/delayed_message_db_test.go @@ -17,6 +17,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/mel/runner" 
"github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" ) var _ preimageResolver = (*mockPreimageResolver)(nil) @@ -70,7 +71,9 @@ func TestRecordingPreimagesForReadDelayedMessage(t *testing.T) { require.NoError(t, state.GenerateDelayedMessagesSeenMerklePartialsAndRoot()) require.NoError(t, melDB.SaveState(ctx, state)) - recordingDB := melrecording.NewDelayedMsgDatabase(db) + preimages := make(daprovider.PreimagesMap) + recordingDB, err := melrecording.NewDelayedMsgDatabase(db, preimages) + require.NoError(t, err) for i := startBlockNum; i < numMsgs; i++ { require.NoError(t, state.AccumulateDelayedMessage(delayedMessages[i])) state.DelayedMessagesSeen++ @@ -88,7 +91,7 @@ func TestRecordingPreimagesForReadDelayedMessage(t *testing.T) { // Test reading in wasm mode delayedDB := &delayedMessageDatabase{ &testPreimageResolver{ - preimages: recordingDB.Preimages(), + preimages: preimages[arbutil.Keccak256PreimageType], }, } for i := startBlockNum; i < numMsgsToRead; i++ { From 97b40d0de82c9ec2c8f665c2dca715ee3c54b70f Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 6 Jan 2026 15:18:18 +0530 Subject: [PATCH 36/42] code refactor --- arbnode/mel/recording/dap_reader_source.go | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arbnode/mel/recording/dap_reader_source.go b/arbnode/mel/recording/dap_reader_source.go index 590b24b630..bc8f62ed2d 100644 --- a/arbnode/mel/recording/dap_reader_source.go +++ b/arbnode/mel/recording/dap_reader_source.go @@ -12,16 +12,16 @@ import ( "github.com/offchainlabs/nitro/validator" ) -// RecordingDAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL validator for creation +// DAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL validator for creation // of validation entry. 
Since ExtractMessages function would use daprovider.Reader interface to fetch the sequencer batch via RecoverPayload // we implement collecting of preimages as well in the same method and record it -type RecordingDAPReader struct { +type DAPReader struct { validatorCtx context.Context reader daprovider.Reader preimages daprovider.PreimagesMap } -func (r *RecordingDAPReader) RecoverPayload(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadResult] { +func (r *DAPReader) RecoverPayload(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadResult] { promise := r.reader.RecoverPayloadAndPreimages(batchNum, batchBlockHash, sequencerMsg) result, err := promise.Await(r.validatorCtx) if err != nil { @@ -31,39 +31,39 @@ func (r *RecordingDAPReader) RecoverPayload(batchNum uint64, batchBlockHash comm return containers.NewReadyPromise(daprovider.PayloadResult{Payload: result.Payload}, nil) } -func (r *RecordingDAPReader) CollectPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PreimagesResult] { +func (r *DAPReader) CollectPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PreimagesResult] { return r.reader.CollectPreimages(batchNum, batchBlockHash, sequencerMsg) } -func (r *RecordingDAPReader) RecoverPayloadAndPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadAndPreimagesResult] { +func (r *DAPReader) RecoverPayloadAndPreimages(batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte) containers.PromiseInterface[daprovider.PayloadAndPreimagesResult] { return r.reader.RecoverPayloadAndPreimages(batchNum, batchBlockHash, sequencerMsg) } -// RecordingDAPReaderSource is used for recording preimages related to sequencer batches stored by da providers, given 
a +// DAPReaderSource is used for recording preimages related to sequencer batches stored by da providers, given a // DapReaderSource it implements GetReader method to return a daprovider.Reader interface that records preimgaes. It takes // in a context variable (corresponding to creation of validation entry) from the MEL validator -type RecordingDAPReaderSource struct { +type DAPReaderSource struct { validatorCtx context.Context dapReaders arbstate.DapReaderSource preimages daprovider.PreimagesMap } -// NewRecordingDAPReaderSource returns RecordingDAPReaderSource that records -// preimages related to sequencer batches posted to DA into the given preimages map -func NewRecordingDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource, preimages daprovider.PreimagesMap) (*RecordingDAPReaderSource, error) { +// NewDAPReaderSource returns DAPReaderSource that records preimages +// related to sequencer batches posted to DA into the given preimages map +func NewDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource, preimages daprovider.PreimagesMap) (*DAPReaderSource, error) { if preimages == nil { return nil, errors.New("preimages recording destination cannot be nil") } - return &RecordingDAPReaderSource{ + return &DAPReaderSource{ validatorCtx: validatorCtx, dapReaders: dapReaders, preimages: preimages, }, nil } -func (s *RecordingDAPReaderSource) GetReader(headerByte byte) daprovider.Reader { +func (s *DAPReaderSource) GetReader(headerByte byte) daprovider.Reader { reader := s.dapReaders.GetReader(headerByte) - return &RecordingDAPReader{ + return &DAPReader{ validatorCtx: s.validatorCtx, reader: reader, preimages: s.preimages, From 95646c244e671477b6b1f2ad516c17b125861aaf Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 6 Jan 2026 17:12:15 +0530 Subject: [PATCH 37/42] address PR comments --- arbnode/mel/extraction/batch_lookup.go | 3 ++- .../mel/extraction/delayed_message_lookup.go | 6 ++++-- 
arbnode/mel/recording/dap_reader_source.go | 6 +++--- arbnode/mel/recording/receipt_recorder.go | 4 +++- cmd/mel-replay/receipt_fetcher.go | 6 ++++-- mel-replay/mel-replay.go | 17 +++++++++++++++++ 6 files changed, 33 insertions(+), 9 deletions(-) create mode 100644 mel-replay/mel-replay.go diff --git a/arbnode/mel/extraction/batch_lookup.go b/arbnode/mel/extraction/batch_lookup.go index 4d535317c8..d7184d0354 100644 --- a/arbnode/mel/extraction/batch_lookup.go +++ b/arbnode/mel/extraction/batch_lookup.go @@ -58,7 +58,8 @@ func ParseBatchesFromBlock( return nil, nil, fmt.Errorf("error fetching tx by hash: %v in ParseBatchesFromBlock: %w ", log.TxHash, err) } - // Record this log for MEL validation + // Record this log for MEL validation. This is a very cheap operation in native mode + // and is optimized for recording mode as well. if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { return nil, nil, fmt.Errorf("error recording relevant logs: %w", err) } diff --git a/arbnode/mel/extraction/delayed_message_lookup.go b/arbnode/mel/extraction/delayed_message_lookup.go index a7da8b16bb..5cd5b915db 100644 --- a/arbnode/mel/extraction/delayed_message_lookup.go +++ b/arbnode/mel/extraction/delayed_message_lookup.go @@ -37,7 +37,8 @@ func parseDelayedMessagesFromBlock( // On Arbitrum One, this is the bridge contract which emits a MessageDelivered event. if log.Address == melState.DelayedMessagePostingTargetAddress { relevantLogs = append(relevantLogs, log) - // Record this log for MEL validation + // Record this log for MEL validation. This is a very cheap operation in native mode + // and is optimized for recording mode as well. 
if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { return nil, fmt.Errorf("error recording relevant logs: %w", err) } @@ -80,7 +81,8 @@ func parseDelayedMessagesFromBlock( return nil, err } messageData[common.BigToHash(msgNum)] = msg - // Record this log for MEL validation + // Record this log for MEL validation. This is a very cheap operation in native mode + // and is optimized for recording mode as well. if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), inboxMsgLog.TxIndex); err != nil { return nil, fmt.Errorf("error recording relevant logs: %w", err) } diff --git a/arbnode/mel/recording/dap_reader_source.go b/arbnode/mel/recording/dap_reader_source.go index bc8f62ed2d..4f63b5627f 100644 --- a/arbnode/mel/recording/dap_reader_source.go +++ b/arbnode/mel/recording/dap_reader_source.go @@ -12,9 +12,9 @@ import ( "github.com/offchainlabs/nitro/validator" ) -// DAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL validator for creation -// of validation entry. Since ExtractMessages function would use daprovider.Reader interface to fetch the sequencer batch via RecoverPayload -// we implement collecting of preimages as well in the same method and record it +// DAPReader implements recording of data avaialability preimages when melextraction.ExtractMessages function is called by +// MEL validator for creation of validation entry. 
Since ExtractMessages function would use daprovider.Reader interface to +// fetch the sequencer batch via RecoverPayload we implement collecting of preimages as well in the same method and record it type DAPReader struct { validatorCtx context.Context reader daprovider.Reader diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index be22ba7325..1ff228e0fd 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -17,6 +17,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/mel-replay" ) // ReceiptRecorder records preimages corresponding to the receipts of a parent chain block @@ -178,6 +179,7 @@ func (rr *ReceiptRecorder) CollectTxIndicesPreimage() error { if err := rlp.Encode(&buf, relevantLogsTxIndexes); err != nil { return err } - rr.recordPreimages(rr.parentChainBlockHash, buf.Bytes(), arbutil.Keccak256PreimageType) + relevantTxIndicesKey := melreplay.RelevantTxIndexesKey(rr.parentChainBlockHash) + rr.recordPreimages(relevantTxIndicesKey, buf.Bytes(), arbutil.Keccak256PreimageType) return nil } diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index cb6dd20943..0dabdbadb0 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/triedb" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/mel-replay" ) type receiptFetcherForBlock struct { @@ -49,7 +50,8 @@ func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentCh if rf.header.Hash() != parentChainBlockHash { return nil, errors.New("parentChainBlockHash mismatch") } - txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, rf.header.Hash()) + relevantTxIndicesKey := melreplay.RelevantTxIndexesKey(rf.header.Hash()) + txIndexData, err := 
rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, relevantTxIndicesKey) if err != nil { return nil, err } @@ -68,7 +70,7 @@ func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentCh return relevantLogs, nil } -// LogsForBlockHashAllLogs is kept, in case we go with an implementation of returning all logs present in a block +// TODO: LogsForBlockHashAllLogs is kept, in case we go with an implementation of returning all logs present in a block func (rf *receiptFetcherForBlock) LogsForBlockHashAllLogs(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { if rf.header.Hash() != parentChainBlockHash { return nil, errors.New("parentChainBlockHash mismatch") diff --git a/mel-replay/mel-replay.go b/mel-replay/mel-replay.go new file mode 100644 index 0000000000..7c2661c605 --- /dev/null +++ b/mel-replay/mel-replay.go @@ -0,0 +1,17 @@ +// Copyright 2025-2026, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package melreplay + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// RELEVANT_TX_INDEXES_PREFIX represents the prefix appended to a blockHash and the hash of the resulting string +// maps to the list of MEL-relevant tx indexes in a parent chain block +const RELEVANT_TX_INDEXES_PREFIX string = "TX_INDEX_DATA" + +func RelevantTxIndexesKey(parentChainBlockHash common.Hash) common.Hash { + return crypto.Keccak256Hash([]byte(RELEVANT_TX_INDEXES_PREFIX), parentChainBlockHash.Bytes()) +} From 5659d4278bbecf5c096a3a19f7145a0c1d45334a Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 7 Jan 2026 16:16:28 +0530 Subject: [PATCH 38/42] merge mel-txandreceipt-fetcher --- arbnode/mel/extraction/batch_lookup.go | 3 +- arbnode/mel/extraction/batch_lookup_test.go | 2 + .../mel/extraction/delayed_message_lookup.go | 6 ++- .../extraction/delayed_message_lookup_test.go | 9 ++++ 
arbnode/mel/recording/dap_reader_source.go | 20 ++++---- arbnode/mel/recording/delayed_msg_database.go | 24 +++++++--- arbnode/mel/recording/receipt_recorder.go | 46 +++++++++++-------- arbnode/mel/recording/tx_recorder.go | 29 +++++++----- .../mel/runner/logs_and_headers_fetcher.go | 3 +- .../runner/logs_and_headers_fetcher_test.go | 12 +++-- cmd/mel-replay/delayed_message_db_test.go | 7 ++- cmd/mel-replay/receipt_fetcher.go | 6 ++- ...ceipt_recorder_and_receipt_fetcher_test.go | 9 ++-- .../tx_recorder_and_tx_fetcher_test.go | 8 ++-- mel-replay/mel-replay.go | 17 +++++++ 15 files changed, 139 insertions(+), 62 deletions(-) create mode 100644 mel-replay/mel-replay.go diff --git a/arbnode/mel/extraction/batch_lookup.go b/arbnode/mel/extraction/batch_lookup.go index 4d535317c8..d7184d0354 100644 --- a/arbnode/mel/extraction/batch_lookup.go +++ b/arbnode/mel/extraction/batch_lookup.go @@ -58,7 +58,8 @@ func ParseBatchesFromBlock( return nil, nil, fmt.Errorf("error fetching tx by hash: %v in ParseBatchesFromBlock: %w ", log.TxHash, err) } - // Record this log for MEL validation + // Record this log for MEL validation. This is a very cheap operation in native mode + // and is optimized for recording mode as well. 
if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { return nil, nil, fmt.Errorf("error recording relevant logs: %w", err) } diff --git a/arbnode/mel/extraction/batch_lookup_test.go b/arbnode/mel/extraction/batch_lookup_test.go index 4eb966b44a..5f163fbe05 100644 --- a/arbnode/mel/extraction/batch_lookup_test.go +++ b/arbnode/mel/extraction/batch_lookup_test.go @@ -244,6 +244,7 @@ func Test_parseBatchesFromBlock(t *testing.T) { receipts, trie.NewStackTrie(nil), ) + receipt.BlockHash = block.Hash() blockLogsFetcher := newMockBlockLogsFetcher(receipts) eventUnpacker := &mockEventUnpacker{ events: []*bridgegen.SequencerInboxSequencerBatchDelivered{event}, @@ -322,6 +323,7 @@ func Test_parseBatchesFromBlock_outOfOrderBatches(t *testing.T) { receipts, trie.NewStackTrie(nil), ) + receipt.BlockHash = block.Hash() blockLogsFetcher := newMockBlockLogsFetcher(receipts) eventUnpacker := &mockEventUnpacker{ events: []*bridgegen.SequencerInboxSequencerBatchDelivered{ diff --git a/arbnode/mel/extraction/delayed_message_lookup.go b/arbnode/mel/extraction/delayed_message_lookup.go index a7da8b16bb..5cd5b915db 100644 --- a/arbnode/mel/extraction/delayed_message_lookup.go +++ b/arbnode/mel/extraction/delayed_message_lookup.go @@ -37,7 +37,8 @@ func parseDelayedMessagesFromBlock( // On Arbitrum One, this is the bridge contract which emits a MessageDelivered event. if log.Address == melState.DelayedMessagePostingTargetAddress { relevantLogs = append(relevantLogs, log) - // Record this log for MEL validation + // Record this log for MEL validation. This is a very cheap operation in native mode + // and is optimized for recording mode as well. 
if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), log.TxIndex); err != nil { return nil, fmt.Errorf("error recording relevant logs: %w", err) } @@ -80,7 +81,8 @@ func parseDelayedMessagesFromBlock( return nil, err } messageData[common.BigToHash(msgNum)] = msg - // Record this log for MEL validation + // Record this log for MEL validation. This is a very cheap operation in native mode + // and is optimized for recording mode as well. if _, err := logsFetcher.LogsForTxIndex(ctx, parentChainHeader.Hash(), inboxMsgLog.TxIndex); err != nil { return nil, fmt.Errorf("error recording relevant logs: %w", err) } diff --git a/arbnode/mel/extraction/delayed_message_lookup_test.go b/arbnode/mel/extraction/delayed_message_lookup_test.go index b0f5aab8c4..9aba84548d 100644 --- a/arbnode/mel/extraction/delayed_message_lookup_test.go +++ b/arbnode/mel/extraction/delayed_message_lookup_test.go @@ -122,6 +122,7 @@ func Test_parseDelayedMessagesFromBlock(t *testing.T) { receipts, trie.NewStackTrie(nil), ) + receipt.BlockHash = block.Hash() blockLogsFetcher = newMockBlockLogsFetcher(receipts) _, err := parseDelayedMessagesFromBlock( ctx, @@ -200,6 +201,8 @@ func Test_parseDelayedMessagesFromBlock(t *testing.T) { receipts, trie.NewStackTrie(nil), ) + receipt1.BlockHash = block.Hash() + receipt2.BlockHash = block.Hash() blockLogsFetcher = newMockBlockLogsFetcher(receipts) _, err = parseDelayedMessagesFromBlock( ctx, @@ -298,6 +301,8 @@ func Test_parseDelayedMessagesFromBlock(t *testing.T) { receipts, trie.NewStackTrie(nil), ) + receipt1.BlockHash = block.Hash() + receipt2.BlockHash = block.Hash() blockLogsFetcher = newMockBlockLogsFetcher(receipts) delayedMessages, err := parseDelayedMessagesFromBlock( ctx, @@ -392,6 +397,8 @@ func Test_parseDelayedMessagesFromBlock(t *testing.T) { receipts, trie.NewStackTrie(nil), ) + receipt1.BlockHash = block.Hash() + receipt2.BlockHash = block.Hash() blockLogsFetcher = newMockBlockLogsFetcher(receipts) _, err = 
parseDelayedMessagesFromBlock( ctx, @@ -490,6 +497,8 @@ func Test_parseDelayedMessagesFromBlock(t *testing.T) { receipts, trie.NewStackTrie(nil), ) + receipt1.BlockHash = block.Hash() + receipt2.BlockHash = block.Hash() blockLogsFetcher = newMockBlockLogsFetcher(receipts) delayedMessages, err := parseDelayedMessagesFromBlock( ctx, diff --git a/arbnode/mel/recording/dap_reader_source.go b/arbnode/mel/recording/dap_reader_source.go index 24c8f05422..4f63b5627f 100644 --- a/arbnode/mel/recording/dap_reader_source.go +++ b/arbnode/mel/recording/dap_reader_source.go @@ -2,6 +2,7 @@ package melrecording import ( "context" + "errors" "github.com/ethereum/go-ethereum/common" @@ -11,9 +12,9 @@ import ( "github.com/offchainlabs/nitro/validator" ) -// DAPReader implements recording of preimages when melextraction.ExtractMessages function is called by MEL validator for creation -// of validation entry. Since ExtractMessages function would use daprovider.Reader interface to fetch the sequencer batch via RecoverPayload -// we implement collecting of preimages as well in the same method and record it +// DAPReader implements recording of data avaialability preimages when melextraction.ExtractMessages function is called by +// MEL validator for creation of validation entry. 
Since ExtractMessages function would use daprovider.Reader interface to +// fetch the sequencer batch via RecoverPayload we implement collecting of preimages as well in the same method and record it type DAPReader struct { validatorCtx context.Context reader daprovider.Reader @@ -47,12 +48,17 @@ type DAPReaderSource struct { preimages daprovider.PreimagesMap } -func NewDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource) *DAPReaderSource { +// NewDAPReaderSource returns DAPReaderSource that records preimages +// related to sequencer batches posted to DA into the given preimages map +func NewDAPReaderSource(validatorCtx context.Context, dapReaders arbstate.DapReaderSource, preimages daprovider.PreimagesMap) (*DAPReaderSource, error) { + if preimages == nil { + return nil, errors.New("preimages recording destination cannot be nil") + } return &DAPReaderSource{ validatorCtx: validatorCtx, dapReaders: dapReaders, - preimages: make(daprovider.PreimagesMap), - } + preimages: preimages, + }, nil } func (s *DAPReaderSource) GetReader(headerByte byte) daprovider.Reader { @@ -63,5 +69,3 @@ func (s *DAPReaderSource) GetReader(headerByte byte) daprovider.Reader { preimages: s.preimages, } } - -func (s *DAPReaderSource) Preimages() daprovider.PreimagesMap { return s.preimages } diff --git a/arbnode/mel/recording/delayed_msg_database.go b/arbnode/mel/recording/delayed_msg_database.go index c6852b50d9..6e69267f99 100644 --- a/arbnode/mel/recording/delayed_msg_database.go +++ b/arbnode/mel/recording/delayed_msg_database.go @@ -14,6 +14,8 @@ import ( "github.com/offchainlabs/nitro/arbnode/db/schema" "github.com/offchainlabs/nitro/arbnode/mel" "github.com/offchainlabs/nitro/arbos/merkleAccumulator" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" ) // DelayedMsgDatabase holds an ethdb.KeyValueStore that contains delayed messages stored by native MEL and implements DelayedMessageDatabase @@ -24,14 +26,26 @@ type 
DelayedMsgDatabase struct { initialized bool } -func NewDelayedMsgDatabase(db ethdb.KeyValueStore) *DelayedMsgDatabase { - return &DelayedMsgDatabase{db, make(map[common.Hash][]byte), false} +// NewDelayedMsgDatabase returns DelayedMsgDatabase that records preimages related +// to the delayed messages needed for MEL validation into the given preimages map +func NewDelayedMsgDatabase(db ethdb.KeyValueStore, preimages daprovider.PreimagesMap) (*DelayedMsgDatabase, error) { + if preimages == nil { + return nil, errors.New("preimages recording destination cannot be nil") + } + if _, ok := preimages[arbutil.Keccak256PreimageType]; !ok { + preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + } + return &DelayedMsgDatabase{ + db: db, + preimages: preimages[arbutil.Keccak256PreimageType], + initialized: false, + }, nil } func (r *DelayedMsgDatabase) initialize(ctx context.Context, state *mel.State) error { var acc *merkleAccumulator.MerkleAccumulator for i := state.ParentChainBlockNumber; i > 0; i-- { - seenState, err := getState(ctx, r.db, i) + seenState, err := getState(r.db, i) if err != nil { return err } @@ -83,8 +97,6 @@ func (r *DelayedMsgDatabase) initialize(ctx context.Context, state *mel.State) e return nil } -func (r *DelayedMsgDatabase) Preimages() map[common.Hash][]byte { return r.preimages } - func (r *DelayedMsgDatabase) ReadDelayedMessage(ctx context.Context, state *mel.State, index uint64) (*mel.DelayedInboxMessage, error) { if index == 0 { // Init message // This message cannot be found in the database as it is supposed to be seen and read in the same block, so we persist that in DelayedMessageBacklog @@ -117,7 +129,7 @@ func fetchDelayedMessage(db ethdb.KeyValueStore, index uint64) (*mel.DelayedInbo return &delayed, nil } -func getState(ctx context.Context, db ethdb.KeyValueStore, parentChainBlockNumber uint64) (*mel.State, error) { +func getState(db ethdb.KeyValueStore, parentChainBlockNumber uint64) (*mel.State, error) { state, err 
:= read.Value[mel.State](db, read.Key(schema.MelStatePrefix, parentChainBlockNumber)) if err != nil { return nil, err diff --git a/arbnode/mel/recording/receipt_recorder.go b/arbnode/mel/recording/receipt_recorder.go index 4ed1a73162..1ff228e0fd 100644 --- a/arbnode/mel/recording/receipt_recorder.go +++ b/arbnode/mel/recording/receipt_recorder.go @@ -17,13 +17,17 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/mel-replay" ) +// ReceiptRecorder records preimages corresponding to the receipts of a parent chain block +// needed during the message extraction. These preimages are needed for MEL validation and +// is used in creation of the validation entries by the MEL validator type ReceiptRecorder struct { parentChainReader BlockReader parentChainBlockHash common.Hash parentChainBlockNumber uint64 - preimages daprovider.PreimagesMap + recordPreimages daprovider.PreimageRecorder receipts []*types.Receipt logs []*types.Log relevantLogsTxIndexes map[uint]struct{} @@ -31,18 +35,27 @@ type ReceiptRecorder struct { blockReceiptHash common.Hash } +// NewReceiptRecorder returns ReceiptRecorder that records +// the receipt preimages into the given preimages map func NewReceiptRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, -) *ReceiptRecorder { + preimages daprovider.PreimagesMap, +) (*ReceiptRecorder, error) { + if preimages == nil { + return nil, errors.New("preimages recording destination cannot be nil") + } return &ReceiptRecorder{ parentChainReader: parentChainReader, parentChainBlockHash: parentChainBlockHash, - preimages: make(daprovider.PreimagesMap), + recordPreimages: daprovider.RecordPreimagesTo(preimages), relevantLogsTxIndexes: make(map[uint]struct{}), - } + }, nil } +// Initialize must be called first to setup the recording trie database and store all the +// block receipts into the triedb. 
Without this, preimage recording is not possible and +// the other functions will error out if called beforehand func (rr *ReceiptRecorder) Initialize(ctx context.Context) error { block, err := rr.parentChainReader.BlockByHash(ctx, rr.parentChainBlockHash) if err != nil { @@ -57,7 +70,7 @@ func (rr *ReceiptRecorder) Initialize(ctx context.Context) error { for i, tx := range txs { receipt, err := rr.parentChainReader.TransactionReceipt(ctx, tx.Hash()) if err != nil { - return fmt.Errorf("error fetching receipt for tx: %v", tx.Hash()) + return fmt.Errorf("error fetching receipt for tx: %v, blockHash: %v", tx.Hash(), block.Hash()) } receipts = append(receipts, receipt) rr.logs = append(rr.logs, receipt.Logs...) @@ -110,7 +123,7 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH } recordingDB := &TxsAndReceiptsDatabase{ underlying: rr.trieDB, - recorder: daprovider.RecordPreimagesTo(rr.preimages), // RecordingDB will record relevant preimages into tr.preimages + recorder: rr.recordPreimages, // RecordingDB will record relevant preimages into the given preimagesmap } recordingTDB := triedb.NewDatabase(recordingDB, nil) receiptsTrie, err := trie.New(trie.TrieID(rr.blockReceiptHash), recordingTDB) @@ -130,10 +143,7 @@ func (rr *ReceiptRecorder) LogsForTxIndex(ctx context.Context, parentChainBlockH return nil, fmt.Errorf("failed to unmarshal receipt: %w", err) } // Add the receipt marshaled binary by hash to the preimages map - if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { - rr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) - } - rr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(receiptBytes)] = receiptBytes + rr.recordPreimages(crypto.Keccak256Hash(receiptBytes), receiptBytes, arbutil.Keccak256PreimageType) // Fill in the TxIndex (give as input to this method) into the logs so that Tx recording // is possible. 
This field is one of the derived fields of Log hence won't be stored in trie. // @@ -157,21 +167,19 @@ func (rr *ReceiptRecorder) LogsForBlockHash(ctx context.Context, parentChainBloc return rr.logs, nil } -// GetPreimages returns the preimages of recorded receipts, and also adds the array of relevant tx indexes -// to the preimages map as a value to the key represented by parentChainBlockHash. +// CollectTxIndicesPreimage adds the array of relevant tx indexes to the preimages map as a value +// to the key represented by parentChainBlockHash. // TODO: If we use parentChainBlockHash as the key for header- then we need to modify this implementation -func (rr *ReceiptRecorder) GetPreimages() (daprovider.PreimagesMap, error) { +func (rr *ReceiptRecorder) CollectTxIndicesPreimage() error { var relevantLogsTxIndexes []uint for k := range rr.relevantLogsTxIndexes { relevantLogsTxIndexes = append(relevantLogsTxIndexes, k) } var buf bytes.Buffer if err := rlp.Encode(&buf, relevantLogsTxIndexes); err != nil { - return nil, err - } - if _, ok := rr.preimages[arbutil.Keccak256PreimageType]; !ok { - rr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + return err } - rr.preimages[arbutil.Keccak256PreimageType][rr.parentChainBlockHash] = buf.Bytes() - return rr.preimages, nil + relevantTxIndicesKey := melreplay.RelevantTxIndexesKey(rr.parentChainBlockHash) + rr.recordPreimages(relevantTxIndicesKey, buf.Bytes(), arbutil.Keccak256PreimageType) + return nil } diff --git a/arbnode/mel/recording/tx_recorder.go b/arbnode/mel/recording/tx_recorder.go index 5688b3a5af..d86786a54c 100644 --- a/arbnode/mel/recording/tx_recorder.go +++ b/arbnode/mel/recording/tx_recorder.go @@ -23,26 +23,38 @@ type BlockReader interface { TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) } +// TransactionRecorder records preimages corresponding to the transactions of a parent chain block +// needed during the message extraction. 
These preimages are needed for MEL validation and +// is used in creation of the validation entries by the MEL validator type TransactionRecorder struct { parentChainReader BlockReader parentChainBlockHash common.Hash - preimages daprovider.PreimagesMap + recordPreimages daprovider.PreimageRecorder txs []*types.Transaction trieDB *triedb.Database blockTxHash common.Hash } +// NewTransactionRecorder returns TransactionRecorder that records +// the transaction preimages into the given preimages map func NewTransactionRecorder( parentChainReader BlockReader, parentChainBlockHash common.Hash, -) *TransactionRecorder { + preimages daprovider.PreimagesMap, +) (*TransactionRecorder, error) { + if preimages == nil { + return nil, errors.New("preimages recording destination cannot be nil") + } return &TransactionRecorder{ parentChainReader: parentChainReader, parentChainBlockHash: parentChainBlockHash, - preimages: make(daprovider.PreimagesMap), - } + recordPreimages: daprovider.RecordPreimagesTo(preimages), + }, nil } +// Initialize must be called first to setup the recording trie database and store all the +// transactions into the triedb. Without this, preimage recording is not possible and +// the other functions will error out if called beforehand func (tr *TransactionRecorder) Initialize(ctx context.Context) error { block, err := tr.parentChainReader.BlockByHash(ctx, tr.parentChainBlockHash) if err != nil { @@ -99,7 +111,7 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. 
} recordingDB := &TxsAndReceiptsDatabase{ underlying: tr.trieDB, - recorder: daprovider.RecordPreimagesTo(tr.preimages), // RecordingDB will record relevant preimages into tr.preimages + recorder: tr.recordPreimages, // RecordingDB will record relevant preimages into the given preimagesmap } recordingTDB := triedb.NewDatabase(recordingDB, nil) txsTrie, err := trie.New(trie.TrieID(tr.blockTxHash), recordingTDB) @@ -120,11 +132,6 @@ func (tr *TransactionRecorder) TransactionByLog(ctx context.Context, log *types. return nil, fmt.Errorf("failed to unmarshal transaction: %w", err) } // Add the tx marshaled binary by hash to the preimages map - if _, ok := tr.preimages[arbutil.Keccak256PreimageType]; !ok { - tr.preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) - } - tr.preimages[arbutil.Keccak256PreimageType][crypto.Keccak256Hash(txBytes)] = txBytes + tr.recordPreimages(crypto.Keccak256Hash(txBytes), txBytes, arbutil.Keccak256PreimageType) return tx, nil } - -func (tr *TransactionRecorder) GetPreimages() daprovider.PreimagesMap { return tr.preimages } diff --git a/arbnode/mel/runner/logs_and_headers_fetcher.go b/arbnode/mel/runner/logs_and_headers_fetcher.go index 039ccd99b7..d789b74959 100644 --- a/arbnode/mel/runner/logs_and_headers_fetcher.go +++ b/arbnode/mel/runner/logs_and_headers_fetcher.go @@ -13,7 +13,7 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/mel" - melextraction "github.com/offchainlabs/nitro/arbnode/mel/extraction" + "github.com/offchainlabs/nitro/arbnode/mel/extraction" ) type logsAndHeadersFetcher struct { @@ -149,7 +149,6 @@ func (f *logsAndHeadersFetcher) fetchDelayedMessageLogs(ctx context.Context, fro } for _, log := range logs { f.logsByBlockHash[log.BlockHash] = append(f.logsByBlockHash[log.BlockHash], &log) - // Not necessary in native mode but needed to make the behavior consistent with recording mode if _, ok := f.logsByTxIndex[log.BlockHash]; !ok { f.logsByTxIndex[log.BlockHash] 
= make(map[uint][]*types.Log) } diff --git a/arbnode/mel/runner/logs_and_headers_fetcher_test.go b/arbnode/mel/runner/logs_and_headers_fetcher_test.go index 3972eda054..4d49169a6e 100644 --- a/arbnode/mel/runner/logs_and_headers_fetcher_test.go +++ b/arbnode/mel/runner/logs_and_headers_fetcher_test.go @@ -11,7 +11,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/offchainlabs/nitro/arbnode/mel" - melextraction "github.com/offchainlabs/nitro/arbnode/mel/extraction" + "github.com/offchainlabs/nitro/arbnode/mel/extraction" ) func TestLogsFetcher(t *testing.T) { @@ -100,7 +100,13 @@ func TestLogsFetcher(t *testing.T) { require.True(t, reflect.DeepEqual(fetcher.logsByBlockHash[batchBlockHash], batchTxLogs[:2])) // last log shouldn't be returned by the filter query require.True(t, reflect.DeepEqual(fetcher.logsByBlockHash[delayedBlockHash], delayedMsgTxLogs[:3])) // last log shouldn't be returned by the filter query // Verify that logsByTxIndex is correct - require.True(t, len(fetcher.logsByTxIndex) == 1) + require.True(t, len(fetcher.logsByTxIndex) == 2) // for both delayed msg and sequencer batch require.True(t, fetcher.logsByTxIndex[batchBlockHash] != nil) - require.True(t, reflect.DeepEqual(fetcher.logsByTxIndex[batchBlockHash][batchTxIndex], batchTxLogs[:2])) // last log shouldn't be returned by the filter query + require.True(t, fetcher.logsByTxIndex[delayedBlockHash] != nil) + require.True(t, reflect.DeepEqual(fetcher.logsByTxIndex[batchBlockHash][batchTxIndex], batchTxLogs[:2])) // last log shouldn't be returned by the filter query + require.True(t, reflect.DeepEqual(fetcher.logsByTxIndex[delayedBlockHash][delayedMsgTxIndex], delayedMsgTxLogs[:3])) // last log shouldn't be returned by the filter query + + // TODO: remove this when mel runner code is synced, this is added temporarily to fix lint failures + _, err := fetcher.getHeaderByNumber(ctx, 0) + require.Error(t, err) } diff --git a/cmd/mel-replay/delayed_message_db_test.go 
b/cmd/mel-replay/delayed_message_db_test.go index 1e7b7c5660..b6544787f4 100644 --- a/cmd/mel-replay/delayed_message_db_test.go +++ b/cmd/mel-replay/delayed_message_db_test.go @@ -17,6 +17,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/mel/runner" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" ) var _ preimageResolver = (*mockPreimageResolver)(nil) @@ -70,7 +71,9 @@ func TestRecordingPreimagesForReadDelayedMessage(t *testing.T) { require.NoError(t, state.GenerateDelayedMessagesSeenMerklePartialsAndRoot()) require.NoError(t, melDB.SaveState(ctx, state)) - recordingDB := melrecording.NewDelayedMsgDatabase(db) + preimages := make(daprovider.PreimagesMap) + recordingDB, err := melrecording.NewDelayedMsgDatabase(db, preimages) + require.NoError(t, err) for i := startBlockNum; i < numMsgs; i++ { require.NoError(t, state.AccumulateDelayedMessage(delayedMessages[i])) state.DelayedMessagesSeen++ @@ -88,7 +91,7 @@ func TestRecordingPreimagesForReadDelayedMessage(t *testing.T) { // Test reading in wasm mode delayedDB := &delayedMessageDatabase{ &testPreimageResolver{ - preimages: recordingDB.Preimages(), + preimages: preimages[arbutil.Keccak256PreimageType], }, } for i := startBlockNum; i < numMsgsToRead; i++ { diff --git a/cmd/mel-replay/receipt_fetcher.go b/cmd/mel-replay/receipt_fetcher.go index cb6dd20943..0dabdbadb0 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/cmd/mel-replay/receipt_fetcher.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/triedb" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/mel-replay" ) type receiptFetcherForBlock struct { @@ -49,7 +50,8 @@ func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentCh if rf.header.Hash() != parentChainBlockHash { return nil, errors.New("parentChainBlockHash mismatch") } - txIndexData, err := 
rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, rf.header.Hash()) + relevantTxIndicesKey := melreplay.RelevantTxIndexesKey(rf.header.Hash()) + txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, relevantTxIndicesKey) if err != nil { return nil, err } @@ -68,7 +70,7 @@ func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentCh return relevantLogs, nil } -// LogsForBlockHashAllLogs is kept, in case we go with an implementation of returning all logs present in a block +// TODO: LogsForBlockHashAllLogs is kept, in case we go with an implementation of returning all logs present in a block func (rf *receiptFetcherForBlock) LogsForBlockHashAllLogs(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { if rf.header.Hash() != parentChainBlockHash { return nil, errors.New("parentChainBlockHash mismatch") diff --git a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go index 57f5c64720..1ada7bf754 100644 --- a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go +++ b/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go @@ -16,8 +16,9 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/trie" - melrecording "github.com/offchainlabs/nitro/arbnode/mel/recording" + "github.com/offchainlabs/nitro/arbnode/mel/recording" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" ) type mockPreimageResolver struct { @@ -82,7 +83,9 @@ func TestRecordingOfReceiptPreimagesAndFetchingLogsFromPreimages(t *testing.T) { log.BlockNumber = block.NumberU64() } } - recorder := melrecording.NewReceiptRecorder(blockReader, block.Hash()) + preimages := make(daprovider.PreimagesMap) + recorder, err := melrecording.NewReceiptRecorder(blockReader, block.Hash(), preimages) + require.NoError(t, err) require.NoError(t, recorder.Initialize(ctx)) // 
Test recording of preimages @@ -99,7 +102,7 @@ func TestRecordingOfReceiptPreimagesAndFetchingLogsFromPreimages(t *testing.T) { } // Test reading of logs from the recorded preimages - preimages, err := recorder.GetPreimages() + require.NoError(t, recorder.CollectTxIndicesPreimage()) require.NoError(t, err) receiptFetcher := &receiptFetcherForBlock{ header: block.Header(), diff --git a/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go b/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go index b0e441a85a..a4898d1c4f 100644 --- a/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go +++ b/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go @@ -14,6 +14,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/mel/recording" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" ) type mockBlockReader struct { @@ -63,7 +64,9 @@ func TestRecordingOfTxPreimagesAndFetchingTxsFromPreimages(t *testing.T) { block.Hash(): block, }, } - recorder := melrecording.NewTransactionRecorder(blockReader, block.Hash()) + preimages := make(daprovider.PreimagesMap) + recorder, err := melrecording.NewTransactionRecorder(blockReader, block.Hash(), preimages) + require.NoError(t, err) require.NoError(t, recorder.Initialize(ctx)) // Test recording of preimages @@ -80,7 +83,6 @@ func TestRecordingOfTxPreimagesAndFetchingTxsFromPreimages(t *testing.T) { } // Test reading of txs from the recorded preimages - preimages := recorder.GetPreimages() txsFetcher := &txFetcherForBlock{ header: block.Header(), preimageResolver: &testPreimageResolver{ @@ -98,7 +100,7 @@ func TestRecordingOfTxPreimagesAndFetchingTxsFromPreimages(t *testing.T) { } // Tx fetching should fail for not recorded ones - _, err := txsFetcher.TransactionByLog(ctx, &types.Log{TxIndex: recordStart - 1}) + _, err = txsFetcher.TransactionByLog(ctx, &types.Log{TxIndex: recordStart - 1}) if err == nil || !strings.Contains(err.Error(), "preimage not found for hash") { t.Fatalf("failed with unexpected error: %v", err) } 
diff --git a/mel-replay/mel-replay.go b/mel-replay/mel-replay.go new file mode 100644 index 0000000000..7c2661c605 --- /dev/null +++ b/mel-replay/mel-replay.go @@ -0,0 +1,17 @@ +// Copyright 2025-2026, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md + +package melreplay + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// RELEVANT_TX_INDEXES_PREFIX represents the prefix appended to a blockHash and the hash of the resulting string +// maps to the list of MEL-relevant tx indexes in a parent chain block +const RELEVANT_TX_INDEXES_PREFIX string = "TX_INDEX_DATA" + +func RelevantTxIndexesKey(parentChainBlockHash common.Hash) common.Hash { + return crypto.Keccak256Hash([]byte(RELEVANT_TX_INDEXES_PREFIX), parentChainBlockHash.Bytes()) +} From d56284381ea25e58198c83876db8a7ac7166d598 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 7 Jan 2026 17:56:04 +0530 Subject: [PATCH 39/42] move mel replay code to its own package --- cmd/mel-replay/main.go | 10 --------- {cmd/mel-replay => mel-replay}/db.go | 4 ++-- .../delayed_message_db.go | 12 ++++++---- .../delayed_message_db_test.go | 22 +++++++++---------- mel-replay/mel-replay.go | 6 +++++ .../receipt_fetcher.go | 12 ++++++---- ...ceipt_recorder_and_receipt_fetcher_test.go | 11 +++++----- .../mel-replay => mel-replay}/trie_fetcher.go | 4 ++-- {cmd/mel-replay => mel-replay}/tx_fetcher.go | 10 +++++++-- .../tx_recorder_and_tx_fetcher_test.go | 11 +++++----- 10 files changed, 57 insertions(+), 45 deletions(-) rename {cmd/mel-replay => mel-replay}/db.go (98%) rename {cmd/mel-replay => mel-replay}/delayed_message_db.go (88%) rename {cmd/mel-replay => mel-replay}/delayed_message_db_test.go (94%) rename {cmd/mel-replay => mel-replay}/receipt_fetcher.go (93%) rename {cmd/mel-replay => mel-replay}/receipt_recorder_and_receipt_fetcher_test.go (96%) rename {cmd/mel-replay => mel-replay}/trie_fetcher.go (98%) 
rename {cmd/mel-replay => mel-replay}/tx_fetcher.go (68%) rename {cmd/mel-replay => mel-replay}/tx_recorder_and_tx_fetcher_test.go (94%) diff --git a/cmd/mel-replay/main.go b/cmd/mel-replay/main.go index 2f4c4e3d30..a41462dce8 100644 --- a/cmd/mel-replay/main.go +++ b/cmd/mel-replay/main.go @@ -3,15 +3,5 @@ package main -import ( - "github.com/ethereum/go-ethereum/common" - - "github.com/offchainlabs/nitro/arbutil" -) - -type preimageResolver interface { - ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) -} - func main() { } diff --git a/cmd/mel-replay/db.go b/mel-replay/db.go similarity index 98% rename from cmd/mel-replay/db.go rename to mel-replay/db.go index e90685f1ba..ab53bf60d5 100644 --- a/cmd/mel-replay/db.go +++ b/mel-replay/db.go @@ -1,4 +1,4 @@ -package main +package melreplay import ( "errors" @@ -14,7 +14,7 @@ import ( var _ ethdb.Database = (*DB)(nil) type DB struct { - resolver preimageResolver + resolver PreimageResolver } func (d *DB) Get(key []byte) ([]byte, error) { diff --git a/cmd/mel-replay/delayed_message_db.go b/mel-replay/delayed_message_db.go similarity index 88% rename from cmd/mel-replay/delayed_message_db.go rename to mel-replay/delayed_message_db.go index f6c0223be5..ba2cef00ee 100644 --- a/cmd/mel-replay/delayed_message_db.go +++ b/mel-replay/delayed_message_db.go @@ -1,4 +1,4 @@ -package main +package melreplay import ( "bytes" @@ -14,7 +14,11 @@ import ( ) type delayedMessageDatabase struct { - preimageResolver preimageResolver + preimageResolver PreimageResolver +} + +func NewDelayedMessageDatabase(preimageResolver PreimageResolver) mel.DelayedMessageDatabase { + return &delayedMessageDatabase{preimageResolver} } func (d *delayedMessageDatabase) ReadDelayedMessage( @@ -27,7 +31,7 @@ func (d *delayedMessageDatabase) ReadDelayedMessage( if msgIndex >= totalMsgsSeen { return nil, fmt.Errorf("index %d out of range, total delayed messages seen: %d", msgIndex, totalMsgsSeen) } - treeSize := 
nextPowerOfTwo(totalMsgsSeen) + treeSize := NextPowerOfTwo(totalMsgsSeen) merkleDepth := bits.TrailingZeros64(treeSize) // Start traversal from root, which is the delayed messages seen root. @@ -74,7 +78,7 @@ func (d *delayedMessageDatabase) ReadDelayedMessage( return delayedMessage, nil } -func nextPowerOfTwo(n uint64) uint64 { +func NextPowerOfTwo(n uint64) uint64 { if n == 0 { return 1 } diff --git a/cmd/mel-replay/delayed_message_db_test.go b/mel-replay/delayed_message_db_test.go similarity index 94% rename from cmd/mel-replay/delayed_message_db_test.go rename to mel-replay/delayed_message_db_test.go index 69ce681cd2..3c210145b2 100644 --- a/cmd/mel-replay/delayed_message_db_test.go +++ b/mel-replay/delayed_message_db_test.go @@ -1,4 +1,4 @@ -package main +package melreplay_test import ( "context" @@ -13,15 +13,15 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbnode/mel" - melrecording "github.com/offchainlabs/nitro/arbnode/mel/recording" + "github.com/offchainlabs/nitro/arbnode/mel/recording" "github.com/offchainlabs/nitro/arbnode/mel/runner" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/mel-replay" ) -var _ preimageResolver = (*mockPreimageResolver)(nil) -var _ mel.DelayedMessageDatabase = (*delayedMessageDatabase)(nil) +var _ melreplay.PreimageResolver = (*mockPreimageResolver)(nil) type testPreimageResolver struct { preimages map[common.Hash][]byte @@ -89,11 +89,11 @@ func TestRecordingPreimagesForReadDelayedMessage(t *testing.T) { } // Test reading in wasm mode - delayedDB := &delayedMessageDatabase{ + delayedDB := melreplay.NewDelayedMessageDatabase( &testPreimageResolver{ preimages: preimages[arbutil.Keccak256PreimageType], }, - } + ) for i := startBlockNum; i < numMsgsToRead; i++ { msg, err := delayedDB.ReadDelayedMessage(ctx, state, i) require.NoError(t, err) @@ -104,7 +104,7 @@ func 
TestRecordingPreimagesForReadDelayedMessage(t *testing.T) { func TestReadDelayedMessage(t *testing.T) { ctx := context.Background() t.Run("message index out of range", func(t *testing.T) { - db := &delayedMessageDatabase{} + db := melreplay.NewDelayedMessageDatabase(nil) state := &mel.State{ DelayedMessagesSeen: 5, } @@ -122,7 +122,7 @@ func TestReadDelayedMessage(t *testing.T) { preimages, root := buildMerkleTree(t, messages) resolver := &mockPreimageResolver{preimages: preimages} - db := &delayedMessageDatabase{preimageResolver: resolver} + db := melreplay.NewDelayedMessageDatabase(resolver) state := &mel.State{ DelayedMessagesSeen: 1, DelayedMessagesSeenRoot: root, @@ -151,7 +151,7 @@ func TestReadDelayedMessage(t *testing.T) { preimages, root := buildMerkleTree(t, messages) resolver := &mockPreimageResolver{preimages: preimages} - db := &delayedMessageDatabase{preimageResolver: resolver} + db := melreplay.NewDelayedMessageDatabase(resolver) state := &mel.State{ DelayedMessagesSeen: 2, DelayedMessagesSeenRoot: root, @@ -184,7 +184,7 @@ func TestReadDelayedMessage(t *testing.T) { preimages, root := buildMerkleTree(t, messages) resolver := &mockPreimageResolver{preimages: preimages} - db := &delayedMessageDatabase{preimageResolver: resolver} + db := melreplay.NewDelayedMessageDatabase(resolver) state := &mel.State{ DelayedMessagesSeen: 3, DelayedMessagesSeenRoot: root, @@ -218,7 +218,7 @@ func TestNextPowerOfTwo(t *testing.T) { } for _, tc := range testCases { - result := nextPowerOfTwo(tc.input) + result := melreplay.NextPowerOfTwo(tc.input) if result != tc.expected { t.Errorf("nextPowerOfTwo(%d) = %d, expected %d", tc.input, result, tc.expected) } diff --git a/mel-replay/mel-replay.go b/mel-replay/mel-replay.go index 7c2661c605..c075627922 100644 --- a/mel-replay/mel-replay.go +++ b/mel-replay/mel-replay.go @@ -6,6 +6,8 @@ package melreplay import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + + 
"github.com/offchainlabs/nitro/arbutil" ) // RELEVANT_TX_INDEXES_PREFIX represents the prefix appended to a blockHash and the hash of the resulting string @@ -15,3 +17,7 @@ const RELEVANT_TX_INDEXES_PREFIX string = "TX_INDEX_DATA" func RelevantTxIndexesKey(parentChainBlockHash common.Hash) common.Hash { return crypto.Keccak256Hash([]byte(RELEVANT_TX_INDEXES_PREFIX), parentChainBlockHash.Bytes()) } + +type PreimageResolver interface { + ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) +} diff --git a/cmd/mel-replay/receipt_fetcher.go b/mel-replay/receipt_fetcher.go similarity index 93% rename from cmd/mel-replay/receipt_fetcher.go rename to mel-replay/receipt_fetcher.go index 0dabdbadb0..5567ebc09d 100644 --- a/cmd/mel-replay/receipt_fetcher.go +++ b/mel-replay/receipt_fetcher.go @@ -1,7 +1,7 @@ // Copyright 2025-2026, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md -package main +package melreplay import ( "context" @@ -15,13 +15,17 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb" + "github.com/offchainlabs/nitro/arbnode/mel/extraction" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/mel-replay" ) type receiptFetcherForBlock struct { header *types.Header - preimageResolver preimageResolver + preimageResolver PreimageResolver +} + +func NewLogsFetcher(header *types.Header, preimageResolver PreimageResolver) melextraction.LogsFetcher { + return &receiptFetcherForBlock{header, preimageResolver} } // LogsForTxIndex fetches logs for a specific transaction index by walking @@ -50,7 +54,7 @@ func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentCh if rf.header.Hash() != parentChainBlockHash { return nil, errors.New("parentChainBlockHash mismatch") } - relevantTxIndicesKey := melreplay.RelevantTxIndexesKey(rf.header.Hash()) + relevantTxIndicesKey := 
RelevantTxIndexesKey(rf.header.Hash()) txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, relevantTxIndicesKey) if err != nil { return nil, err diff --git a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go b/mel-replay/receipt_recorder_and_receipt_fetcher_test.go similarity index 96% rename from cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go rename to mel-replay/receipt_recorder_and_receipt_fetcher_test.go index 1ada7bf754..3fae81213e 100644 --- a/cmd/mel-replay/receipt_recorder_and_receipt_fetcher_test.go +++ b/mel-replay/receipt_recorder_and_receipt_fetcher_test.go @@ -1,7 +1,7 @@ // Copyright 2025-2026, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE.md -package main +package melreplay_test import ( "context" @@ -19,6 +19,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/mel/recording" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/mel-replay" ) type mockPreimageResolver struct { @@ -104,12 +105,12 @@ func TestRecordingOfReceiptPreimagesAndFetchingLogsFromPreimages(t *testing.T) { // Test reading of logs from the recorded preimages require.NoError(t, recorder.CollectTxIndicesPreimage()) require.NoError(t, err) - receiptFetcher := &receiptFetcherForBlock{ - header: block.Header(), - preimageResolver: &testPreimageResolver{ + receiptFetcher := melreplay.NewLogsFetcher( + block.Header(), + &testPreimageResolver{ preimages: preimages[arbutil.Keccak256PreimageType], }, - } + ) // Test LogsForBlockHash logs, err := receiptFetcher.LogsForBlockHash(ctx, block.Hash()) require.NoError(t, err) diff --git a/cmd/mel-replay/trie_fetcher.go b/mel-replay/trie_fetcher.go similarity index 98% rename from cmd/mel-replay/trie_fetcher.go rename to mel-replay/trie_fetcher.go index 277ab65e0a..3a89cfd6dc 100644 --- a/cmd/mel-replay/trie_fetcher.go +++ b/mel-replay/trie_fetcher.go @@ -1,4 
+1,4 @@ -package main +package melreplay import ( "bytes" @@ -14,7 +14,7 @@ import ( // Fetches a specific object at index from a block's Receipt/Tx trie by navigating its // Merkle Patricia Trie structure. It uses the preimage resolver to fetch preimages // of trie nodes as needed, and determines how to navigate depending on the structure of the trie nodes. -func fetchObjectFromTrie[T any](root common.Hash, index uint, preimageResolver preimageResolver) (*T, error) { +func fetchObjectFromTrie[T any](root common.Hash, index uint, preimageResolver PreimageResolver) (*T, error) { var empty *T currentNodeHash := root currentPath := []byte{} // Track nibbles consumed so far. diff --git a/cmd/mel-replay/tx_fetcher.go b/mel-replay/tx_fetcher.go similarity index 68% rename from cmd/mel-replay/tx_fetcher.go rename to mel-replay/tx_fetcher.go index be7290b0c3..46e21b6813 100644 --- a/cmd/mel-replay/tx_fetcher.go +++ b/mel-replay/tx_fetcher.go @@ -1,14 +1,20 @@ -package main +package melreplay import ( "context" "github.com/ethereum/go-ethereum/core/types" + + "github.com/offchainlabs/nitro/arbnode/mel/extraction" ) type txFetcherForBlock struct { header *types.Header - preimageResolver preimageResolver + preimageResolver PreimageResolver +} + +func NewTransactionFetcher(header *types.Header, preimageResolver PreimageResolver) melextraction.TransactionFetcher { + return &txFetcherForBlock{header, preimageResolver} } // TransactionByLog fetches the tx for a specific transaction index by walking diff --git a/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go b/mel-replay/tx_recorder_and_tx_fetcher_test.go similarity index 94% rename from cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go rename to mel-replay/tx_recorder_and_tx_fetcher_test.go index a4898d1c4f..ffeba97f3c 100644 --- a/cmd/mel-replay/tx_recorder_and_tx_fetcher_test.go +++ b/mel-replay/tx_recorder_and_tx_fetcher_test.go @@ -1,4 +1,4 @@ -package main +package melreplay_test import ( "context" @@ -15,6 +15,7 @@ 
import ( "github.com/offchainlabs/nitro/arbnode/mel/recording" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/mel-replay" ) type mockBlockReader struct { @@ -83,12 +84,12 @@ func TestRecordingOfTxPreimagesAndFetchingTxsFromPreimages(t *testing.T) { } // Test reading of txs from the recorded preimages - txsFetcher := &txFetcherForBlock{ - header: block.Header(), - preimageResolver: &testPreimageResolver{ + txsFetcher := melreplay.NewTransactionFetcher( + block.Header(), + &testPreimageResolver{ preimages: preimages[arbutil.Keccak256PreimageType], }, - } + ) for i := recordStart; i <= recordEnd; i++ { tx, err := txsFetcher.TransactionByLog(ctx, &types.Log{TxIndex: i}) require.NoError(t, err) From 09c3fb403eeb879de83c9a46bbb88d39a550e682 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 7 Jan 2026 18:19:24 +0530 Subject: [PATCH 40/42] implement typeBasedPreimageResolver --- mel-replay/delayed_message_db_test.go | 21 ++++------------ mel-replay/mel-replay.go | 24 +++++++++++++++++++ ...ceipt_recorder_and_receipt_fetcher_test.go | 7 +++--- mel-replay/tx_recorder_and_tx_fetcher_test.go | 7 +++--- 4 files changed, 36 insertions(+), 23 deletions(-) diff --git a/mel-replay/delayed_message_db_test.go b/mel-replay/delayed_message_db_test.go index 3c210145b2..f640433dc7 100644 --- a/mel-replay/delayed_message_db_test.go +++ b/mel-replay/delayed_message_db_test.go @@ -23,20 +23,6 @@ import ( var _ melreplay.PreimageResolver = (*mockPreimageResolver)(nil) -type testPreimageResolver struct { - preimages map[common.Hash][]byte -} - -func (r *testPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { - if preimageType != arbutil.Keccak256PreimageType { - return nil, fmt.Errorf("unsupported preimageType: %d", preimageType) - } - if preimage, ok := r.preimages[hash]; ok { - return preimage, nil - } - return nil, fmt.Errorf("preimage not found for hash: 
%v", hash) -} - func TestRecordingPreimagesForReadDelayedMessage(t *testing.T) { ctx := context.Background() var delayedMessages []*mel.DelayedInboxMessage @@ -90,9 +76,10 @@ func TestRecordingPreimagesForReadDelayedMessage(t *testing.T) { // Test reading in wasm mode delayedDB := melreplay.NewDelayedMessageDatabase( - &testPreimageResolver{ - preimages: preimages[arbutil.Keccak256PreimageType], - }, + melreplay.NewTypeBasedPreimageResolver( + arbutil.Keccak256PreimageType, + preimages, + ), ) for i := startBlockNum; i < numMsgsToRead; i++ { msg, err := delayedDB.ReadDelayedMessage(ctx, state, i) diff --git a/mel-replay/mel-replay.go b/mel-replay/mel-replay.go index c075627922..649ce5c2d9 100644 --- a/mel-replay/mel-replay.go +++ b/mel-replay/mel-replay.go @@ -4,10 +4,13 @@ package melreplay import ( + "fmt" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/daprovider" ) // RELEVANT_TX_INDEXES_PREFIX represents the prefix appended to a blockHash and the hash of the resulting string @@ -21,3 +24,24 @@ func RelevantTxIndexesKey(parentChainBlockHash common.Hash) common.Hash { type PreimageResolver interface { ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) } + +type typeBasedPreimageResolver struct { + ty arbutil.PreimageType + preimagesMap daprovider.PreimagesMap +} + +func NewTypeBasedPreimageResolver(ty arbutil.PreimageType, preimagesMap daprovider.PreimagesMap) PreimageResolver { + return &typeBasedPreimageResolver{ty, preimagesMap} +} + +func (t *typeBasedPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { + if preimageType != t.ty { + return nil, fmt.Errorf("unsupported preimageType: %d, want: %d", preimageType, t.ty) + } + if targetMap, ok := t.preimagesMap[preimageType]; ok { + if preimage, ok := targetMap[hash]; ok { + return preimage, nil + } + } + return 
nil, fmt.Errorf("preimage not found for hash: %v", hash) +} diff --git a/mel-replay/receipt_recorder_and_receipt_fetcher_test.go b/mel-replay/receipt_recorder_and_receipt_fetcher_test.go index 3fae81213e..66b68ce2b6 100644 --- a/mel-replay/receipt_recorder_and_receipt_fetcher_test.go +++ b/mel-replay/receipt_recorder_and_receipt_fetcher_test.go @@ -107,9 +107,10 @@ func TestRecordingOfReceiptPreimagesAndFetchingLogsFromPreimages(t *testing.T) { require.NoError(t, err) receiptFetcher := melreplay.NewLogsFetcher( block.Header(), - &testPreimageResolver{ - preimages: preimages[arbutil.Keccak256PreimageType], - }, + melreplay.NewTypeBasedPreimageResolver( + arbutil.Keccak256PreimageType, + preimages, + ), ) // Test LogsForBlockHash logs, err := receiptFetcher.LogsForBlockHash(ctx, block.Hash()) diff --git a/mel-replay/tx_recorder_and_tx_fetcher_test.go b/mel-replay/tx_recorder_and_tx_fetcher_test.go index ffeba97f3c..e6cee33b0d 100644 --- a/mel-replay/tx_recorder_and_tx_fetcher_test.go +++ b/mel-replay/tx_recorder_and_tx_fetcher_test.go @@ -86,9 +86,10 @@ func TestRecordingOfTxPreimagesAndFetchingTxsFromPreimages(t *testing.T) { // Test reading of txs from the recorded preimages txsFetcher := melreplay.NewTransactionFetcher( block.Header(), - &testPreimageResolver{ - preimages: preimages[arbutil.Keccak256PreimageType], - }, + melreplay.NewTypeBasedPreimageResolver( + arbutil.Keccak256PreimageType, + preimages, + ), ) for i := recordStart; i <= recordEnd; i++ { tx, err := txsFetcher.TransactionByLog(ctx, &types.Log{TxIndex: i}) From 1ab38f9caf7eae8e0eaaf0995745bb4523bd543e Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 7 Jan 2026 18:41:58 +0530 Subject: [PATCH 41/42] use typeBasedPreimageResolver in test and update melValidator --- staker/mel_validator.go | 35 +- .../message_extraction_layer_utils.go | 337 ------------------ ...essage_extraction_layer_validation_test.go | 56 ++- 3 files changed, 64 insertions(+), 364 deletions(-) delete mode 100644 
system_tests/message_extraction_layer_utils.go diff --git a/staker/mel_validator.go b/staker/mel_validator.go index 981f743610..6c461f8c24 100644 --- a/staker/mel_validator.go +++ b/staker/mel_validator.go @@ -17,11 +17,9 @@ import ( "github.com/offchainlabs/nitro/arbnode/mel/recording" "github.com/offchainlabs/nitro/arbnode/mel/runner" "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/util/stopwaiter" - "github.com/offchainlabs/nitro/validator" ) type MELValidator struct { @@ -97,19 +95,31 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid if preState.MsgCount >= toValidateMsgExtractionCount { return nil, nil } - delayedMsgRecordingDB := melrecording.NewDelayedMsgDatabase(mv.arbDb) - recordingDAPReaders := melrecording.NewDAPReaderSource(ctx, mv.dapReaders) - txsAndReceiptsPreimages := make(daprovider.PreimagesMap) + preimages := make(daprovider.PreimagesMap) + delayedMsgRecordingDB, err := melrecording.NewDelayedMsgDatabase(mv.arbDb, preimages) + if err != nil { + return nil, err + } + recordingDAPReaders, err := melrecording.NewDAPReaderSource(ctx, mv.dapReaders, preimages) + if err != nil { + return nil, err + } for i := lastValidatedParentChainBlock + 1; ; i++ { header, err := mv.l1Client.HeaderByNumber(ctx, new(big.Int).SetUint64(i)) if err != nil { return nil, err } - txsRecorder := melrecording.NewTransactionRecorder(mv.l1Client, header.Hash()) + txsRecorder, err := melrecording.NewTransactionRecorder(mv.l1Client, header.Hash(), preimages) + if err != nil { + return nil, err + } if err := txsRecorder.Initialize(ctx); err != nil { return nil, err } - receiptsRecorder := melrecording.NewReceiptRecorder(mv.l1Client, header.Hash()) + receiptsRecorder, err := melrecording.NewReceiptRecorder(mv.l1Client, header.Hash(), preimages) + if err != nil { + return nil, err + } if 
err := receiptsRecorder.Initialize(ctx); err != nil { return nil, err } @@ -124,23 +134,14 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid if state.Hash() != wantState.Hash() { return nil, fmt.Errorf("calculated MEL state hash in recording mode doesn't match the one computed in native mode, parentchainBlocknumber: %d", i) } - validator.CopyPreimagesInto(txsAndReceiptsPreimages, txsRecorder.GetPreimages()) - receiptsPreimages, err := receiptsRecorder.GetPreimages() - if err != nil { + if err := receiptsRecorder.CollectTxIndicesPreimage(); err != nil { return nil, err } - validator.CopyPreimagesInto(txsAndReceiptsPreimages, receiptsPreimages) if state.MsgCount >= toValidateMsgExtractionCount { break } preState = state } - preimages := recordingDAPReaders.Preimages() - delayedPreimages := daprovider.PreimagesMap{ - arbutil.Keccak256PreimageType: delayedMsgRecordingDB.Preimages(), - } - validator.CopyPreimagesInto(preimages, delayedPreimages) - validator.CopyPreimagesInto(preimages, txsAndReceiptsPreimages) return &validationEntry{ Preimages: preimages, }, nil diff --git a/system_tests/message_extraction_layer_utils.go b/system_tests/message_extraction_layer_utils.go deleted file mode 100644 index 725d7e72db..0000000000 --- a/system_tests/message_extraction_layer_utils.go +++ /dev/null @@ -1,337 +0,0 @@ -package arbtest - -import ( - "bytes" - "context" - "errors" - "fmt" - "math/bits" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/rlp" - - "github.com/offchainlabs/nitro/arbnode/mel" - "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/daprovider" -) - -// TODO: Code from cmd/mel-replay and cmd/replay packages for verification of preimages, should be deleted once we have validation wired -type blobPreimageReader struct { - preimages daprovider.PreimagesMap -} - -func (r 
*blobPreimageReader) Initialize(ctx context.Context) error { return nil } - -func (r *blobPreimageReader) GetBlobs( - ctx context.Context, - batchBlockHash common.Hash, - versionedHashes []common.Hash, -) ([]kzg4844.Blob, error) { - var blobs []kzg4844.Blob - for _, h := range versionedHashes { - var blob kzg4844.Blob - if _, ok := r.preimages[arbutil.EthVersionedHashPreimageType]; !ok { - return nil, errors.New("no blobs found in preimages") - } - preimage, ok := r.preimages[arbutil.EthVersionedHashPreimageType][h] - if !ok { - return nil, errors.New("no blobs found in preimages") - } - if len(preimage) != len(blob) { - return nil, fmt.Errorf("for blob %v got back preimage of length %v but expected blob length %v", h, len(preimage), len(blob)) - } - copy(blob[:], preimage) - blobs = append(blobs, blob) - } - return blobs, nil -} - -type testPreimageResolver struct { - preimages map[common.Hash][]byte -} - -func (r *testPreimageResolver) ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) { - if preimageType != arbutil.Keccak256PreimageType { - return nil, fmt.Errorf("unsupported preimageType: %d", preimageType) - } - if preimage, ok := r.preimages[hash]; ok { - return preimage, nil - } - return nil, fmt.Errorf("preimage not found for hash: %v", hash) -} - -type preimageResolver interface { - ResolveTypedPreimage(preimageType arbutil.PreimageType, hash common.Hash) ([]byte, error) -} - -type delayedMessageDatabase struct { - preimageResolver preimageResolver -} - -func (d *delayedMessageDatabase) ReadDelayedMessage( - ctx context.Context, - state *mel.State, - msgIndex uint64, -) (*mel.DelayedInboxMessage, error) { - originalMsgIndex := msgIndex - totalMsgsSeen := state.DelayedMessagesSeen - if msgIndex >= totalMsgsSeen { - return nil, fmt.Errorf("index %d out of range, total delayed messages seen: %d", msgIndex, totalMsgsSeen) - } - treeSize := nextPowerOfTwo(totalMsgsSeen) - merkleDepth := bits.TrailingZeros64(treeSize) - - 
// Start traversal from root, which is the delayed messages seen root. - merkleRoot := state.DelayedMessagesSeenRoot - currentHash := merkleRoot - currentDepth := merkleDepth - - // Traverse down the Merkle tree to find the leaf at the given index. - for currentDepth > 0 { - // Resolve the preimage to get left and right children. - result, err := d.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentHash) - if err != nil { - return nil, err - } - if len(result) != 64 { - return nil, fmt.Errorf("invalid preimage result length: %d, wanted 64", len(result)) - } - // Split result into left and right halves. - mid := len(result) / 2 - left := result[:mid] - right := result[mid:] - - // Calculate which subtree contains our index. - subtreeSize := uint64(1) << (currentDepth - 1) - if msgIndex < subtreeSize { - // Go left. - currentHash = common.BytesToHash(left) - } else { - // Go right. - currentHash = common.BytesToHash(right) - msgIndex -= subtreeSize - } - currentDepth-- - } - // At this point, currentHash should be the hash of the delayed message. - delayedMsgBytes, err := d.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentHash) - if err != nil { - return nil, err - } - delayedMessage := new(mel.DelayedInboxMessage) - if err = rlp.Decode(bytes.NewBuffer(delayedMsgBytes), &delayedMessage); err != nil { - return nil, fmt.Errorf("failed to decode delayed message at index %d: %w", originalMsgIndex, err) - } - return delayedMessage, nil -} - -func nextPowerOfTwo(n uint64) uint64 { - if n == 0 { - return 1 - } - if n&(n-1) == 0 { - return n - } - return 1 << bits.Len64(n) -} - -type txFetcherForBlock struct { - header *types.Header - preimageResolver preimageResolver -} - -// TransactionByLog fetches the tx for a specific transaction index by walking -// the tx trie of the block header. It uses the preimage resolver to fetch the preimages -// of the trie nodes as needed. 
-func (tf *txFetcherForBlock) TransactionByLog(ctx context.Context, log *types.Log) (*types.Transaction, error) { - tx, err := fetchObjectFromTrie[types.Transaction](tf.header.TxHash, log.TxIndex, tf.preimageResolver) - if err != nil { - return nil, err - } - return tx, err -} - -type receiptFetcherForBlock struct { - header *types.Header - preimageResolver preimageResolver -} - -// LogsForTxIndex fetches logs for a specific transaction index by walking -// the receipt trie of the block header. It uses the preimage resolver to fetch the preimages -// of the trie nodes as needed. -func (rf *receiptFetcherForBlock) LogsForTxIndex(ctx context.Context, parentChainBlockHash common.Hash, txIndex uint) ([]*types.Log, error) { - if rf.header.Hash() != parentChainBlockHash { - return nil, errors.New("parentChainBlockHash mismatch") - } - receipt, err := fetchObjectFromTrie[types.Receipt](rf.header.ReceiptHash, txIndex, rf.preimageResolver) - if err != nil { - return nil, err - } - // This is needed to enable fetching corresponding tx from the txFetcher - for _, log := range receipt.Logs { - log.TxIndex = txIndex - } - return receipt.Logs, nil -} - -// LogsForBlockHash first gets the txIndexes corresponding to the relevant logs by reading -// the key `parentChainBlockHash` from the preimages and then fetches logs for each of these txIndexes -func (rf *receiptFetcherForBlock) LogsForBlockHash(ctx context.Context, parentChainBlockHash common.Hash) ([]*types.Log, error) { - if rf.header.Hash() != parentChainBlockHash { - return nil, errors.New("parentChainBlockHash mismatch") - } - txIndexData, err := rf.preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, rf.header.Hash()) - if err != nil { - return nil, err - } - var txIndexes []uint - if err := rlp.DecodeBytes(txIndexData, &txIndexes); err != nil { - return nil, err - } - var relevantLogs []*types.Log - for _, txIndex := range txIndexes { - logs, err := rf.LogsForTxIndex(ctx, parentChainBlockHash, txIndex) - 
if err != nil { - return nil, err - } - relevantLogs = append(relevantLogs, logs...) - } - return relevantLogs, nil -} - -// Fetches a specific object at index from a block's Receipt/Tx trie by navigating its -// Merkle Patricia Trie structure. It uses the preimage resolver to fetch preimages -// of trie nodes as needed, and determines how to navigate depending on the structure of the trie nodes. -func fetchObjectFromTrie[T any](root common.Hash, index uint, preimageResolver preimageResolver) (*T, error) { - var empty *T - currentNodeHash := root - currentPath := []byte{} // Track nibbles consumed so far. - receiptKey, err := rlp.EncodeToBytes(index) - if err != nil { - return empty, err - } - targetNibbles := keyToNibbles(receiptKey) - for { - nodeData, err := preimageResolver.ResolveTypedPreimage(arbutil.Keccak256PreimageType, currentNodeHash) - if err != nil { - return empty, err - } - var node []any - if err = rlp.DecodeBytes(nodeData, &node); err != nil { - return empty, fmt.Errorf("failed to decode RLP node: %w", err) - } - switch len(node) { - case 17: - // We hit a branch node, which has 16 children and a value. - if len(currentPath) == len(targetNibbles) { - // A branch node's 17th item could be the value, so we check if it contains the receipt. - if valueBytes, ok := node[16].([]byte); ok && len(valueBytes) > 0 { - // This branch node has the actual value as the last item, so we decode the receipt - return decodeBinary[T](valueBytes) - } - return empty, fmt.Errorf("no receipt found at target key") - } - // Get the next nibble to follow. - targetNibble := targetNibbles[len(currentPath)] - childData, ok := node[targetNibble].([]byte) - if !ok || len(childData) == 0 { - return empty, fmt.Errorf("no child at nibble %d", targetNibble) - } - // Move to the child node, which is the next hash we have to navigate. 
- currentNodeHash = common.BytesToHash(childData) - currentPath = append(currentPath, targetNibble) - case 2: - keyPath, ok := node[0].([]byte) - if !ok { - return empty, fmt.Errorf("invalid key path in node") - } - key := extractKeyNibbles(keyPath) - expectedPath := make([]byte, 0) - expectedPath = append(expectedPath, currentPath...) - expectedPath = append(expectedPath, key...) - - // Check if it is a leaf or extension node. - leaf, err := isLeaf(keyPath) - if err != nil { - return empty, err - } - if leaf { - // Check that the keyPath matches the target nibbles, - // otherwise, the receipt does not exist in the trie. - if !bytes.Equal(expectedPath, targetNibbles) { - return empty, fmt.Errorf("leaf key does not match target nibbles") - } - rawData, ok := node[1].([]byte) - if !ok { - return empty, fmt.Errorf("invalid receipt data in leaf node") - } - return decodeBinary[T](rawData) - } - // If the node is not a leaf node, it is an extension node. - // Check if our target key matches this extension path. - if len(expectedPath) > len(targetNibbles) || !bytes.Equal(expectedPath, targetNibbles[:len(expectedPath)]) { - return empty, fmt.Errorf("extension path mismatch") - } - nextNodeBytes, ok := node[1].([]byte) - if !ok { - return empty, fmt.Errorf("invalid next node in extension") - } - // We navigate to the next node in the trie. - currentNodeHash = common.BytesToHash(nextNodeBytes) - currentPath = expectedPath - default: - return empty, fmt.Errorf("invalid node structure: unexpected length %d", len(node)) - } - } -} - -// Converts a byte slice key into a slice of nibbles (4-bit values). -// Keys are encoded in big endian format, which is required by Ethereum MPTs. -func keyToNibbles(key []byte) []byte { - nibbles := make([]byte, len(key)*2) - for i, b := range key { - nibbles[i*2] = b >> 4 - nibbles[i*2+1] = b & 0x0f - } - return nibbles -} - -// Extracts the key nibbles from a key path, handling odd/even length cases. 
-func extractKeyNibbles(keyPath []byte) []byte { - if len(keyPath) == 0 { - return nil - } - nibbles := keyToNibbles(keyPath) - if nibbles[0]&1 != 0 { - return nibbles[1:] - } - return nibbles[2:] -} - -func isLeaf(keyPath []byte) (bool, error) { - firstByte := keyPath[0] - firstNibble := firstByte >> 4 - // 2 or 3 indicates leaf, while 0 or 1 indicates extension nodes in the Ethereum MPT specification. - if firstNibble > 3 { - return false, errors.New("first nibble cannot be greater than 3") - } - return firstNibble >= 2, nil -} - -func decodeBinary[T any](data []byte) (*T, error) { - var empty *T - if len(data) == 0 { - return empty, errors.New("empty data cannot be decoded") - } - v := new(T) - u, ok := any(v).(interface{ UnmarshalBinary([]byte) error }) - if !ok { - return empty, errors.New("decodeBinary is called on a type that doesnt implement UnmarshalBinary") - } - if err := u.UnmarshalBinary(data); err != nil { - return empty, err - } - return v, nil -} diff --git a/system_tests/message_extraction_layer_validation_test.go b/system_tests/message_extraction_layer_validation_test.go index c38b030cf8..193e398f99 100644 --- a/system_tests/message_extraction_layer_validation_test.go +++ b/system_tests/message_extraction_layer_validation_test.go @@ -2,15 +2,19 @@ package arbtest import ( "context" + "fmt" "math/big" "testing" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/offchainlabs/nitro/arbnode/mel/extraction" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/mel-replay" "github.com/offchainlabs/nitro/staker" ) @@ -61,22 +65,27 @@ func TestMELValidator_Recording_Preimages(t *testing.T) { Require(t, err) // Represents running of MEL validation using preimages in wasm mode. 
TODO: remove this once we have validation wired - preimageResolver := &testPreimageResolver{ - preimages: entry.Preimages[arbutil.Keccak256PreimageType], - } + preimageResolver := melreplay.NewTypeBasedPreimageResolver( + arbutil.Keccak256PreimageType, + entry.Preimages, + ) state, err := builder.L2.ConsensusNode.MessageExtractor.GetState(ctx, startBlock) Require(t, err) - preimagesBasedDelayedDb := &delayedMessageDatabase{ - preimageResolver: preimageResolver, - } + preimagesBasedDelayedDb := melreplay.NewDelayedMessageDatabase(preimageResolver) preimagesBasedDapReaders := daprovider.NewDAProviderRegistry() - Require(t, preimagesBasedDapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(&blobPreimageReader{entry.Preimages}))) + blobReader := &blobPreimageReader{ + melreplay.NewTypeBasedPreimageResolver( + arbutil.EthVersionedHashPreimageType, + entry.Preimages, + ), + } + Require(t, preimagesBasedDapReaders.SetupBlobReader(daprovider.NewReaderForBlobReader(blobReader))) for state.MsgCount < uint64(extractedMsgCount) { header, err := builder.L1.Client.HeaderByNumber(ctx, new(big.Int).SetUint64(state.ParentChainBlockNumber+1)) Require(t, err) - preimagesBasedTxsFetcher := &txFetcherForBlock{header, preimageResolver} - preimagesBasedReceiptsFetcher := &receiptFetcherForBlock{header, preimageResolver} - postState, _, _, _, err := melextraction.ExtractMessages(ctx, state, header, preimagesBasedDapReaders, preimagesBasedDelayedDb, preimagesBasedTxsFetcher, preimagesBasedReceiptsFetcher, nil) + preimagesBasedTxsFetcher := melreplay.NewTransactionFetcher(header, preimageResolver) + preimagesBasedLogsFetcher := melreplay.NewLogsFetcher(header, preimageResolver) + postState, _, _, _, err := melextraction.ExtractMessages(ctx, state, header, preimagesBasedDapReaders, preimagesBasedDelayedDb, preimagesBasedTxsFetcher, preimagesBasedLogsFetcher, nil) Require(t, err) wantState, err := builder.L2.ConsensusNode.MessageExtractor.GetState(ctx, state.ParentChainBlockNumber+1) 
Require(t, err) @@ -86,3 +95,30 @@ func TestMELValidator_Recording_Preimages(t *testing.T) { state = postState } } + +type blobPreimageReader struct { + preimageResolver melreplay.PreimageResolver +} + +func (b *blobPreimageReader) Initialize(ctx context.Context) error { return nil } + +func (b *blobPreimageReader) GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, +) ([]kzg4844.Blob, error) { + var blobs []kzg4844.Blob + for _, h := range versionedHashes { + var blob kzg4844.Blob + preimage, err := b.preimageResolver.ResolveTypedPreimage(arbutil.EthVersionedHashPreimageType, h) + if err != nil { + return nil, err + } + if len(preimage) != len(blob) { + return nil, fmt.Errorf("for blob %v got back preimage of length %v but expected blob length %v", h, len(preimage), len(blob)) + } + copy(blob[:], preimage) + blobs = append(blobs, blob) + } + return blobs, nil +} From 9b7a73c975c3f4257158323afe6a5601bd8a77de Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 14 Jan 2026 16:18:40 +0530 Subject: [PATCH 42/42] implement more methods of MEL validator and make blockvalidator to only validate messages whose extraction is validated --- arbnode/mel/state.go | 8 +- staker/block_validator.go | 13 +- staker/mel_validator.go | 331 +++++++++++++++++- staker/stateless_block_validator.go | 21 +- ...alidator_schema.go => validator_schema.go} | 5 +- ...essage_extraction_layer_validation_test.go | 5 +- 6 files changed, 351 insertions(+), 32 deletions(-) rename staker/{block_validator_schema.go => validator_schema.go} (53%) diff --git a/arbnode/mel/state.go b/arbnode/mel/state.go index 7d9c497c06..13e6c61556 100644 --- a/arbnode/mel/state.go +++ b/arbnode/mel/state.go @@ -80,10 +80,10 @@ type MessageConsumer interface { func (s *State) Hash() common.Hash { var delayedMerklePartialsBytes []byte - for _, partials := range s.DelayedMessageMerklePartials { - delayedMerklePartialsBytes = append(delayedMerklePartialsBytes, 
partials.Bytes()...) + for _, partial := range s.DelayedMessageMerklePartials { + delayedMerklePartialsBytes = append(delayedMerklePartialsBytes, partial.Bytes()...) } - hash := crypto.Keccak256( + return crypto.Keccak256Hash( arbmath.Uint16ToBytes(s.Version), arbmath.UintToBytes(s.ParentChainId), arbmath.UintToBytes(s.ParentChainBlockNumber), @@ -97,8 +97,8 @@ func (s *State) Hash() common.Hash { arbmath.UintToBytes(s.BatchCount), arbmath.UintToBytes(s.DelayedMessagesRead), arbmath.UintToBytes(s.DelayedMessagesSeen), + delayedMerklePartialsBytes, ) - return crypto.Keccak256Hash(hash, delayedMerklePartialsBytes) } // Performs a deep clone of the state struct to prevent any unintended diff --git a/staker/block_validator.go b/staker/block_validator.go index 38714cc39e..b19d4376e5 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -88,6 +88,7 @@ func NewThrottledValidationSpawner(spawner validator.ValidationSpawner) *Throttl type BlockValidator struct { stopwaiter.StopWaiter *StatelessBlockValidator + melValidator MELValidatorInterface reorgMutex sync.RWMutex @@ -636,7 +637,7 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e log.Trace("create validation entry: nothing to do", "pos", pos, "validated", v.validated()) return false, nil } - streamerMsgCount, err := v.streamer.GetProcessedMessageCount() + streamerMsgCount, err := v.streamer.GetProcessedMessageCount() // Ask MEL validator LatestValidatedMELState().MsgCount if err != nil { return false, err } @@ -644,6 +645,16 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e log.Trace("create validation entry: nothing to do", "pos", pos, "streamerMsgCount", streamerMsgCount) return false, nil } + if v.melValidator != nil { + latestValidatedState, err := v.melValidator.LatestValidatedMELState(ctx) + if err != nil { + return false, err + } + if pos >= arbutil.MessageIndex(latestValidatedState.MsgCount) { + log.Trace("create validation 
entry: nothing to do", "pos", pos, "latestMELValidatedMsgCount", latestValidatedState.MsgCount) + return false, nil + } + } msg, err := v.streamer.GetMessage(pos) if err != nil { return false, err diff --git a/staker/mel_validator.go b/staker/mel_validator.go index 6c461f8c24..bd413dc974 100644 --- a/staker/mel_validator.go +++ b/staker/mel_validator.go @@ -2,29 +2,45 @@ package staker import ( "context" + "encoding/json" "errors" "fmt" "math/big" + "net/url" + "regexp" + "sync" + "sync/atomic" "time" + "github.com/spf13/pflag" + "github.com/ethereum/go-ethereum/accounts/abi/bind/v2" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rlp" + "github.com/offchainlabs/nitro/arbnode/mel" "github.com/offchainlabs/nitro/arbnode/mel/extraction" "github.com/offchainlabs/nitro/arbnode/mel/recording" "github.com/offchainlabs/nitro/arbnode/mel/runner" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/daprovider" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/stopwaiter" + "github.com/offchainlabs/nitro/validator" + "github.com/offchainlabs/nitro/validator/client" + "github.com/offchainlabs/nitro/validator/client/redis" + "github.com/offchainlabs/nitro/validator/retry_wrapper" ) type MELValidator struct { stopwaiter.StopWaiter + config MELValidatorConfigFetcher arbDb ethdb.KeyValueStore l1Client *ethclient.Client @@ -35,16 +51,188 @@ type MELValidator struct { messageExtractor *melrunner.MessageExtractor dapReaders arbstate.DapReaderSource - lastValidatedParentChainBlock uint64 + latestValidatedGS validator.GoGlobalState + latestValidatedParentChainBlock atomic.Uint64 + + latestWasmModuleRoot common.Hash + redisValidator *redis.ValidationClient + executionSpawners 
[]validator.ExecutionSpawner + chosenValidator map[common.Hash]validator.ValidationSpawner + + // wasmModuleRoot + moduleMutex sync.Mutex + currentWasmModuleRoot common.Hash + pendingWasmModuleRoot common.Hash +} + +type MELValidatorConfig struct { + Enable bool `koanf:"enable"` + RedisValidationClientConfig redis.ValidationClientConfig `koanf:"redis-validation-client-config"` + ValidationServer rpcclient.ClientConfig `koanf:"validation-server" reload:"hot"` + ValidationServerConfigs []rpcclient.ClientConfig `koanf:"validation-server-configs"` + ValidationPoll time.Duration `koanf:"validation-poll" reload:"hot"` + CurrentModuleRoot string `koanf:"current-module-root"` + PendingUpgradeModuleRoot string `koanf:"pending-upgrade-module-root"` + ValidationServerConfigsList string `koanf:"validation-server-configs-list"` + ValidationSpawningAllowedAttempts uint64 `koanf:"validation-spawning-allowed-attempts" reload:"hot"` +} + +func (c *MELValidatorConfig) Validate() error { + if err := c.RedisValidationClientConfig.Validate(); err != nil { + return fmt.Errorf("failed to validate redis validation client config: %w", err) + } + streamsEnabled := c.RedisValidationClientConfig.Enabled() + if len(c.ValidationServerConfigs) == 0 { + c.ValidationServerConfigs = []rpcclient.ClientConfig{c.ValidationServer} + if c.ValidationServerConfigsList != "default" { + var executionServersConfigs []rpcclient.ClientConfig + if err := json.Unmarshal([]byte(c.ValidationServerConfigsList), &executionServersConfigs); err != nil && !streamsEnabled { + return fmt.Errorf("failed to parse block-validator validation-server-configs-list string: %w", err) + } + c.ValidationServerConfigs = executionServersConfigs + } + } + for i := range c.ValidationServerConfigs { + if err := c.ValidationServerConfigs[i].Validate(); err != nil { + return fmt.Errorf("failed to validate one of the block-validator validation-server-configs. 
url: %s, err: %w", c.ValidationServerConfigs[i].URL, err) + } + serverUrl := c.ValidationServerConfigs[i].URL + if len(serverUrl) > 0 && serverUrl != "self" && serverUrl != "self-auth" { + u, err := url.Parse(serverUrl) + if err != nil { + return fmt.Errorf("failed parsing validation server's url:%s err: %w", serverUrl, err) + } + if u.Scheme != "ws" && u.Scheme != "wss" { + return fmt.Errorf("validation server's url scheme is unsupported, it should either be ws or wss, url:%s", serverUrl) + } + } + } + return nil +} + +type MELValidatorConfigFetcher func() *MELValidatorConfig + +func MELValidatorConfigAddOptions(prefix string, f *pflag.FlagSet) { + f.Bool(prefix+".enable", DefaultMELValidatorConfig.Enable, "enable MEL state validation") + rpcclient.RPCClientAddOptions(prefix+".validation-server", f, &DefaultMELValidatorConfig.ValidationServer) + redis.ValidationClientConfigAddOptions(prefix+".redis-validation-client-config", f) + f.String(prefix+".validation-server-configs-list", DefaultMELValidatorConfig.ValidationServerConfigsList, "array of execution rpc configs given as a json string. 
time duration should be supplied in number indicating nanoseconds") + f.Duration(prefix+".validation-poll", DefaultMELValidatorConfig.ValidationPoll, "poll time to check validations") + f.String(prefix+".current-module-root", DefaultMELValidatorConfig.CurrentModuleRoot, "current wasm module root ('current' read from chain, 'latest' from machines/latest dir, or provide hash)") + f.String(prefix+".pending-upgrade-module-root", DefaultMELValidatorConfig.PendingUpgradeModuleRoot, "pending upgrade wasm module root to additionally validate (hash, 'latest' or empty)") + BlockValidatorDangerousConfigAddOptions(prefix+".dangerous", f) + f.Uint64(prefix+".validation-spawning-allowed-attempts", DefaultMELValidatorConfig.ValidationSpawningAllowedAttempts, "number of attempts allowed when trying to spawn a validation before erroring out") +} + +var DefaultMELValidatorConfig = MELValidatorConfig{ + Enable: false, + ValidationServerConfigsList: "default", + ValidationServer: rpcclient.DefaultClientConfig, + RedisValidationClientConfig: redis.DefaultValidationClientConfig, + ValidationPoll: time.Second, + CurrentModuleRoot: "current", + PendingUpgradeModuleRoot: "latest", + ValidationSpawningAllowedAttempts: 1, } -func NewMELValidator(arbDb ethdb.KeyValueStore, l1Client *ethclient.Client, messageExtractor *melrunner.MessageExtractor, dapReaders arbstate.DapReaderSource) *MELValidator { +func NewMELValidator( + config MELValidatorConfigFetcher, + arbDb ethdb.KeyValueStore, + l1Client *ethclient.Client, + stack *node.Node, + messageExtractor *melrunner.MessageExtractor, + dapReaders arbstate.DapReaderSource, + latestWasmModuleRoot common.Hash, +) (*MELValidator, error) { + var executionSpawners []validator.ExecutionSpawner + configs := config().ValidationServerConfigs + for i := range configs { + confFetcher := func() *rpcclient.ClientConfig { return &config().ValidationServerConfigs[i] } + executionSpawner := client.NewExecutionClient(confFetcher, stack) + executionSpawners = 
append(executionSpawners, executionSpawner) + } + if len(executionSpawners) == 0 { + return nil, errors.New("no enabled execution servers") + } + var redisValClient *redis.ValidationClient + if config().RedisValidationClientConfig.Enabled() { + var err error + redisValClient, err = redis.NewValidationClient(&config().RedisValidationClientConfig) + if err != nil { + return nil, fmt.Errorf("creating new redis validation client: %w", err) + } + } + if latestWasmModuleRoot == (common.Hash{}) { + return nil, errors.New("latestWasmModuleRoot not set") + } return &MELValidator{ - arbDb: arbDb, - l1Client: l1Client, - messageExtractor: messageExtractor, - dapReaders: dapReaders, + config: config, + arbDb: arbDb, + l1Client: l1Client, + messageExtractor: messageExtractor, + dapReaders: dapReaders, + latestWasmModuleRoot: latestWasmModuleRoot, + redisValidator: redisValClient, + executionSpawners: executionSpawners, + }, nil +} + +func (mv *MELValidator) Initialize(ctx context.Context) error { + config := mv.config() + currentModuleRoot := config.CurrentModuleRoot + switch currentModuleRoot { + case "latest": + mv.currentWasmModuleRoot = mv.latestWasmModuleRoot + case "current": + if (mv.currentWasmModuleRoot == common.Hash{}) { + return errors.New("wasmModuleRoot set to 'current' - but info not set from chain") + } + default: + mv.currentWasmModuleRoot = common.HexToHash(currentModuleRoot) + if (mv.currentWasmModuleRoot == common.Hash{}) { + return errors.New("current-module-root config value illegal") + } + } + pendingModuleRoot := config.PendingUpgradeModuleRoot + if pendingModuleRoot != "" { + if pendingModuleRoot == "latest" { + mv.pendingWasmModuleRoot = mv.latestWasmModuleRoot + } else { + valid, _ := regexp.MatchString("(0x)?[0-9a-fA-F]{64}", pendingModuleRoot) + mv.pendingWasmModuleRoot = common.HexToHash(pendingModuleRoot) + if (!valid || mv.pendingWasmModuleRoot == common.Hash{}) { + return errors.New("pending-upgrade-module-root config value illegal") + } + } + } 
+ log.Info("MELValidator initialized", "current", mv.currentWasmModuleRoot, "pending", mv.pendingWasmModuleRoot) + moduleRoots := mv.GetModuleRootsToValidate() + // First spawner is always RedisValidationClient if RedisStreams are enabled. + if mv.redisValidator != nil { + err := mv.redisValidator.Initialize(ctx, moduleRoots) + if err != nil { + return err + } + } + mv.chosenValidator = make(map[common.Hash]validator.ValidationSpawner) + for _, root := range moduleRoots { + if mv.redisValidator != nil && validator.SpawnerSupportsModule(mv.redisValidator, root) { + mv.chosenValidator[root] = mv.redisValidator + log.Info("validator chosen", "WasmModuleRoot", root, "chosen", "redis", "maxWorkers", mv.redisValidator.Capacity()) + } else { + for _, spawner := range mv.executionSpawners { + if validator.SpawnerSupportsModule(spawner, root) { + mv.chosenValidator[root] = spawner + log.Info("validator chosen", "WasmModuleRoot", root, "chosen", spawner.Name(), "maxWorkers", spawner.Capacity()) + break + } + } + if mv.chosenValidator[root] == nil { + return fmt.Errorf("cannot validate WasmModuleRoot %v", root) + } + } } + return nil } func (mv *MELValidator) Start(ctx context.Context) { @@ -65,28 +253,81 @@ func (mv *MELValidator) Start(ctx context.Context) { } // Create validation entry - entry, err := mv.CreateNextValidationEntry(ctx, mv.lastValidatedParentChainBlock, latestStakedAssertion.InboxMaxCount.Uint64()) + entry, err := mv.CreateNextValidationEntry(ctx, mv.latestValidatedParentChainBlock.Load(), latestStakedAssertion.InboxMaxCount.Uint64()) if err != nil { - log.Error("MEL validator: Error creating validation entry", "lastValidatedParentChainBlock", mv.lastValidatedParentChainBlock, "inboxMaxCount", latestStakedAssertion.InboxMaxCount.Uint64(), "err", err) - return time.Minute // wait for latestStakedAssertion to progress by the blockValidator + log.Error("MEL validator: Error creating validation entry", "latestValidatedParentChainBlock", 
mv.latestValidatedParentChainBlock.Load(), "inboxMaxCount", latestStakedAssertion.InboxMaxCount.Uint64(), "err", err) + return 0 + } + if entry == nil { // nothing to create, so lets wait for latestStakedAssertion to progress through blockValidator + return time.Minute } // Send validation entry to validation nodes - if err := mv.SendValidationEntry(ctx, entry); err != nil { + doneEntry, err := mv.SendValidationEntry(ctx, entry) + if err != nil { log.Error("MEL validator: Error sending validation entry", "err", err) + return 0 } // Advance validations + if err := mv.AdvanceValidations(ctx, doneEntry); err != nil { + log.Error("MEL validator: Error advancing validation status", "err", err) + } + mv.latestValidatedParentChainBlock.Store(entry.EndGSParentChainBlockNumber) + mv.latestValidatedGS = doneEntry.End return 0 }) } -func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValidatedParentChainBlock, toValidateMsgExtractionCount uint64) (*validationEntry, error) { - if lastValidatedParentChainBlock == 0 { // TODO: last validated. +func (mv *MELValidator) LatestValidatedMELState(ctx context.Context) (*mel.State, error) { + return mv.messageExtractor.GetState(ctx, mv.latestValidatedParentChainBlock.Load()) +} + +func (mv *MELValidator) SetCurrentWasmModuleRoot(hash common.Hash) error { + mv.moduleMutex.Lock() + defer mv.moduleMutex.Unlock() + + if (hash == common.Hash{}) { + return errors.New("trying to set zero as wasmModuleRoot") + } + if hash == mv.currentWasmModuleRoot { + return nil + } + if (mv.currentWasmModuleRoot == common.Hash{}) { + mv.currentWasmModuleRoot = hash + return nil + } + if mv.pendingWasmModuleRoot == hash { + log.Info("Block validator: detected progressing to pending machine", "hash", hash) + mv.currentWasmModuleRoot = hash + return nil + } + if mv.config().CurrentModuleRoot != "current" { + return nil + } + return fmt.Errorf( + "unexpected wasmModuleRoot! cannot validate! 
found %v, current %v, pending %v", + hash, mv.currentWasmModuleRoot, mv.pendingWasmModuleRoot, + ) +} + +func (mv *MELValidator) GetModuleRootsToValidate() []common.Hash { + mv.moduleMutex.Lock() + defer mv.moduleMutex.Unlock() + + validatingModuleRoots := []common.Hash{mv.currentWasmModuleRoot} + if mv.currentWasmModuleRoot != mv.pendingWasmModuleRoot && mv.pendingWasmModuleRoot != (common.Hash{}) { + validatingModuleRoots = append(validatingModuleRoots, mv.pendingWasmModuleRoot) + } + return validatingModuleRoots +} + +func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, latestValidatedParentChainBlock, toValidateMsgExtractionCount uint64) (*validationEntry, error) { + if latestValidatedParentChainBlock == 0 { // TODO: last validated. // ending position- bold staker latest posted assertion on chain that it agrees with (l1blockhash)- return nil, errors.New("trying to create validation entry for zero block number") } - preState, err := mv.messageExtractor.GetState(ctx, lastValidatedParentChainBlock) + preState, err := mv.messageExtractor.GetState(ctx, latestValidatedParentChainBlock) if err != nil { return nil, err } @@ -104,7 +345,8 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid if err != nil { return nil, err } - for i := lastValidatedParentChainBlock + 1; ; i++ { + var state *mel.State // to be used in endGS + for i := latestValidatedParentChainBlock + 1; ; i++ { header, err := mv.l1Client.HeaderByNumber(ctx, new(big.Int).SetUint64(i)) if err != nil { return nil, err @@ -123,7 +365,7 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid if err := receiptsRecorder.Initialize(ctx); err != nil { return nil, err } - state, _, _, _, err := melextraction.ExtractMessages(ctx, preState, header, recordingDAPReaders, delayedMsgRecordingDB, txsRecorder, receiptsRecorder, nil) + state, _, _, _, err = melextraction.ExtractMessages(ctx, preState, header, recordingDAPReaders, 
delayedMsgRecordingDB, txsRecorder, receiptsRecorder, nil) if err != nil { return nil, fmt.Errorf("error calling melextraction.ExtractMessages in recording mode: %w", err) } @@ -142,11 +384,66 @@ func (mv *MELValidator) CreateNextValidationEntry(ctx context.Context, lastValid } preState = state } + endGs := validator.GoGlobalState{ + // After MEL fields get added to GlobalState + // MELStateHash: state.Hash(), + // PositionInMEL: preState.MsgCount - 1, + } return &validationEntry{ - Preimages: preimages, + EndGSParentChainBlockNumber: state.ParentChainBlockNumber, + Start: mv.latestValidatedGS, + End: endGs, + Preimages: preimages, + }, nil +} + +func (mv *MELValidator) SendValidationEntry(ctx context.Context, entry *validationEntry) (*validationDoneEntry, error) { + wasmRoots := mv.GetModuleRootsToValidate() + var runs []validator.ValidationRun + for _, moduleRoot := range wasmRoots { + chosenSpawner := mv.chosenValidator[moduleRoot] + spawner := retry_wrapper.NewValidationSpawnerRetryWrapper(chosenSpawner) + spawner.StopWaiter.Start(ctx, mv) + input, err := entry.ToInput(nil) + if err != nil && ctx.Err() == nil { + return nil, fmt.Errorf("%w: error preparing validation", err) + } + if ctx.Err() != nil { + return nil, ctx.Err() + } + run := spawner.LaunchWithNAllowedAttempts(input, moduleRoot, mv.config().ValidationSpawningAllowedAttempts) + log.Trace("sendValidations: launched", "pos", entry.Pos, "moduleRoot", moduleRoot) + runs = append(runs, run) + } + for _, run := range runs { + runEnd, err := run.Await(ctx) + if err == nil && runEnd != entry.End { + err = fmt.Errorf("validation failed: got %v", runEnd) + } + if err != nil { + return nil, fmt.Errorf("MEL validator: error while validating: %w", err) + } + } + return &validationDoneEntry{ + Success: true, + Start: entry.Start, + End: entry.End, + WasmModuleRoots: wasmRoots, }, nil } -func (mv *MELValidator) SendValidationEntry(ctx context.Context, entry *validationEntry) error { +func (mv *MELValidator) 
AdvanceValidations(ctx context.Context, doneEntry *validationDoneEntry) error { + info := GlobalStateValidatedInfo{ + GlobalState: doneEntry.End, + WasmRoots: doneEntry.WasmModuleRoots, + } + encoded, err := rlp.EncodeToBytes(info) + if err != nil { + return err + } + err = mv.arbDb.Put(lastMELGlobalStateValidatedInfoKey, encoded) + if err != nil { + return err + } return nil } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index c4554a76ff..ac98c8d1da 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -18,6 +18,7 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbnode/mel" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/daprovider" @@ -72,6 +73,10 @@ type TransactionStreamerInterface interface { ChainConfig() *params.ChainConfig } +type MELValidatorInterface interface { + LatestValidatedMELState(context.Context) (*mel.State, error) +} + type InboxReaderInterface interface { GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, common.Hash, error) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) @@ -132,18 +137,20 @@ type FullBatchInfo struct { type validationEntry struct { Stage ValidationEntryStage // Valid since ReadyforRecord: - Pos arbutil.MessageIndex - Start validator.GoGlobalState - End validator.GoGlobalState - HasDelayedMsg bool - DelayedMsgNr uint64 - ChainConfig *params.ChainConfig + Pos arbutil.MessageIndex + HasDelayedMsg bool + DelayedMsgNr uint64 + ChainConfig *params.ChainConfig + EndGSParentChainBlockNumber uint64 // MEL relevant field + Start validator.GoGlobalState // MEL relevant field + End validator.GoGlobalState // MEL relevant field + // valid when created, removed after recording msg *arbostypes.MessageWithMetadata // Has batch when created - others could be added on record 
BatchInfo []validator.BatchInfo // Valid since Ready - Preimages daprovider.PreimagesMap + Preimages daprovider.PreimagesMap // MEL relevant field UserWasms state.UserWasms DelayedMsg []byte } diff --git a/staker/block_validator_schema.go b/staker/validator_schema.go similarity index 53% rename from staker/block_validator_schema.go rename to staker/validator_schema.go index 5b76a292b2..3f9a10086c 100644 --- a/staker/block_validator_schema.go +++ b/staker/validator_schema.go @@ -21,6 +21,7 @@ type GlobalStateValidatedInfo struct { } var ( - lastGlobalStateValidatedInfoKey = []byte("_lastGlobalStateValidatedInfo") // contains a rlp encoded lastBlockValidatedDbInfo - legacyLastBlockValidatedInfoKey = []byte("_lastBlockValidatedInfo") // LEGACY - contains a rlp encoded lastBlockValidatedDbInfo + lastMELGlobalStateValidatedInfoKey = []byte("_lastMELGlobalStateValidatedInfo") // contains a rlp encoded GlobalStateValidatedInfo of the last validated MEL state + lastGlobalStateValidatedInfoKey = []byte("_lastGlobalStateValidatedInfo") // contains a rlp encoded lastBlockValidatedDbInfo + legacyLastBlockValidatedInfoKey = []byte("_lastBlockValidatedInfo") // LEGACY - contains a rlp encoded lastBlockValidatedDbInfo ) diff --git a/system_tests/message_extraction_layer_validation_test.go b/system_tests/message_extraction_layer_validation_test.go index 193e398f99..ca411afde8 100644 --- a/system_tests/message_extraction_layer_validation_test.go +++ b/system_tests/message_extraction_layer_validation_test.go @@ -58,7 +58,10 @@ func TestMELValidator_Recording_Preimages(t *testing.T) { // MEL Validator: create validation entry blobReaderRegistry := daprovider.NewDAProviderRegistry() Require(t, blobReaderRegistry.SetupBlobReader(daprovider.NewReaderForBlobReader(builder.L1.L1BlobReader))) - melValidator := staker.NewMELValidator(builder.L2.ConsensusNode.ConsensusDB, builder.L1.Client, builder.L2.ConsensusNode.MessageExtractor, blobReaderRegistry) + config := func() 
*staker.MELValidatorConfig { return &staker.DefaultMELValidatorConfig } + Require(t, config().Validate()) + melValidator, err := staker.NewMELValidator(config, builder.L2.ConsensusNode.ConsensusDB, builder.L1.Client, builder.L1.Stack, builder.L2.ConsensusNode.MessageExtractor, blobReaderRegistry, common.MaxHash) + Require(t, err) extractedMsgCount, err := builder.L2.ConsensusNode.TxStreamer.GetMessageCount() Require(t, err) entry, err := melValidator.CreateNextValidationEntry(ctx, startBlock, uint64(extractedMsgCount))