From 7b940fcc62f26faa8c549a151595e3ab2e05a5d7 Mon Sep 17 00:00:00 2001 From: Wen Date: Thu, 7 May 2026 12:43:56 -0700 Subject: [PATCH 1/9] giga: route BlockByHash through sei-db BlockDB (CON-256) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces data.State's in-memory hash→height map with a writer/reader pair against sei-db/ledger_db/block.BlockDB (mem_block_db backend for now). runExecute writes the block before each executeBlock call; GigaRouter.BlockByHash reads via BlockDB.GetBlockByHash. The data-layer index, GlobalBlockByHash, and TestGlobalBlockByHash are removed. Crash recovery for BlockDB is not wired yet — the in-memory backend is per-process and reads return "unknown" for blocks finalized but not yet started executing. Will be revisited when a persistent backend lands. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../internal/autobahn/data/state.go | 41 ------------ .../internal/autobahn/data/state_test.go | 52 --------------- sei-tendermint/internal/p2p/giga_blockdb.go | 66 +++++++++++++++++++ sei-tendermint/internal/p2p/giga_router.go | 56 +++++++++------- 4 files changed, 97 insertions(+), 118 deletions(-) create mode 100644 sei-tendermint/internal/p2p/giga_blockdb.go diff --git a/sei-tendermint/internal/autobahn/data/state.go b/sei-tendermint/internal/autobahn/data/state.go index 89b3b537bb..0bf846d332 100644 --- a/sei-tendermint/internal/autobahn/data/state.go +++ b/sei-tendermint/internal/autobahn/data/state.go @@ -149,17 +149,6 @@ type inner struct { blocks map[types.GlobalBlockNumber]*types.Block // [first,nextBlock) + subset of [nextBlock,nextQC) appProposals map[types.GlobalBlockNumber]appProposalWithTimestamp // [first,nextAppProposal) - // blockHashes is a hash → height index mirroring blocks. Maintained - // in lockstep with blocks via insertBlock / pruneFirst, so it covers - // exactly the same retain window without a separate prune cursor or - // startup warmup. Powers BlockByHash. 
- // - // TODO(autobahn): remove once a writer is wired into block execution - // that populates sei-db/ledger_db/block.BlockDB. BlockDB has a built-in - // hash index that survives process restart and lives outside Autobahn's - // RetainHeight pruning, making this in-memory index obsolete. - blockHashes map[types.BlockHeaderHash]types.GlobalBlockNumber - // first <= nextAppProposal <= nextBlockToPersist <= nextBlock <= nextQC // // This invariant guarantees no race between pruning and persisting: @@ -179,7 +168,6 @@ func newInner(committee *types.Committee) *inner { qcs: map[types.GlobalBlockNumber]*types.FullCommitQC{}, blocks: map[types.GlobalBlockNumber]*types.Block{}, appProposals: map[types.GlobalBlockNumber]appProposalWithTimestamp{}, - blockHashes: map[types.BlockHeaderHash]types.GlobalBlockNumber{}, first: first, nextAppProposal: first, nextBlockToPersist: first, @@ -243,7 +231,6 @@ func (i *inner) insertBlock(committee *types.Committee, n types.GlobalBlockNumbe return fmt.Errorf("block %d header hash mismatch: want %v, got %v", n, want, got) } i.blocks[n] = block - i.blockHashes[got] = n return nil } @@ -269,7 +256,6 @@ func (i *inner) pruneFirst(now time.Time, m *dataMetrics) { delete(i.appProposals, i.first) delete(i.blocks, i.first) delete(i.qcs, i.first) - delete(i.blockHashes, b.Header().Hash()) i.first += 1 } @@ -449,33 +435,6 @@ func (s *State) NextBlock() types.GlobalBlockNumber { panic("unreachable") } -// GlobalBlockByHash returns the finalized GlobalBlock whose stored header -// hashes to the given value, or None if no such block is currently in the -// retained range. The lookup-and-construct happens under a single lock so -// the returned block matches the looked-up hash atomically — pruning can't -// change which height a hash maps to between the index check and the block -// construction. Tracks the same retain window as Block / GlobalBlock since -// the hash index is maintained in lockstep by insertBlock / pruneFirst. 
-// -// Returns an error in the signature for forward-compat with the eventual -// switch to sei-db/ledger_db/block.BlockDB.GetBlockByHash. Today's -// in-memory implementation never errors. -// -// TODO(autobahn): when BlockDB is wired, take a ctx parameter and narrow -// the error contract — db-internal errors should surface by shutting down -// the persistence background task (matching how persistence handles errors -// today), so the query path's error stays bounded to context.Canceled. -func (s *State) GlobalBlockByHash(hash types.BlockHeaderHash) (utils.Option[*types.GlobalBlock], error) { - for inner := range s.inner.Lock() { - n, ok := inner.blockHashes[hash] - if !ok { - return utils.None[*types.GlobalBlock](), nil - } - return utils.Some(inner.globalBlockAt(s.Committee(), n)), nil - } - panic("unreachable") -} - // Block returns the block with the given global number. // This function is used for syncing - GlobalBlock can be derived from Block and FullCommitQC, // which have to be fetched upfront anyway. diff --git a/sei-tendermint/internal/autobahn/data/state_test.go b/sei-tendermint/internal/autobahn/data/state_test.go index 6c99c1f3be..d85f9d777b 100644 --- a/sei-tendermint/internal/autobahn/data/state_test.go +++ b/sei-tendermint/internal/autobahn/data/state_test.go @@ -331,58 +331,6 @@ func TestPushBlockAcceptsBlockWithQC(t *testing.T) { require.Equal(t, blocks[0], got) } -// TestGlobalBlockByHash isolates the hash-keyed lookup from the -// consensus-driven harness. 
We push a single QC + block via the same code -// path the network would (insertBlock writes to inner.blockHashes), then: -// -// - the block's own header hash resolves to Some(*GlobalBlock) with the -// expected height/header/payload — the index points at the right -// block, atomically with the block construction -// - a zero hash and a random hash both resolve to None — distinct -// unknown-hash inputs all read as "not found", no panics -// - err is nil throughout — today's in-memory implementation has no -// failure mode; the error return on GlobalBlockByHash is reserved -// for the future BlockDB-backed path -func TestGlobalBlockByHash(t *testing.T) { - ctx := t.Context() - rng := utils.TestRng() - committee, keys := types.GenCommittee(rng, 3) - - state := utils.OrPanic1(NewState(&Config{ - Committee: committee, - }, utils.OrPanic1(NewDataWAL(utils.None[string](), committee)))) - - qc, blocks := TestCommitQC(rng, committee, keys, utils.None[*types.CommitQC]()) - require.NoError(t, state.PushQC(ctx, qc, blocks)) - gr := qc.QC().GlobalRange(committee) - n := gr.First - wantBlock := blocks[0] - wantHash := wantBlock.Header().Hash() - - // Known hash → Some with correct fields. - gotOpt, err := state.GlobalBlockByHash(wantHash) - require.NoError(t, err) - gotGB, ok := gotOpt.Get() - require.True(t, ok, "GlobalBlockByHash(known) returned None") - require.Equal(t, n, gotGB.GlobalNumber) - require.Equal(t, wantBlock.Header(), gotGB.Header) - require.Equal(t, wantBlock.Payload(), gotGB.Payload) - - // Zero hash → None. - zeroOpt, err := state.GlobalBlockByHash(types.BlockHeaderHash{}) - require.NoError(t, err) - _, ok = zeroOpt.Get() - require.False(t, ok, "GlobalBlockByHash(zero) returned Some") - - // Random unknown hash → None. 
- var randHash types.BlockHeaderHash - rng.Read(randHash[:]) - randOpt, err := state.GlobalBlockByHash(randHash) - require.NoError(t, err) - _, ok = randOpt.Get() - require.False(t, ok, "GlobalBlockByHash(random) returned Some") -} - // ── Reconcile tests (grouped by case number) ────────────────────────── // TestStateRecoveryBlocksOnly simulates a crash after blocks are written diff --git a/sei-tendermint/internal/p2p/giga_blockdb.go b/sei-tendermint/internal/p2p/giga_blockdb.go new file mode 100644 index 0000000000..3ddd5d89e2 --- /dev/null +++ b/sei-tendermint/internal/p2p/giga_blockdb.go @@ -0,0 +1,66 @@ +package p2p + +import ( + "encoding/binary" + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" + "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/pb" + atypes "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/types" + "github.com/sei-protocol/sei-chain/sei-tendermint/internal/protoutils" + "github.com/sei-protocol/sei-chain/sei-tendermint/libs/utils" +) + +// encodeBinaryBlock packs a finalized GlobalBlock into BlockDB's BinaryBlock +// shape. Layout of BlockData: [4 LE timestamp-proto length][timestamp proto] +// [block proto]. The block proto carries header + payload (txs included), +// matching the WAL format. Transactions is left nil — BlockData is fully +// self-describing for today's BlockByHash read path; we'll switch to indexed +// per-tx storage when GetTransactionByHash gets a real consumer. +func encodeBinaryBlock(gb *atypes.GlobalBlock) *block.BinaryBlock { + tsBytes := atypes.TimeConv.Marshal(gb.Timestamp) + blkBytes := protoutils.Marshal(&pb.Block{ + Header: atypes.BlockHeaderConv.Encode(gb.Header), + Payload: atypes.PayloadConv.Encode(gb.Payload), + }) + out := make([]byte, 4+len(tsBytes)+len(blkBytes)) + binary.LittleEndian.PutUint32(out[:4], uint32(len(tsBytes))) //nolint:gosec // tsBytes is a small proto Timestamp. 
+ copy(out[4:], tsBytes) + copy(out[4+len(tsBytes):], blkBytes) + hash := gb.Header.Hash() + return &block.BinaryBlock{ + Height: uint64(gb.GlobalNumber), + Hash: hash.Bytes(), + BlockData: out, + } +} + +// decodeBinaryBlock reconstructs a GlobalBlock from BlockDB's BinaryBlock +// shape produced by encodeBinaryBlock. FinalAppState is left None — the +// BlockByHash read path doesn't read it (translateGlobalBlock ignores it), +// and we don't want to wire AppProposal serialization through BlockDB until +// there's a consumer. +func decodeBinaryBlock(bb *block.BinaryBlock) (*atypes.GlobalBlock, error) { + if len(bb.BlockData) < 4 { + return nil, fmt.Errorf("block data too short: %d bytes", len(bb.BlockData)) + } + tsLen := binary.LittleEndian.Uint32(bb.BlockData[:4]) + if uint64(len(bb.BlockData)) < uint64(4+tsLen) { + return nil, fmt.Errorf("block data truncated: have %d, need >=%d", len(bb.BlockData), 4+tsLen) + } + ts, err := atypes.TimeConv.Unmarshal(bb.BlockData[4 : 4+tsLen]) + if err != nil { + return nil, fmt.Errorf("decode timestamp: %w", err) + } + b, err := atypes.BlockConv.Unmarshal(bb.BlockData[4+tsLen:]) + if err != nil { + return nil, fmt.Errorf("decode block: %w", err) + } + return &atypes.GlobalBlock{ + Header: b.Header(), + Timestamp: ts, + GlobalNumber: atypes.GlobalBlockNumber(bb.Height), + Payload: b.Payload(), + FinalAppState: utils.None[*atypes.AppProposal](), + }, nil +} diff --git a/sei-tendermint/internal/p2p/giga_router.go b/sei-tendermint/internal/p2p/giga_router.go index 5434979ab0..1582c7aadf 100644 --- a/sei-tendermint/internal/p2p/giga_router.go +++ b/sei-tendermint/internal/p2p/giga_router.go @@ -8,6 +8,8 @@ import ( "slices" "time" + "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" + memblockdb "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block/mem_block_db" abci "github.com/sei-protocol/sei-chain/sei-tendermint/abci/types" "github.com/sei-protocol/sei-chain/sei-tendermint/crypto" 
"github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/consensus" @@ -53,6 +55,14 @@ type GigaRouter struct { service *giga.Service poolIn *giga.Pool[NodePublicKey, rpc.Server[giga.API]] poolOut *giga.Pool[NodePublicKey, rpc.Client[giga.API]] + // blockDB indexes finalized blocks by hash. Populated synchronously by + // runExecute right before each block is handed to executeBlock; read by + // BlockByHash. Today's instance is mem_block_db (in-memory), so it does + // not survive process restarts — but neither does data.State's prior + // hash index, and the read path is best-effort (CometBFT semantics for + // unknown hash is &ResultBlock{Block: nil}). Restart-safe repopulation + // belongs to a follow-up that wires a persistent BlockDB. + blockDB block.BlockDB // lastCommitQCRecv is subscribed once at construction and reused for the // lifetime of the GigaRouter. Load() is lock-free (a single @@ -103,6 +113,7 @@ func NewGigaRouter(cfg *GigaRouterConfig, key NodeSecretKey) (*GigaRouter, error service: giga.NewService(consensusState), poolIn: giga.NewPool[NodePublicKey, rpc.Server[giga.API]](), poolOut: giga.NewPool[NodePublicKey, rpc.Client[giga.API]](), + blockDB: memblockdb.NewMemBlockDB(), // Subscribe once here (takes avail's internal lock once); subsequent // Load() calls from RPC handlers are lock-free atomic pointer reads. @@ -146,14 +157,6 @@ func (r *GigaRouter) MaxGasPerBlock() int64 { // evmrpc does not read them on the receipt path. If gb.Header is nil // BlockID.Hash also stays empty; if gb.Payload is nil Block.Data.Txs // stays empty (see the malformed-block handling below). -// -// TODO(autobahn): switch this to read from sei-db/ledger_db/block.BlockDB -// once a writer is wired (e.g. from app.FinalizeBlocker or executeBlock). 
-// Today no production code calls BlockDB.WriteBlock, so Autobahn's in-memory -// data.State is the only place a full block lives — but it's pruned per -// Sei's RetainHeight and exposes only a height index (no GetBlockByHash). -// BlockDB has the right shape (height + hash indexes, async pruning) and -// is the long-term home for this read path. func (r *GigaRouter) BlockByNumber(ctx context.Context, n atypes.GlobalBlockNumber) (*coretypes.ResultBlock, error) { gb, err := r.data.GlobalBlock(ctx, n) if err != nil { @@ -175,29 +178,24 @@ func (r *GigaRouter) BlockByNumber(ctx context.Context, n atypes.GlobalBlockNumb // (same translation as BlockByNumber). Matches CometBFT semantics for // unknown hashes: returns &ResultBlock{Block: nil} with no error. // -// Lookup-and-construct happens under a single data.State lock acquire, so -// the returned block matches the requested hash atomically. Hashes below -// the pruning watermark are not indexed and read as "unknown". Wrong-size -// inputs are rejected at the call site (env.BlockByHash) so this method -// can stay strongly typed on atypes.BlockHeaderHash. -// -// TODO(autobahn): replace this with a direct read from -// sei-db/ledger_db/block.BlockDB.GetBlockByHash once a writer is wired into -// block execution. The data.State-side index can also go away at that point. +// Reads from sei-db/ledger_db/block.BlockDB, which runExecute populates +// just before each block is handed to the app. Blocks finalized but not +// yet started executing are not yet indexed and read as "unknown" — same +// shape CometBFT returns for an unknown hash. Wrong-size inputs are +// rejected at the call site (env.BlockByHash) so this method can stay +// strongly typed on atypes.BlockHeaderHash. 
func (r *GigaRouter) BlockByHash(ctx context.Context, hash atypes.BlockHeaderHash) (*coretypes.ResultBlock, error) { - opt, err := r.data.GlobalBlockByHash(hash) + bb, ok, err := r.blockDB.GetBlockByHash(ctx, hash.Bytes()) if err != nil { - return nil, fmt.Errorf("data.GlobalBlockByHash: %w", err) + return nil, fmt.Errorf("blockDB.GetBlockByHash: %w", err) } - // Reject the unknown-hash case here so translateGlobalBlock can rely - // on the *GlobalBlock type contract (non-nil, with non-nil Header - // and Payload) — same way executeBlock dereferences b.Header - // without checking. Mirrors CometBFT's BlockStore.LoadBlockByHash - // returning &ResultBlock{Block: nil} for an unknown hash. - gb, ok := opt.Get() if !ok { return &coretypes.ResultBlock{}, nil } + gb, err := decodeBinaryBlock(bb) + if err != nil { + return nil, fmt.Errorf("decodeBinaryBlock: %w", err) + } return r.translateGlobalBlock(gb), nil } @@ -359,6 +357,14 @@ func (r *GigaRouter) runExecute(ctx context.Context) error { if err != nil { return fmt.Errorf("r.data.GlobalBlock(%v): %w", n, err) } + // Persist to BlockDB before execution. WriteBlock provides + // read-your-writes within this process, so any concurrent RPC + // BlockByHash sees the block from this point forward. The data + // layer's WAL remains the primary durability story; BlockDB is the + // hash index, not the source of truth on restart. 
+ if err := r.blockDB.WriteBlock(ctx, encodeBinaryBlock(b)); err != nil { + return fmt.Errorf("r.blockDB.WriteBlock(%v): %w", n, err) + } commitResp, err := r.executeBlock(ctx, b) if err != nil { return fmt.Errorf("r.executeBlock(%v): %w", n, err) From 2d80ad182cae8067f9e8d830888fc95f0ea6ca22 Mon Sep 17 00:00:00 2001 From: Wen Date: Thu, 7 May 2026 14:09:56 -0700 Subject: [PATCH 2/9] sei-db: type BlockDB on Block/Transaction interfaces (CON-256) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces the BinaryBlock/BinaryTransaction byte-container types with Block/Transaction interfaces (Hash/Height/Time/Transactions and Hash/Bytes). mem_block_db stores the interface directly, so the tx-by-hash index works naturally without serialization. blocksim and the cross-backend tests get small synthetic implementations of the interfaces. On the giga side, the proto encode/decode helpers are gone — replaced with a globalBlockAdapter that wraps *atypes.GlobalBlock. Per-tx hashes use tmhash.Sum (sha256), matching CometBFT's tx-hash convention. The single translateBlock now serves both BlockByNumber (data.State path, wrapped via the adapter) and BlockByHash (BlockDB path), so the read path emits the same shape regardless of source. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sei-db/ledger_db/block/block_db.go | 48 +++--- .../block/block_db_test/block_db_test.go | 154 +++++++++--------- .../block/blocksim/block_generator.go | 55 +++++-- sei-db/ledger_db/block/blocksim/blocksim.go | 19 ++- .../block/mem_block_db/mem_block_db.go | 47 +++--- .../block/mem_block_db/mem_block_db_test.go | 40 +++-- sei-tendermint/internal/p2p/giga_blockdb.go | 86 ++++------ sei-tendermint/internal/p2p/giga_router.go | 42 ++--- 8 files changed, 256 insertions(+), 235 deletions(-) diff --git a/sei-db/ledger_db/block/block_db.go b/sei-db/ledger_db/block/block_db.go index 564e97b7e8..4a2f670d3a 100644 --- a/sei-db/ledger_db/block/block_db.go +++ b/sei-db/ledger_db/block/block_db.go @@ -3,33 +3,39 @@ package block import ( "context" "errors" + "time" ) // ErrNoBlocks is returned by GetLowestBlockHeight and GetHighestBlockHeight // when the database contains no blocks. var ErrNoBlocks = errors.New("block db: no blocks") -// A binary transaction with its hash. -type BinaryTransaction struct { - // The hash of the transaction. - Hash []byte - // The binary transaction data. - Transaction []byte +// Transaction is the BlockDB's view of a single transaction inside a block: +// its hash plus its raw bytes. Implementations are expected to be cheap +// (typically just a struct returning pre-computed fields). +type Transaction interface { + // Hash returns the canonical transaction hash used for indexing. + Hash() []byte + // Bytes returns the raw, on-the-wire transaction bytes. + Bytes() []byte } -// A binary block with its transactions and hash. -type BinaryBlock struct { - // The height of the block. Must be unique. - Height uint64 - // The hash of the block. Must be unique. - Hash []byte - // The binary block data, not including transaction data (unless you are ok with wasting space) - BlockData []byte - // The transactions in the block and their hashes. 
- Transactions []*BinaryTransaction +// Block is the BlockDB's view of a finalized block. The interface intentionally +// exposes only what BlockDB itself needs to index and serve reads — backends +// must not assume any particular concrete implementation. Methods returning +// slices may allocate; callers that index repeatedly should cache the result. +type Block interface { + // Hash returns the canonical block hash used for indexing. + Hash() []byte + // Height returns the block height (used as the key for the height index). + Height() uint64 + // Time returns the block timestamp. + Time() time.Time + // Transactions returns the block's transactions in order. + Transactions() []Transaction } -// A database for storing binary block and transaction data. +// A database for storing finalized block and transaction data. // // This store is fully threadsafe. All writes are atomic (that is, after a crash you will either see the write or // you will not see it at all, i.e. partial writes are not possible). Multiple writes are not atomic with respect @@ -41,7 +47,7 @@ type BlockDB interface { // // This method may return immediately and does not necessarily wait for the block to be written to disk. // Call Flush() if you need to wait until the block is written to disk. - WriteBlock(ctx context.Context, block *BinaryBlock) error + WriteBlock(ctx context.Context, block Block) error // Blocks until all pending writes are flushed to disk. Any call to WriteBlock issued before calling Flush() // will be crash-durable after Flush() returns. Calls to WriteBlock() made concurrently with Flush() may or @@ -53,13 +59,13 @@ type BlockDB interface { Flush(ctx context.Context) error // Retrieves a block by its hash. - GetBlockByHash(ctx context.Context, hash []byte) (block *BinaryBlock, ok bool, err error) + GetBlockByHash(ctx context.Context, hash []byte) (block Block, ok bool, err error) // Retrieves a block by its height. 
- GetBlockByHeight(ctx context.Context, height uint64) (block *BinaryBlock, ok bool, err error) + GetBlockByHeight(ctx context.Context, height uint64) (block Block, ok bool, err error) // Retrieves a transaction by its hash. - GetTransactionByHash(ctx context.Context, hash []byte) (transaction *BinaryTransaction, ok bool, err error) + GetTransactionByHash(ctx context.Context, hash []byte) (transaction Transaction, ok bool, err error) // Schedules pruning for all blocks with a height less than the given height. Pruning is asynchronous, // and so this method does not provide any guarantees about when the pruning will complete. It is possible diff --git a/sei-db/ledger_db/block/block_db_test/block_db_test.go b/sei-db/ledger_db/block/block_db_test/block_db_test.go index ed0ae33f9d..b014468431 100644 --- a/sei-db/ledger_db/block/block_db_test/block_db_test.go +++ b/sei-db/ledger_db/block/block_db_test/block_db_test.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "testing" + "time" crand "github.com/sei-protocol/sei-chain/sei-db/common/rand" "github.com/sei-protocol/sei-chain/sei-db/common/unit" @@ -35,19 +36,38 @@ func newMemBlockDBBuilder() blockDBBuilder { } } -func makeBlock(height uint64, numTxs int) *block.BinaryBlock { - txs := make([]*block.BinaryTransaction, numTxs) +type testTx struct { + hash []byte + bytes []byte +} + +func (t *testTx) Hash() []byte { return t.hash } +func (t *testTx) Bytes() []byte { return t.bytes } + +type testBlock struct { + hash []byte + height uint64 + time time.Time + txs []block.Transaction +} + +func (b *testBlock) Hash() []byte { return b.hash } +func (b *testBlock) Height() uint64 { return b.height } +func (b *testBlock) Time() time.Time { return b.time } +func (b *testBlock) Transactions() []block.Transaction { return b.txs } + +func makeBlock(height uint64, numTxs int) *testBlock { + txs := make([]block.Transaction, numTxs) for i := 0; i < numTxs; i++ { - txs[i] = &block.BinaryTransaction{ - Hash: []byte(fmt.Sprintf("tx-%d-%d", 
height, i)), - Transaction: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), + txs[i] = &testTx{ + hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), + bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), } } - return &block.BinaryBlock{ - Height: height, - Hash: []byte(fmt.Sprintf("block-%d", height)), - BlockData: []byte(fmt.Sprintf("block-data-%d", height)), - Transactions: txs, + return &testBlock{ + hash: []byte(fmt.Sprintf("block-%d", height)), + height: height, + txs: txs, } } @@ -86,7 +106,7 @@ func TestWriteAndGetBlockByHash(t *testing.T) { blk := makeBlock(5, 3) requireNoError(t, db.WriteBlock(ctx, blk)) - got, ok, err := db.GetBlockByHash(ctx, blk.Hash) + got, ok, err := db.GetBlockByHash(ctx, blk.Hash()) requireNoError(t, err) requireTrue(t, ok, "expected block with matching hash") requireBlockEqual(t, blk, got) @@ -103,12 +123,12 @@ func TestGetTransactionByHash(t *testing.T) { blk := makeBlock(1, 4) requireNoError(t, db.WriteBlock(ctx, blk)) - for _, tx := range blk.Transactions { - got, ok, err := db.GetTransactionByHash(ctx, tx.Hash) + for _, tx := range blk.Transactions() { + got, ok, err := db.GetTransactionByHash(ctx, tx.Hash()) requireNoError(t, err) - requireTrue(t, ok, "expected transaction with hash %s", tx.Hash) - requireBytesEqual(t, tx.Hash, got.Hash, "transaction hash") - requireBytesEqual(t, tx.Transaction, got.Transaction, "transaction data") + requireTrue(t, ok, "expected transaction with hash %s", tx.Hash()) + requireBytesEqual(t, tx.Hash(), got.Hash(), "transaction hash") + requireBytesEqual(t, tx.Bytes(), got.Bytes(), "transaction data") } }) } @@ -150,16 +170,16 @@ func TestMultipleBlocks(t *testing.T) { requireNoError(t, err) defer db.Close(ctx) - blocks := make([]*block.BinaryBlock, 10) + blocks := make([]*testBlock, 10) for i := range blocks { blocks[i] = makeBlock(uint64(i+1), 2) requireNoError(t, db.WriteBlock(ctx, blocks[i])) } for _, blk := range blocks { - got, ok, err := db.GetBlockByHeight(ctx, blk.Height) + got, ok, 
err := db.GetBlockByHeight(ctx, blk.Height()) requireNoError(t, err) - requireTrue(t, ok, "expected block at height %d", blk.Height) + requireTrue(t, ok, "expected block at height %d", blk.Height()) requireBlockEqual(t, blk, got) } }) @@ -201,10 +221,10 @@ func TestPrunePreservesUnprunedTransactions(t *testing.T) { requireNoError(t, db.Flush(ctx)) requireNoError(t, db.Prune(ctx, 2)) - for _, tx := range survivingBlock.Transactions { - _, ok, err := db.GetTransactionByHash(ctx, tx.Hash) + for _, tx := range survivingBlock.Transactions() { + _, ok, err := db.GetTransactionByHash(ctx, tx.Hash()) requireNoError(t, err) - requireTrue(t, ok, "expected transaction %s to survive pruning", tx.Hash) + requireTrue(t, ok, "expected transaction %s to survive pruning", tx.Hash()) } }) } @@ -249,11 +269,11 @@ func TestCloseAndReopen(t *testing.T) { requireTrue(t, ok, "expected block to survive close/reopen") requireBlockEqual(t, blk, got) - for _, tx := range blk.Transactions { - gotTx, ok, err := db2.GetTransactionByHash(ctx, tx.Hash) + for _, tx := range blk.Transactions() { + gotTx, ok, err := db2.GetTransactionByHash(ctx, tx.Hash()) requireNoError(t, err) requireTrue(t, ok, "expected tx to survive close/reopen") - requireBytesEqual(t, tx.Transaction, gotTx.Transaction, "transaction data") + requireBytesEqual(t, tx.Bytes(), gotTx.Bytes(), "transaction data") } }) } @@ -307,7 +327,7 @@ func TestBulkWriteAndQuery(t *testing.T) { requireNoError(t, err) defer db.Close(ctx) - blocks := make([]*block.BinaryBlock, numBlocks) + blocks := make([]*testBlock, numBlocks) for i := range blocks { blocks[i] = makeRandomBlock(testRng, uint64(i+1), txsPerBlock) requireNoError(t, db.WriteBlock(ctx, blocks[i])) @@ -316,22 +336,22 @@ func TestBulkWriteAndQuery(t *testing.T) { requireNoError(t, db.Flush(ctx)) for _, expected := range blocks { - byHeight, ok, err := db.GetBlockByHeight(ctx, expected.Height) + byHeight, ok, err := db.GetBlockByHeight(ctx, expected.Height()) requireNoError(t, err) - 
requireTrue(t, ok, "block not found by height %d", expected.Height) - requireBlockBytesEqual(t, expected, byHeight) + requireTrue(t, ok, "block not found by height %d", expected.Height()) + requireBlockEqual(t, expected, byHeight) - byHash, ok, err := db.GetBlockByHash(ctx, expected.Hash) + byHash, ok, err := db.GetBlockByHash(ctx, expected.Hash()) requireNoError(t, err) - requireTrue(t, ok, "block not found by hash at height %d", expected.Height) - requireBlockBytesEqual(t, expected, byHash) + requireTrue(t, ok, "block not found by hash at height %d", expected.Height()) + requireBlockEqual(t, expected, byHash) - for _, expectedTx := range expected.Transactions { - gotTx, ok, err := db.GetTransactionByHash(ctx, expectedTx.Hash) + for _, expectedTx := range expected.Transactions() { + gotTx, ok, err := db.GetTransactionByHash(ctx, expectedTx.Hash()) requireNoError(t, err) - requireTrue(t, ok, "tx not found by hash %x (block height %d)", expectedTx.Hash, expected.Height) - requireBytesEqual(t, expectedTx.Hash, gotTx.Hash, "tx hash") - requireBytesEqual(t, expectedTx.Transaction, gotTx.Transaction, "tx data") + requireTrue(t, ok, "tx not found by hash %x (block height %d)", expectedTx.Hash(), expected.Height()) + requireBytesEqual(t, expectedTx.Hash(), gotTx.Hash(), "tx hash") + requireBytesEqual(t, expectedTx.Bytes(), gotTx.Bytes(), "tx data") } } }) @@ -339,24 +359,20 @@ func TestBulkWriteAndQuery(t *testing.T) { // makeRandomBlock builds a block with deterministic random binary payloads. // Returned slices are owned copies safe for storage and later comparison. 
-func makeRandomBlock(rng *crand.CannedRandom, height uint64, numTxs int) *block.BinaryBlock { - txs := make([]*block.BinaryTransaction, numTxs) +func makeRandomBlock(rng *crand.CannedRandom, height uint64, numTxs int) *testBlock { + txs := make([]block.Transaction, numTxs) for i := range txs { txHash := rng.Address('t', int64(height)*1000+int64(i), 32) txDataLen := 64 + int(rng.Int64Range(0, 512)) txData := copyBytes(rng.Bytes(txDataLen)) - txs[i] = &block.BinaryTransaction{Hash: txHash, Transaction: txData} + txs[i] = &testTx{hash: txHash, bytes: txData} } blockHash := rng.Address('b', int64(height), 32) - blockDataLen := 128 + int(rng.Int64Range(0, 1024)) - blockData := copyBytes(rng.Bytes(blockDataLen)) - - return &block.BinaryBlock{ - Height: height, - Hash: blockHash, - BlockData: blockData, - Transactions: txs, + return &testBlock{ + hash: blockHash, + height: height, + txs: txs, } } @@ -366,26 +382,6 @@ func copyBytes(src []byte) []byte { return dst } -// requireBlockBytesEqual does a deep byte-level comparison, suitable for verifying -// round-trip fidelity through serialization. 
-func requireBlockBytesEqual(t *testing.T, expected, actual *block.BinaryBlock) { - t.Helper() - if expected.Height != actual.Height { - t.Fatalf("height mismatch: expected %d, got %d", expected.Height, actual.Height) - } - requireBytesEqual(t, expected.Hash, actual.Hash, "block hash") - requireBytesEqual(t, expected.BlockData, actual.BlockData, "block data") - if len(expected.Transactions) != len(actual.Transactions) { - t.Fatalf("transaction count mismatch at height %d: expected %d, got %d", - expected.Height, len(expected.Transactions), len(actual.Transactions)) - } - for i, tx := range expected.Transactions { - label := fmt.Sprintf("height %d tx[%d]", expected.Height, i) - requireBytesEqual(t, tx.Hash, actual.Transactions[i].Hash, label+" hash") - requireBytesEqual(t, tx.Transaction, actual.Transactions[i].Transaction, label+" data") - } -} - // --- test helpers --- func requireNoError(t *testing.T, err error) { @@ -409,19 +405,19 @@ func requireBytesEqual(t *testing.T, expected, actual []byte, label string) { } } -func requireBlockEqual(t *testing.T, expected, actual *block.BinaryBlock) { +func requireBlockEqual(t *testing.T, expected, actual block.Block) { t.Helper() - if expected.Height != actual.Height { - t.Fatalf("height mismatch: expected %d, got %d", expected.Height, actual.Height) + if expected.Height() != actual.Height() { + t.Fatalf("height mismatch: expected %d, got %d", expected.Height(), actual.Height()) } - requireBytesEqual(t, expected.Hash, actual.Hash, "block hash") - requireBytesEqual(t, expected.BlockData, actual.BlockData, "block data") - if len(expected.Transactions) != len(actual.Transactions) { - t.Fatalf("transaction count mismatch: expected %d, got %d", - len(expected.Transactions), len(actual.Transactions)) + requireBytesEqual(t, expected.Hash(), actual.Hash(), "block hash") + expTxs := expected.Transactions() + actTxs := actual.Transactions() + if len(expTxs) != len(actTxs) { + t.Fatalf("transaction count mismatch: expected %d, got 
%d", len(expTxs), len(actTxs)) } - for i, tx := range expected.Transactions { - requireBytesEqual(t, tx.Hash, actual.Transactions[i].Hash, fmt.Sprintf("tx[%d] hash", i)) - requireBytesEqual(t, tx.Transaction, actual.Transactions[i].Transaction, fmt.Sprintf("tx[%d] data", i)) + for i, tx := range expTxs { + requireBytesEqual(t, tx.Hash(), actTxs[i].Hash(), fmt.Sprintf("tx[%d] hash", i)) + requireBytesEqual(t, tx.Bytes(), actTxs[i].Bytes(), fmt.Sprintf("tx[%d] data", i)) } } diff --git a/sei-db/ledger_db/block/blocksim/block_generator.go b/sei-db/ledger_db/block/blocksim/block_generator.go index cac1373233..b08b6de190 100644 --- a/sei-db/ledger_db/block/blocksim/block_generator.go +++ b/sei-db/ledger_db/block/blocksim/block_generator.go @@ -2,6 +2,7 @@ package blocksim import ( "context" + "time" "github.com/sei-protocol/sei-chain/sei-db/common/rand" "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" @@ -12,6 +13,32 @@ const ( txHashType = 't' ) +// genTx is a synthetic transaction that satisfies block.Transaction. +type genTx struct { + hash []byte + bytes []byte +} + +func (t *genTx) Hash() []byte { return t.hash } +func (t *genTx) Bytes() []byte { return t.bytes } + +// genBlock is a synthetic block that satisfies block.Block. extra is held to +// simulate block-level metadata bytes — the BlockDB contract has no field for +// it, but the bytes still occupy memory (and serialized space, for backends +// that materialize the whole Block). +type genBlock struct { + hash []byte + height uint64 + time time.Time + txs []block.Transaction + extra []byte +} + +func (b *genBlock) Hash() []byte { return b.hash } +func (b *genBlock) Height() uint64 { return b.height } +func (b *genBlock) Time() time.Time { return b.time } +func (b *genBlock) Transactions() []block.Transaction { return b.txs } + // Asynchronously generates random blocks and feeds them into a channel. 
type BlockGenerator struct { ctx context.Context @@ -22,7 +49,7 @@ type BlockGenerator struct { nextHeight uint64 // Generated blocks are sent to this channel. - blocksChan chan *block.BinaryBlock + blocksChan chan *genBlock } // Creates a new BlockGenerator and immediately starts its background goroutine. @@ -38,7 +65,7 @@ func NewBlockGenerator( config: config, rand: rng, nextHeight: startHeight, - blocksChan: make(chan *block.BinaryBlock, config.StagedBlockQueueSize), + blocksChan: make(chan *genBlock, config.StagedBlockQueueSize), } go g.mainLoop() return g @@ -46,7 +73,7 @@ func NewBlockGenerator( // NextBlock blocks until the next generated block is available and returns it. // Returns nil if the context has been cancelled and no more blocks will be produced. -func (g *BlockGenerator) NextBlock() *block.BinaryBlock { +func (g *BlockGenerator) NextBlock() *genBlock { select { case <-g.ctx.Done(): return nil @@ -66,26 +93,26 @@ func (g *BlockGenerator) mainLoop() { } } -func (g *BlockGenerator) buildBlock() *block.BinaryBlock { +func (g *BlockGenerator) buildBlock() *genBlock { height := g.nextHeight g.nextHeight++ - txs := make([]*block.BinaryTransaction, g.config.TransactionsPerBlock) + txs := make([]block.Transaction, g.config.TransactionsPerBlock) for i := uint64(0); i < g.config.TransactionsPerBlock; i++ { txID := int64(height)*int64(g.config.TransactionsPerBlock) + int64(i) //nolint:gosec - txs[i] = &block.BinaryTransaction{ - Hash: g.rand.Address(txHashType, txID, int(g.config.TransactionHashSize)), //nolint:gosec - Transaction: g.rand.Bytes(int(g.config.BytesPerTransaction)), //nolint:gosec + txs[i] = &genTx{ + hash: g.rand.Address(txHashType, txID, int(g.config.TransactionHashSize)), //nolint:gosec + bytes: g.rand.Bytes(int(g.config.BytesPerTransaction)), //nolint:gosec } } blockHash := g.rand.Address(blockHashType, int64(height), int(g.config.BlockHashSize)) //nolint:gosec - blockData := g.rand.Bytes(int(g.config.ExtraBytesPerBlock)) //nolint:gosec + 
extra := g.rand.Bytes(int(g.config.ExtraBytesPerBlock)) //nolint:gosec - return &block.BinaryBlock{ - Height: height, - Hash: blockHash, - BlockData: blockData, - Transactions: txs, + return &genBlock{ + hash: blockHash, + height: height, + txs: txs, + extra: extra, } } diff --git a/sei-db/ledger_db/block/blocksim/blocksim.go b/sei-db/ledger_db/block/blocksim/blocksim.go index 42bd3f9ff9..750b31a26f 100644 --- a/sei-db/ledger_db/block/blocksim/blocksim.go +++ b/sei-db/ledger_db/block/blocksim/blocksim.go @@ -164,23 +164,24 @@ func (b *BlockSim) maybeThrottle() { } } -func (b *BlockSim) handleNextBlock(blk *block.BinaryBlock) { +func (b *BlockSim) handleNextBlock(blk *genBlock) { b.metrics.SetMainThreadPhase("write_block") if err := b.db.WriteBlock(b.ctx, blk); err != nil { - fmt.Printf("failed to write block %d: %v\n", blk.Height, err) + fmt.Printf("failed to write block %d: %v\n", blk.Height(), err) b.cancel() return } - txCount := int64(len(blk.Transactions)) - blockBytes := int64(len(blk.Hash) + len(blk.BlockData)) - for _, tx := range blk.Transactions { - blockBytes += int64(len(tx.Hash) + len(tx.Transaction)) + txs := blk.Transactions() + txCount := int64(len(txs)) + blockBytes := int64(len(blk.Hash()) + len(blk.extra)) + for _, tx := range txs { + blockBytes += int64(len(tx.Hash()) + len(tx.Bytes())) } b.totalBlocksWritten++ b.totalTransactionsWritten += txCount b.totalBytesWritten += blockBytes - b.highestBlockHeight = blk.Height + b.highestBlockHeight = blk.Height() b.metrics.ReportBlockWritten(txCount, blockBytes) // Periodic flush. @@ -195,9 +196,9 @@ func (b *BlockSim) handleNextBlock(blk *block.BinaryBlock) { } // Periodic prune. 
- if blk.Height > 0 && blk.Height%b.config.PruneIntervalBlocks == 0 { + if blk.Height() > 0 && blk.Height()%b.config.PruneIntervalBlocks == 0 { b.metrics.SetMainThreadPhase("prune") - lowestToKeep := blk.Height - b.config.UnprunedBlocks + lowestToKeep := blk.Height() - b.config.UnprunedBlocks if err := b.db.Prune(b.ctx, lowestToKeep); err != nil { fmt.Printf("failed to prune: %v\n", err) b.cancel() diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go index 2e32d6fcd4..acfbfe6e0d 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go @@ -10,9 +10,9 @@ import ( // Shared backing store, keyed by path in test builders to simulate restarts. type memBlockDBData struct { mu sync.RWMutex - blocksByHash map[string]*block.BinaryBlock - blocksByHeight map[uint64]*block.BinaryBlock - txByHash map[string]*block.BinaryTransaction + blocksByHash map[string]block.Block + blocksByHeight map[uint64]block.Block + txByHash map[string]block.Transaction lowestHeight uint64 highestHeight uint64 hasBlocks bool @@ -28,34 +28,35 @@ type memBlockDB struct { func NewMemBlockDB() block.BlockDB { return &memBlockDB{ data: &memBlockDBData{ - blocksByHash: make(map[string]*block.BinaryBlock), - blocksByHeight: make(map[uint64]*block.BinaryBlock), - txByHash: make(map[string]*block.BinaryTransaction), + blocksByHash: make(map[string]block.Block), + blocksByHeight: make(map[uint64]block.Block), + txByHash: make(map[string]block.Transaction), }, } } -func (m *memBlockDB) WriteBlock(_ context.Context, blk *block.BinaryBlock) error { +func (m *memBlockDB) WriteBlock(_ context.Context, blk block.Block) error { d := m.data d.mu.Lock() defer d.mu.Unlock() - d.blocksByHash[string(blk.Hash)] = blk - d.blocksByHeight[blk.Height] = blk - for _, tx := range blk.Transactions { - d.txByHash[string(tx.Hash)] = tx + height := blk.Height() + d.blocksByHash[string(blk.Hash())] = blk 
+ d.blocksByHeight[height] = blk + for _, tx := range blk.Transactions() { + d.txByHash[string(tx.Hash())] = tx } if !d.hasBlocks { - d.lowestHeight = blk.Height - d.highestHeight = blk.Height + d.lowestHeight = height + d.highestHeight = height d.hasBlocks = true } else { - if blk.Height < d.lowestHeight { - d.lowestHeight = blk.Height + if height < d.lowestHeight { + d.lowestHeight = height } - if blk.Height > d.highestHeight { - d.highestHeight = blk.Height + if height > d.highestHeight { + d.highestHeight = height } } return nil @@ -65,7 +66,7 @@ func (m *memBlockDB) Flush(_ context.Context) error { return nil } -func (m *memBlockDB) GetBlockByHash(_ context.Context, hash []byte) (*block.BinaryBlock, bool, error) { +func (m *memBlockDB) GetBlockByHash(_ context.Context, hash []byte) (block.Block, bool, error) { d := m.data d.mu.RLock() defer d.mu.RUnlock() @@ -74,7 +75,7 @@ func (m *memBlockDB) GetBlockByHash(_ context.Context, hash []byte) (*block.Bina return blk, ok, nil } -func (m *memBlockDB) GetBlockByHeight(_ context.Context, height uint64) (*block.BinaryBlock, bool, error) { +func (m *memBlockDB) GetBlockByHeight(_ context.Context, height uint64) (block.Block, bool, error) { d := m.data d.mu.RLock() defer d.mu.RUnlock() @@ -83,7 +84,7 @@ func (m *memBlockDB) GetBlockByHeight(_ context.Context, height uint64) (*block. 
return blk, ok, nil } -func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (*block.BinaryTransaction, bool, error) { +func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (block.Transaction, bool, error) { d := m.data d.mu.RLock() defer d.mu.RUnlock() @@ -107,9 +108,9 @@ func (m *memBlockDB) Prune(_ context.Context, lowestHeightToKeep uint64) error { continue } delete(d.blocksByHeight, h) - delete(d.blocksByHash, string(blk.Hash)) - for _, tx := range blk.Transactions { - delete(d.txByHash, string(tx.Hash)) + delete(d.blocksByHash, string(blk.Hash())) + for _, tx := range blk.Transactions() { + delete(d.txByHash, string(tx.Hash())) } } diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go index 2cf400a923..edec11cede 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go @@ -4,23 +4,43 @@ import ( "context" "fmt" "testing" + "time" "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" ) -func makeBlock(height uint64, numTxs int) *block.BinaryBlock { - txs := make([]*block.BinaryTransaction, numTxs) +type testTx struct { + hash []byte + bytes []byte +} + +func (t *testTx) Hash() []byte { return t.hash } +func (t *testTx) Bytes() []byte { return t.bytes } + +type testBlock struct { + hash []byte + height uint64 + time time.Time + txs []block.Transaction +} + +func (b *testBlock) Hash() []byte { return b.hash } +func (b *testBlock) Height() uint64 { return b.height } +func (b *testBlock) Time() time.Time { return b.time } +func (b *testBlock) Transactions() []block.Transaction { return b.txs } + +func makeBlock(height uint64, numTxs int) block.Block { + txs := make([]block.Transaction, numTxs) for i := 0; i < numTxs; i++ { - txs[i] = &block.BinaryTransaction{ - Hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), - Transaction: []byte(fmt.Sprintf("tx-data-%d-%d", height, 
i)), + txs[i] = &testTx{ + hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), + bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), } } - return &block.BinaryBlock{ - Height: height, - Hash: []byte(fmt.Sprintf("block-%d", height)), - BlockData: []byte(fmt.Sprintf("block-data-%d", height)), - Transactions: txs, + return &testBlock{ + hash: []byte(fmt.Sprintf("block-%d", height)), + height: height, + txs: txs, } } diff --git a/sei-tendermint/internal/p2p/giga_blockdb.go b/sei-tendermint/internal/p2p/giga_blockdb.go index 3ddd5d89e2..7d56227a95 100644 --- a/sei-tendermint/internal/p2p/giga_blockdb.go +++ b/sei-tendermint/internal/p2p/giga_blockdb.go @@ -1,66 +1,44 @@ package p2p import ( - "encoding/binary" - "fmt" + "time" "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" - "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/pb" + "github.com/sei-protocol/sei-chain/sei-tendermint/crypto/tmhash" atypes "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/types" - "github.com/sei-protocol/sei-chain/sei-tendermint/internal/protoutils" - "github.com/sei-protocol/sei-chain/sei-tendermint/libs/utils" ) -// encodeBinaryBlock packs a finalized GlobalBlock into BlockDB's BinaryBlock -// shape. Layout of BlockData: [4 LE timestamp-proto length][timestamp proto] -// [block proto]. The block proto carries header + payload (txs included), -// matching the WAL format. Transactions is left nil — BlockData is fully -// self-describing for today's BlockByHash read path; we'll switch to indexed -// per-tx storage when GetTransactionByHash gets a real consumer. 
-func encodeBinaryBlock(gb *atypes.GlobalBlock) *block.BinaryBlock { - tsBytes := atypes.TimeConv.Marshal(gb.Timestamp) - blkBytes := protoutils.Marshal(&pb.Block{ - Header: atypes.BlockHeaderConv.Encode(gb.Header), - Payload: atypes.PayloadConv.Encode(gb.Payload), - }) - out := make([]byte, 4+len(tsBytes)+len(blkBytes)) - binary.LittleEndian.PutUint32(out[:4], uint32(len(tsBytes))) //nolint:gosec // tsBytes is a small proto Timestamp. - copy(out[4:], tsBytes) - copy(out[4+len(tsBytes):], blkBytes) - hash := gb.Header.Hash() - return &block.BinaryBlock{ - Height: uint64(gb.GlobalNumber), - Hash: hash.Bytes(), - BlockData: out, - } +// globalBlockAdapter wraps *atypes.GlobalBlock so it satisfies block.Block +// without leaking sei-db into autobahn/types. Per-tx hashes use +// tmhash.Sum (sha256), matching CometBFT's tx-hash convention. +type globalBlockAdapter struct { + gb *atypes.GlobalBlock } -// decodeBinaryBlock reconstructs a GlobalBlock from BlockDB's BinaryBlock -// shape produced by encodeBinaryBlock. FinalAppState is left None — the -// BlockByHash read path doesn't read it (translateGlobalBlock ignores it), -// and we don't want to wire AppProposal serialization through BlockDB until -// there's a consumer. 
-func decodeBinaryBlock(bb *block.BinaryBlock) (*atypes.GlobalBlock, error) { - if len(bb.BlockData) < 4 { - return nil, fmt.Errorf("block data too short: %d bytes", len(bb.BlockData)) - } - tsLen := binary.LittleEndian.Uint32(bb.BlockData[:4]) - if uint64(len(bb.BlockData)) < uint64(4+tsLen) { - return nil, fmt.Errorf("block data truncated: have %d, need >=%d", len(bb.BlockData), 4+tsLen) - } - ts, err := atypes.TimeConv.Unmarshal(bb.BlockData[4 : 4+tsLen]) - if err != nil { - return nil, fmt.Errorf("decode timestamp: %w", err) - } - b, err := atypes.BlockConv.Unmarshal(bb.BlockData[4+tsLen:]) - if err != nil { - return nil, fmt.Errorf("decode block: %w", err) +func (a globalBlockAdapter) Hash() []byte { + h := a.gb.Header.Hash() + return h.Bytes() +} + +func (a globalBlockAdapter) Height() uint64 { return uint64(a.gb.GlobalNumber) } + +func (a globalBlockAdapter) Time() time.Time { return a.gb.Timestamp } + +func (a globalBlockAdapter) Transactions() []block.Transaction { + txs := a.gb.Payload.Txs() + out := make([]block.Transaction, len(txs)) + for i, tx := range txs { + out[i] = txAdapter{hash: tmhash.Sum(tx), bytes: tx} } - return &atypes.GlobalBlock{ - Header: b.Header(), - Timestamp: ts, - GlobalNumber: atypes.GlobalBlockNumber(bb.Height), - Payload: b.Payload(), - FinalAppState: utils.None[*atypes.AppProposal](), - }, nil + return out } + +// txAdapter wraps a single Autobahn tx + its CometBFT-style hash so it +// satisfies block.Transaction. 
+type txAdapter struct { + hash []byte + bytes []byte +} + +func (t txAdapter) Hash() []byte { return t.hash } +func (t txAdapter) Bytes() []byte { return t.bytes } diff --git a/sei-tendermint/internal/p2p/giga_router.go b/sei-tendermint/internal/p2p/giga_router.go index 1582c7aadf..1c4a934d0c 100644 --- a/sei-tendermint/internal/p2p/giga_router.go +++ b/sei-tendermint/internal/p2p/giga_router.go @@ -170,7 +170,7 @@ func (r *GigaRouter) BlockByNumber(ctx context.Context, n atypes.GlobalBlockNumb } return nil, fmt.Errorf("data.GlobalBlock(%v): %w", n, err) } - return r.translateGlobalBlock(gb), nil + return r.translateBlock(globalBlockAdapter{gb: gb}), nil } // BlockByHash returns the finalized global block keyed by Autobahn block- @@ -185,27 +185,21 @@ func (r *GigaRouter) BlockByNumber(ctx context.Context, n atypes.GlobalBlockNumb // rejected at the call site (env.BlockByHash) so this method can stay // strongly typed on atypes.BlockHeaderHash. func (r *GigaRouter) BlockByHash(ctx context.Context, hash atypes.BlockHeaderHash) (*coretypes.ResultBlock, error) { - bb, ok, err := r.blockDB.GetBlockByHash(ctx, hash.Bytes()) + b, ok, err := r.blockDB.GetBlockByHash(ctx, hash.Bytes()) if err != nil { return nil, fmt.Errorf("blockDB.GetBlockByHash: %w", err) } if !ok { return &coretypes.ResultBlock{}, nil } - gb, err := decodeBinaryBlock(bb) - if err != nil { - return nil, fmt.Errorf("decodeBinaryBlock: %w", err) - } - return r.translateGlobalBlock(gb), nil + return r.translateBlock(b), nil } -// translateGlobalBlock converts an Autobahn GlobalBlock to the CometBFT -// coretypes.ResultBlock shape used by env.Block / env.BlockByHash and -// downstream evmrpc consumers. Caller must pass a non-nil *GlobalBlock with -// non-nil Header and Payload — that's the contract data.State guarantees on -// a successful lookup, and matches how executeBlock dereferences b.Header -// without a nil-check on the same type. 
The "no such block" case is -// rejected at the BlockByHash call site before delegating here. +// translateBlock converts a block.Block into the CometBFT coretypes.ResultBlock +// shape used by env.Block / env.BlockByHash and downstream evmrpc consumers. +// Both BlockByNumber (data.State path, wrapped via globalBlockAdapter) and +// BlockByHash (BlockDB path) feed through here so the read path always emits +// the same shape regardless of source. // // LastCommit is non-nil with empty Signatures, mirroring executeBlock's // FinalizeBlock call which passes an empty abci.CommitInfo. Under Autobahn @@ -215,23 +209,21 @@ func (r *GigaRouter) BlockByHash(ctx context.Context, hash atypes.BlockHeaderHas // counters and diverge from production. ToReqBeginBlock skips the per- // validator loop when Signatures is empty, so empty Votes flow into // distribution/slashing on both paths. -func (r *GigaRouter) translateGlobalBlock(gb *atypes.GlobalBlock) *coretypes.ResultBlock { - srcTxs := gb.Payload.Txs() +func (r *GigaRouter) translateBlock(b block.Block) *coretypes.ResultBlock { + srcTxs := b.Transactions() tmTxs := make(types.Txs, len(srcTxs)) for i, tx := range srcTxs { - tmTxs[i] = tx + tmTxs[i] = tx.Bytes() } - h := gb.Header.Hash() return &coretypes.ResultBlock{ - BlockID: types.BlockID{Hash: tmbytes.HexBytes(h.Bytes())}, + BlockID: types.BlockID{Hash: tmbytes.HexBytes(b.Hash())}, Block: &types.Block{ Header: types.Header{ ChainID: r.cfg.GenDoc.ChainID, - // Clamp accepts any constraints.Integer for From, so - // gb.GlobalNumber (a typed uint64) goes in directly — no - // intermediate uint64() conversion needed. - Height: utils.Clamp[int64](gb.GlobalNumber), - Time: gb.Timestamp, + // Clamp accepts any constraints.Integer for From, so the + // uint64 height goes in directly — no intermediate cast. 
+ Height: utils.Clamp[int64](b.Height()), + Time: b.Time(), }, Data: types.Data{Txs: tmTxs}, LastCommit: &types.Commit{}, @@ -362,7 +354,7 @@ func (r *GigaRouter) runExecute(ctx context.Context) error { // BlockByHash sees the block from this point forward. The data // layer's WAL remains the primary durability story; BlockDB is the // hash index, not the source of truth on restart. - if err := r.blockDB.WriteBlock(ctx, encodeBinaryBlock(b)); err != nil { + if err := r.blockDB.WriteBlock(ctx, globalBlockAdapter{gb: b}); err != nil { return fmt.Errorf("r.blockDB.WriteBlock(%v): %w", n, err) } commitResp, err := r.executeBlock(ctx, b) From 62838978201bb9d9174de3ca6c2c1726f1ababd2 Mon Sep 17 00:00:00 2001 From: Wen Date: Thu, 7 May 2026 15:01:30 -0700 Subject: [PATCH 3/9] giga: persist tx execution results, route env.Tx through BlockDB (CON-256) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extends block.Transaction with Result/Height/Index. Result returns (bytes, ok) — consumers tell "executed" from "block written, results not yet attached" without nil-as-sentinel. New BlockDB.SetTransactionResults(blockHash, []Result) attaches per-tx results post-execution. mem_block_db keeps results in a separate map and wraps stored Transactions on read with composedTx that overrides Result(). Blocksim's synthetic genTx returns (nil, false); test fixtures get a parallel testTx + testResult. GigaRouter.executeBlock now returns the FinalizeBlock TxResults too; runExecute calls SetTransactionResults right after execution, wrapping each *abci.ExecTxResult in execResultAdapter (lazy Marshal). New GigaRouter.Tx returns a fully-translated coretypes.ResultTx — env.Tx delegates to it under giga, mirroring the BlockByHash routing pattern. giga_router_test now round-trips every tx in the latest block through GigaRouter.Tx, asserting hash/height/index/bytes faithfulness and that TxResult.Code lands as 0 (testApp returns CodeTypeOK). 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sei-db/ledger_db/block/block_db.go | 48 +++++++++- .../block/block_db_test/block_db_test.go | 87 +++++++++++++++++-- .../block/blocksim/block_generator.go | 19 ++-- .../block/mem_block_db/mem_block_db.go | 47 +++++++++- .../block/mem_block_db/mem_block_db_test.go | 21 +++-- sei-tendermint/internal/p2p/giga_blockdb.go | 47 ++++++++-- sei-tendermint/internal/p2p/giga_router.go | 66 ++++++++++++-- .../internal/p2p/giga_router_test.go | 17 ++++ sei-tendermint/internal/rpc/core/tx.go | 7 ++ 9 files changed, 321 insertions(+), 38 deletions(-) diff --git a/sei-db/ledger_db/block/block_db.go b/sei-db/ledger_db/block/block_db.go index 4a2f670d3a..f818a5d841 100644 --- a/sei-db/ledger_db/block/block_db.go +++ b/sei-db/ledger_db/block/block_db.go @@ -10,14 +10,32 @@ import ( // when the database contains no blocks. var ErrNoBlocks = errors.New("block db: no blocks") +// ErrUnknownBlock is returned by SetTransactionResults when the referenced +// block hash has not been written to the database. +var ErrUnknownBlock = errors.New("block db: unknown block hash") + +// ErrResultCountMismatch is returned by SetTransactionResults when the supplied +// results slice doesn't match the number of transactions in the referenced block. +var ErrResultCountMismatch = errors.New("block db: result count does not match transaction count") + // Transaction is the BlockDB's view of a single transaction inside a block: -// its hash plus its raw bytes. Implementations are expected to be cheap -// (typically just a struct returning pre-computed fields). +// its hash, raw bytes, post-execution result, plus its position within the +// chain (height + index). Result returns ok=false until SetTransactionResults +// has been called for the parent block. type Transaction interface { // Hash returns the canonical transaction hash used for indexing. Hash() []byte // Bytes returns the raw, on-the-wire transaction bytes. 
Bytes() []byte + // Result returns the marshaled execution result and ok=true once it has + // been attached via SetTransactionResults. Returns (nil, false) for + // transactions whose parent block has been written but not yet had + // results recorded — callers should treat that as "not yet executed". + Result() (bytes []byte, ok bool) + // Height returns the height of the block this transaction belongs to. + Height() uint64 + // Index returns the position of this transaction within its block. + Index() uint32 } // Block is the BlockDB's view of a finalized block. The interface intentionally @@ -31,10 +49,23 @@ type Block interface { Height() uint64 // Time returns the block timestamp. Time() time.Time - // Transactions returns the block's transactions in order. + // Transactions returns the block's transactions in order. Each Transaction + // must report Height() == this block's height and Index() == its position + // in the slice. Result() may be empty at WriteBlock time; results are + // supplied separately via SetTransactionResults. Transactions() []Transaction } +// Result is the BlockDB's view of one transaction's post-execution result, +// supplied to SetTransactionResults after the application has executed the +// block. The interface keeps BlockDB chain-agnostic — callers wrap their +// concrete result types (e.g. abci.ExecTxResult) in a small adapter that +// returns the marshaled bytes. +type Result interface { + // Bytes returns the marshaled execution result for one transaction. + Bytes() []byte +} + // A database for storing finalized block and transaction data. // // This store is fully threadsafe. All writes are atomic (that is, after a crash you will either see the write or @@ -49,6 +80,17 @@ type BlockDB interface { // Call Flush() if you need to wait until the block is written to disk. 
WriteBlock(ctx context.Context, block Block) error + // SetTransactionResults attaches per-transaction execution results to a previously written + // block, identified by its block hash. results must be the same length as the block's + // Transactions(); each entry corresponds positionally to the transaction at that index. + // + // Returns ErrUnknownBlock if no block with the given hash has been written, and + // ErrResultCountMismatch if len(results) does not match the block's transaction count. + // + // Like WriteBlock, this is async with respect to disk persistence; pair with Flush() + // for crash durability. + SetTransactionResults(ctx context.Context, blockHash []byte, results []Result) error + // Blocks until all pending writes are flushed to disk. Any call to WriteBlock issued before calling Flush() // will be crash-durable after Flush() returns. Calls to WriteBlock() made concurrently with Flush() may or // may not be crash-durable after Flush() returns (but are otherwise eventually durable). 
diff --git a/sei-db/ledger_db/block/block_db_test/block_db_test.go b/sei-db/ledger_db/block/block_db_test/block_db_test.go index b014468431..54cb233f76 100644 --- a/sei-db/ledger_db/block/block_db_test/block_db_test.go +++ b/sei-db/ledger_db/block/block_db_test/block_db_test.go @@ -37,12 +37,23 @@ func newMemBlockDBBuilder() blockDBBuilder { } type testTx struct { - hash []byte - bytes []byte + hash []byte + bytes []byte + result []byte + hasResult bool + height uint64 + index uint32 } -func (t *testTx) Hash() []byte { return t.hash } -func (t *testTx) Bytes() []byte { return t.bytes } +func (t *testTx) Hash() []byte { return t.hash } +func (t *testTx) Bytes() []byte { return t.bytes } +func (t *testTx) Result() ([]byte, bool) { return t.result, t.hasResult } +func (t *testTx) Height() uint64 { return t.height } +func (t *testTx) Index() uint32 { return t.index } + +type testResult struct{ bytes []byte } + +func (r testResult) Bytes() []byte { return r.bytes } type testBlock struct { hash []byte @@ -60,8 +71,10 @@ func makeBlock(height uint64, numTxs int) *testBlock { txs := make([]block.Transaction, numTxs) for i := 0; i < numTxs; i++ { txs[i] = &testTx{ - hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), - bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), + hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), + bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), + height: height, + index: uint32(i), //nolint:gosec } } return &testBlock{ @@ -185,6 +198,66 @@ func TestMultipleBlocks(t *testing.T) { }) } +func TestSetTransactionResults(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + blk := makeBlock(7, 3) + requireNoError(t, db.WriteBlock(ctx, blk)) + + // Pre-results: GetTransactionByHash returns the tx with Result ok=false. 
+ for _, tx := range blk.Transactions() { + got, ok, err := db.GetTransactionByHash(ctx, tx.Hash()) + requireNoError(t, err) + requireTrue(t, ok, "expected tx pre-results") + _, hasResult := got.Result() + requireTrue(t, !hasResult, "expected Result ok=false before SetTransactionResults") + requireTrue(t, got.Height() == 7, "expected height carried through, got %d", got.Height()) + requireTrue(t, got.Index() == tx.Index(), "expected index carried through") + } + + // Attach results. + results := []block.Result{ + testResult{bytes: []byte("result-0")}, + testResult{bytes: []byte("result-1")}, + testResult{bytes: []byte("result-2")}, + } + requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), results)) + + // Post-results: Result() returns (bytes, true). + for i, tx := range blk.Transactions() { + got, ok, err := db.GetTransactionByHash(ctx, tx.Hash()) + requireNoError(t, err) + requireTrue(t, ok, "expected tx post-results") + gotResult, hasResult := got.Result() + requireTrue(t, hasResult, "expected Result ok=true after SetTransactionResults") + requireBytesEqual(t, results[i].Bytes(), gotResult, fmt.Sprintf("tx[%d] result", i)) + } + }) +} + +func TestSetTransactionResultsErrors(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + // Unknown block hash. + err = db.SetTransactionResults(ctx, []byte("nonexistent"), nil) + requireTrue(t, err != nil, "expected error for unknown block hash") + + // Mismatched count. 
+ blk := makeBlock(1, 2) + requireNoError(t, db.WriteBlock(ctx, blk)) + err = db.SetTransactionResults(ctx, blk.Hash(), []block.Result{testResult{bytes: []byte("only-one")}}) + requireTrue(t, err != nil, "expected error for mismatched result count") + }) +} + func TestPrunePreservesUnprunedBlocks(t *testing.T) { forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { ctx := context.Background() @@ -365,7 +438,7 @@ func makeRandomBlock(rng *crand.CannedRandom, height uint64, numTxs int) *testBl txHash := rng.Address('t', int64(height)*1000+int64(i), 32) txDataLen := 64 + int(rng.Int64Range(0, 512)) txData := copyBytes(rng.Bytes(txDataLen)) - txs[i] = &testTx{hash: txHash, bytes: txData} + txs[i] = &testTx{hash: txHash, bytes: txData, height: height, index: uint32(i)} //nolint:gosec } blockHash := rng.Address('b', int64(height), 32) diff --git a/sei-db/ledger_db/block/blocksim/block_generator.go b/sei-db/ledger_db/block/blocksim/block_generator.go index b08b6de190..2763830496 100644 --- a/sei-db/ledger_db/block/blocksim/block_generator.go +++ b/sei-db/ledger_db/block/blocksim/block_generator.go @@ -15,12 +15,17 @@ const ( // genTx is a synthetic transaction that satisfies block.Transaction. type genTx struct { - hash []byte - bytes []byte + hash []byte + bytes []byte + height uint64 + index uint32 } -func (t *genTx) Hash() []byte { return t.hash } -func (t *genTx) Bytes() []byte { return t.bytes } +func (t *genTx) Hash() []byte { return t.hash } +func (t *genTx) Bytes() []byte { return t.bytes } +func (t *genTx) Result() ([]byte, bool) { return nil, false } +func (t *genTx) Height() uint64 { return t.height } +func (t *genTx) Index() uint32 { return t.index } // genBlock is a synthetic block that satisfies block.Block. 
extra is held to // simulate block-level metadata bytes — the BlockDB contract has no field for @@ -101,8 +106,10 @@ func (g *BlockGenerator) buildBlock() *genBlock { for i := uint64(0); i < g.config.TransactionsPerBlock; i++ { txID := int64(height)*int64(g.config.TransactionsPerBlock) + int64(i) //nolint:gosec txs[i] = &genTx{ - hash: g.rand.Address(txHashType, txID, int(g.config.TransactionHashSize)), //nolint:gosec - bytes: g.rand.Bytes(int(g.config.BytesPerTransaction)), //nolint:gosec + hash: g.rand.Address(txHashType, txID, int(g.config.TransactionHashSize)), //nolint:gosec + bytes: g.rand.Bytes(int(g.config.BytesPerTransaction)), //nolint:gosec + height: height, + index: uint32(i), //nolint:gosec } } diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go index acfbfe6e0d..0bb3e3a4a6 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go @@ -2,6 +2,7 @@ package memblockdb import ( "context" + "fmt" "sync" "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" @@ -13,6 +14,11 @@ type memBlockDBData struct { blocksByHash map[string]block.Block blocksByHeight map[uint64]block.Block txByHash map[string]block.Transaction + // txResultByHash holds result bytes set by SetTransactionResults. Kept + // separate from txByHash so writes (block) and result-attachment + // (post-execution) stay independent — a Transaction read before its + // block executes returns nil from Result() rather than blocking. 
+ txResultByHash map[string][]byte lowestHeight uint64 highestHeight uint64 hasBlocks bool @@ -31,6 +37,7 @@ func NewMemBlockDB() block.BlockDB { blocksByHash: make(map[string]block.Block), blocksByHeight: make(map[uint64]block.Block), txByHash: make(map[string]block.Transaction), + txResultByHash: make(map[string][]byte), }, } } @@ -62,6 +69,28 @@ func (m *memBlockDB) WriteBlock(_ context.Context, blk block.Block) error { return nil } +func (m *memBlockDB) SetTransactionResults(_ context.Context, blockHash []byte, results []block.Result) error { + d := m.data + d.mu.Lock() + defer d.mu.Unlock() + + blk, ok := d.blocksByHash[string(blockHash)] + if !ok { + return fmt.Errorf("%w: %x", block.ErrUnknownBlock, blockHash) + } + txs := blk.Transactions() + if len(txs) != len(results) { + return fmt.Errorf("%w: block has %d txs, got %d results", block.ErrResultCountMismatch, len(txs), len(results)) + } + for i, tx := range txs { + // Eager copy of the result bytes so callers can release the source + // adapter (and the underlying *abci.ExecTxResult) immediately after + // SetTransactionResults returns. + d.txResultByHash[string(tx.Hash())] = results[i].Bytes() + } + return nil +} + func (m *memBlockDB) Flush(_ context.Context) error { return nil } @@ -84,13 +113,28 @@ func (m *memBlockDB) GetBlockByHeight(_ context.Context, height uint64) (block.B return blk, ok, nil } +// composedTx layers a separately-stored result on top of a stored Transaction. +// Hash/Bytes/Height/Index come from the original Transaction; Result reflects +// whether SetTransactionResults has run for the parent block. 
+type composedTx struct { + block.Transaction + result []byte + hasResult bool +} + +func (c composedTx) Result() ([]byte, bool) { return c.result, c.hasResult } + func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (block.Transaction, bool, error) { d := m.data d.mu.RLock() defer d.mu.RUnlock() tx, ok := d.txByHash[string(hash)] - return tx, ok, nil + if !ok { + return nil, false, nil + } + result, hasResult := d.txResultByHash[string(hash)] + return composedTx{Transaction: tx, result: result, hasResult: hasResult}, true, nil } func (m *memBlockDB) Prune(_ context.Context, lowestHeightToKeep uint64) error { @@ -111,6 +155,7 @@ func (m *memBlockDB) Prune(_ context.Context, lowestHeightToKeep uint64) error { delete(d.blocksByHash, string(blk.Hash())) for _, tx := range blk.Transactions() { delete(d.txByHash, string(tx.Hash())) + delete(d.txResultByHash, string(tx.Hash())) } } diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go index edec11cede..56504d514e 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go @@ -10,12 +10,19 @@ import ( ) type testTx struct { - hash []byte - bytes []byte + hash []byte + bytes []byte + result []byte + hasResult bool + height uint64 + index uint32 } -func (t *testTx) Hash() []byte { return t.hash } -func (t *testTx) Bytes() []byte { return t.bytes } +func (t *testTx) Hash() []byte { return t.hash } +func (t *testTx) Bytes() []byte { return t.bytes } +func (t *testTx) Result() ([]byte, bool) { return t.result, t.hasResult } +func (t *testTx) Height() uint64 { return t.height } +func (t *testTx) Index() uint32 { return t.index } type testBlock struct { hash []byte @@ -33,8 +40,10 @@ func makeBlock(height uint64, numTxs int) block.Block { txs := make([]block.Transaction, numTxs) for i := 0; i < numTxs; i++ { txs[i] = &testTx{ - hash: 
[]byte(fmt.Sprintf("tx-%d-%d", height, i)), - bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), + hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), + bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), + height: height, + index: uint32(i), //nolint:gosec } } return &testBlock{ diff --git a/sei-tendermint/internal/p2p/giga_blockdb.go b/sei-tendermint/internal/p2p/giga_blockdb.go index 7d56227a95..ceb8b4bd8a 100644 --- a/sei-tendermint/internal/p2p/giga_blockdb.go +++ b/sei-tendermint/internal/p2p/giga_blockdb.go @@ -4,8 +4,10 @@ import ( "time" "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" + abci "github.com/sei-protocol/sei-chain/sei-tendermint/abci/types" "github.com/sei-protocol/sei-chain/sei-tendermint/crypto/tmhash" atypes "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/types" + "github.com/sei-protocol/sei-chain/sei-tendermint/libs/utils" ) // globalBlockAdapter wraps *atypes.GlobalBlock so it satisfies block.Block @@ -26,19 +28,50 @@ func (a globalBlockAdapter) Time() time.Time { return a.gb.Timestamp } func (a globalBlockAdapter) Transactions() []block.Transaction { txs := a.gb.Payload.Txs() + height := uint64(a.gb.GlobalNumber) out := make([]block.Transaction, len(txs)) for i, tx := range txs { - out[i] = txAdapter{hash: tmhash.Sum(tx), bytes: tx} + out[i] = txAdapter{ + hash: tmhash.Sum(tx), + bytes: tx, + height: height, + index: uint32(i), //nolint:gosec + } } return out } -// txAdapter wraps a single Autobahn tx + its CometBFT-style hash so it -// satisfies block.Transaction. +// txAdapter wraps a single Autobahn tx + its CometBFT-style hash + its +// position so it satisfies block.Transaction. Result() is always nil at +// WriteBlock time — execution results are attached later via +// BlockDB.SetTransactionResults, and surfaced through mem_block_db's +// composedTx wrapper on read. 
type txAdapter struct { - hash []byte - bytes []byte + hash []byte + bytes []byte + height uint64 + index uint32 } -func (t txAdapter) Hash() []byte { return t.hash } -func (t txAdapter) Bytes() []byte { return t.bytes } +func (t txAdapter) Hash() []byte { return t.hash } +func (t txAdapter) Bytes() []byte { return t.bytes } +func (t txAdapter) Result() ([]byte, bool) { return nil, false } +func (t txAdapter) Height() uint64 { return t.height } +func (t txAdapter) Index() uint32 { return t.index } + +// execResultAdapter wraps *abci.ExecTxResult so it satisfies block.Result. +// Marshal happens lazily on Bytes(); the typical caller is mem_block_db's +// SetTransactionResults, which calls Bytes() exactly once and then drops +// the adapter. ExecTxResult is gogoproto-generated so it carries its own +// Marshal method and never fails on a well-formed message — we OrPanic to +// surface the impossible case loudly rather than silently dropping a result. +type execResultAdapter struct { + r *abci.ExecTxResult +} + +func (a execResultAdapter) Bytes() []byte { + if a.r == nil { + return nil + } + return utils.OrPanic1(a.r.Marshal()) +} diff --git a/sei-tendermint/internal/p2p/giga_router.go b/sei-tendermint/internal/p2p/giga_router.go index 1c4a934d0c..73852d76f8 100644 --- a/sei-tendermint/internal/p2p/giga_router.go +++ b/sei-tendermint/internal/p2p/giga_router.go @@ -134,6 +134,44 @@ func (r *GigaRouter) LastCommittedBlockNumber() int64 { return int64(gr.Next) - 1 // nolint:gosec // gr.Next is uint64 but bounded by actual chain height. } +// Tx returns the finalized transaction with the given hash translated into +// the CometBFT coretypes.ResultTx shape. Mirrors BlockByHash: the RPC layer +// (env.Tx) just delegates here when Autobahn is active, keeping the +// abci.ExecTxResult unmarshal and ResultTx assembly inside the giga +// package. 
Match CometBFT semantics for unknown hashes — return an error +// rather than nil — since callers (broadcast_tx_commit polling, ops +// tooling) already handle that error explicitly. +// +// req.Prove is intentionally not honored — Autobahn doesn't materialize +// types.TxProof, and tooling that needs it falls back to the CometBFT path. +// +// Returns (nil, error) for unknown txs and decode errors. A successful +// lookup with no execution result yet (block written, but +// SetTransactionResults hasn't run for it) returns the tx with a zero +// TxResult, matching the "executed but no events" shape callers tolerate. +func (r *GigaRouter) Tx(ctx context.Context, hash []byte) (*coretypes.ResultTx, error) { + tx, ok, err := r.blockDB.GetTransactionByHash(ctx, hash) + if err != nil { + return nil, fmt.Errorf("blockDB.GetTransactionByHash: %w", err) + } + if !ok { + return nil, fmt.Errorf("tx (%X) not found", hash) + } + var result abci.ExecTxResult + if rb, hasResult := tx.Result(); hasResult { + if err := result.Unmarshal(rb); err != nil { + return nil, fmt.Errorf("unmarshal tx result: %w", err) + } + } + return &coretypes.ResultTx{ + Hash: hash, + Height: utils.Clamp[int64](tx.Height()), + Index: tx.Index(), + TxResult: result, + Tx: tx.Bytes(), + }, nil +} + // MaxGasPerBlock returns the producer's configured max gas per block (int64). // Thin pass-through to producer.Config.MaxGasPerBlockI64 — the clamp logic // lives there. 
Exposed at the GigaRouter level so the RPC layer can populate @@ -231,7 +269,7 @@ func (r *GigaRouter) translateBlock(b block.Block) *coretypes.ResultBlock { } } -func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (*abci.ResponseCommit, error) { +func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (*abci.ResponseCommit, []*abci.ExecTxResult, error) { app := r.cfg.TxMempool.App() hash := b.Header.Hash() var proposerAddress types.Address @@ -241,7 +279,7 @@ func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (* proposer := slices.MinFunc(vals, func(a, b abci.ValidatorUpdate) int { return a.PubKey.Compare(b.PubKey) }) key, err := crypto.PubKeyFromProto(proposer.PubKey) if err != nil { - return nil, fmt.Errorf("crypto.PubKeyFromProto(): %w", err) + return nil, nil, fmt.Errorf("crypto.PubKeyFromProto(): %w", err) } proposerAddress = key.Address() } @@ -268,14 +306,14 @@ func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (* }).ToProto(), }) if err != nil { - return nil, fmt.Errorf("r.cfg.App.FinalizeBlock(): %w", err) + return nil, nil, fmt.Errorf("r.cfg.App.FinalizeBlock(): %w", err) } if err := r.data.PushAppHash(ctx, b.GlobalNumber, resp.AppHash); err != nil { - return nil, fmt.Errorf("r.data.PushAppHash(%v): %w", b.GlobalNumber, err) + return nil, nil, fmt.Errorf("r.data.PushAppHash(%v): %w", b.GlobalNumber, err) } commitResp, err := app.Commit(ctx) if err != nil { - return nil, fmt.Errorf("r.cfg.App.Commit(): %w", err) + return nil, nil, fmt.Errorf("r.cfg.App.Commit(): %w", err) } blockTxs := make(types.Txs, len(b.Payload.Txs())) for i, tx := range b.Payload.Txs() { @@ -294,9 +332,9 @@ func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (* false, ) if err != nil { - return nil, fmt.Errorf("r.cfg.TxMempool.Update(%v): %w", b.GlobalNumber, err) + return nil, nil, fmt.Errorf("r.cfg.TxMempool.Update(%v): %w", b.GlobalNumber, err) } - return 
commitResp, nil + return commitResp, resp.TxResults, nil } func (r *GigaRouter) runExecute(ctx context.Context) error { @@ -357,10 +395,22 @@ func (r *GigaRouter) runExecute(ctx context.Context) error { if err := r.blockDB.WriteBlock(ctx, globalBlockAdapter{gb: b}); err != nil { return fmt.Errorf("r.blockDB.WriteBlock(%v): %w", n, err) } - commitResp, err := r.executeBlock(ctx, b) + commitResp, txResults, err := r.executeBlock(ctx, b) if err != nil { return fmt.Errorf("r.executeBlock(%v): %w", n, err) } + // Attach per-tx execution results to the BlockDB entry written + // above, so RPC consumers (env.Tx) can return them by tx hash. + // Wrapping each *abci.ExecTxResult in execResultAdapter keeps + // sei-db chain-agnostic — marshaling happens inside the adapter. + blockHash := b.Header.Hash() + results := make([]block.Result, len(txResults)) + for i, r := range txResults { + results[i] = execResultAdapter{r: r} + } + if err := r.blockDB.SetTransactionResults(ctx, blockHash.Bytes(), results); err != nil { + return fmt.Errorf("r.blockDB.SetTransactionResults(%v): %w", n, err) + } pruneBefore, ok := utils.SafeCast[atypes.GlobalBlockNumber](commitResp.RetainHeight) if !ok { return fmt.Errorf("invalid commitResp.RetainHeight = %v", commitResp.RetainHeight) diff --git a/sei-tendermint/internal/p2p/giga_router_test.go b/sei-tendermint/internal/p2p/giga_router_test.go index b665310c26..6995807269 100644 --- a/sei-tendermint/internal/p2p/giga_router_test.go +++ b/sei-tendermint/internal/p2p/giga_router_test.go @@ -16,6 +16,7 @@ import ( abci "github.com/sei-protocol/sei-chain/sei-tendermint/abci/types" "github.com/sei-protocol/sei-chain/sei-tendermint/crypto" "github.com/sei-protocol/sei-chain/sei-tendermint/crypto/ed25519" + "github.com/sei-protocol/sei-chain/sei-tendermint/crypto/tmhash" "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/consensus" "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/producer" atypes 
"github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/types" @@ -387,6 +388,22 @@ func TestGigaRouter_FinalizeBlocks(t *testing.T) { rbh, err := giga.BlockByHash(ctx, hashKey) require.NoError(t, err, "router[%v].BlockByHash(%x)", i, rb.BlockID.Hash) require.Equal(t, rb, rbh, "router[%v].BlockByHash(%x) ≠ BlockByNumber(%v)", i, rb.BlockID.Hash, committed) + // Covers GigaRouter.Tx — BlockDB-backed tx-by-hash lookup that + // env.Tx delegates to under Autobahn. For every tx in the just- + // fetched block we verify the round-trip carries hash/height/ + // index/bytes faithfully and that TxResult was attached by + // SetTransactionResults (Code is the meaningful no-fixture + // signal: testApp returns Code=0 for accepted txs). + for j, tx := range rb.Block.Data.Txs { + txHash := tmhash.Sum(tx) + rt, err := giga.Tx(ctx, txHash) + require.NoError(t, err, "router[%v].Tx(block=%v tx[%v])", i, committed, j) + require.Equal(t, txHash, []byte(rt.Hash), "router[%v].Tx hash", i) + require.Equal(t, committed, rt.Height, "router[%v].Tx height", i) + require.Equal(t, uint32(j), rt.Index, "router[%v].Tx index", i) //nolint:gosec + require.Equal(t, []byte(tx), rt.Tx, "router[%v].Tx tx bytes", i) + require.Equal(t, uint32(0), rt.TxResult.Code, "router[%v].Tx code", i) + } } // Payload.Txs round-trips: for every retained block, the txs the // data layer holds (GlobalBlock.Payload.Txs) must equal the txs diff --git a/sei-tendermint/internal/rpc/core/tx.go b/sei-tendermint/internal/rpc/core/tx.go index 33384d42c4..a8ab69a437 100644 --- a/sei-tendermint/internal/rpc/core/tx.go +++ b/sei-tendermint/internal/rpc/core/tx.go @@ -18,6 +18,13 @@ import ( // place. 
// More: https://docs.tendermint.com/master/rpc/#/Info/tx func (env *Environment) Tx(ctx context.Context, req *coretypes.RequestTx) (*coretypes.ResultTx, error) { + // Autobahn path: legacy EventSinks aren't populated under giga; delegate + // to GigaRouter.Tx, which reads the BlockDB populated by runExecute and + // returns a fully-translated ResultTx (mirrors how BlockByHash routes). + if r, ok := env.gigaRouter().Get(); ok { + return r.Tx(ctx, req.Hash) + } + // if index is disabled, return error if !indexer.KVSinkEnabled(env.EventSinks) { return nil, errors.New("transaction querying is disabled due to no kvEventSink") From a2aebc06ead99103c181b308b7aa5276920cabd9 Mon Sep 17 00:00:00 2001 From: Wen Date: Thu, 7 May 2026 15:43:07 -0700 Subject: [PATCH 4/9] giga: distinguish "tx result pending" from "tx not found" (CON-256) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds ErrTxResultPending. GigaRouter.Tx now returns it when the parent block has been WriteBlock'd but SetTransactionResults hasn't run yet — the window inside runExecute that spans the entire FinalizeBlock + PushAppHash + Commit + mempool.Update sequence. Before this, that window returned a ResultTx with a zero-value TxResult. Code=0 is the success code, so a polling broadcast_tx_commit client could not distinguish "tx executed and succeeded" from "tx committed but not yet executed" — which would mislead pollers into treating a not-yet-executed tx (that may still fail) as a success. New TestGigaRouter_TxResultPending pins this in isolation: WriteBlock, assert ErrTxResultPending, SetTransactionResults, assert successful ResultTx. Built on a small txStub/blockStub/resultStub that satisfies the block.Block / Transaction / Result interfaces directly, so the test exercises GigaRouter.Tx without standing up the full consensus harness used by TestGigaRouter_FinalizeBlocks. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sei-tendermint/internal/p2p/giga_router.go | 28 +++++-- .../internal/p2p/giga_router_test.go | 82 +++++++++++++++++++ 2 files changed, 102 insertions(+), 8 deletions(-) diff --git a/sei-tendermint/internal/p2p/giga_router.go b/sei-tendermint/internal/p2p/giga_router.go index 73852d76f8..994c8153b1 100644 --- a/sei-tendermint/internal/p2p/giga_router.go +++ b/sei-tendermint/internal/p2p/giga_router.go @@ -134,6 +134,15 @@ func (r *GigaRouter) LastCommittedBlockNumber() int64 { return int64(gr.Next) - 1 // nolint:gosec // gr.Next is uint64 but bounded by actual chain height. } +// ErrTxResultPending is returned by Tx when a transaction is known +// (its parent block has been written to BlockDB) but the per-tx execution +// result hasn't been attached yet — the window between WriteBlock and +// SetTransactionResults inside runExecute. Distinct from "not found" +// because the tx is real and the caller should retry, not give up. +// Callers that don't care can errors.Is-check to fold it into a generic +// "try again" flow. +var ErrTxResultPending = errors.New("transaction result not yet recorded") + // Tx returns the finalized transaction with the given hash translated into // the CometBFT coretypes.ResultTx shape. Mirrors BlockByHash: the RPC layer // (env.Tx) just delegates here when Autobahn is active, keeping the @@ -145,10 +154,11 @@ func (r *GigaRouter) LastCommittedBlockNumber() int64 { // req.Prove is intentionally not honored — Autobahn doesn't materialize // types.TxProof, and tooling that needs it falls back to the CometBFT path. // -// Returns (nil, error) for unknown txs and decode errors. A successful -// lookup with no execution result yet (block written, but -// SetTransactionResults hasn't run for it) returns the tx with a zero -// TxResult, matching the "executed but no events" shape callers tolerate. 
+// Returns ErrTxResultPending when the parent block exists but +// SetTransactionResults hasn't run for it yet. Returning a zero-result +// ResultTx in that window would be indistinguishable from a successful +// tx with Code=0 and no events, which would mislead broadcast_tx_commit +// pollers into thinking a not-yet-executed tx had succeeded. func (r *GigaRouter) Tx(ctx context.Context, hash []byte) (*coretypes.ResultTx, error) { tx, ok, err := r.blockDB.GetTransactionByHash(ctx, hash) if err != nil { @@ -157,11 +167,13 @@ func (r *GigaRouter) Tx(ctx context.Context, hash []byte) (*coretypes.ResultTx, if !ok { return nil, fmt.Errorf("tx (%X) not found", hash) } + rb, hasResult := tx.Result() + if !hasResult { + return nil, fmt.Errorf("tx (%X): %w", hash, ErrTxResultPending) + } var result abci.ExecTxResult - if rb, hasResult := tx.Result(); hasResult { - if err := result.Unmarshal(rb); err != nil { - return nil, fmt.Errorf("unmarshal tx result: %w", err) - } + if err := result.Unmarshal(rb); err != nil { + return nil, fmt.Errorf("unmarshal tx result: %w", err) } return &coretypes.ResultTx{ Hash: hash, diff --git a/sei-tendermint/internal/p2p/giga_router_test.go b/sei-tendermint/internal/p2p/giga_router_test.go index 6995807269..d739c9cbe7 100644 --- a/sei-tendermint/internal/p2p/giga_router_test.go +++ b/sei-tendermint/internal/p2p/giga_router_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/sha256" "encoding/json" + "errors" "fmt" "net/netip" "slices" @@ -13,6 +14,8 @@ import ( dbm "github.com/tendermint/tm-db" "golang.org/x/time/rate" + "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" + memblockdb "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block/mem_block_db" abci "github.com/sei-protocol/sei-chain/sei-tendermint/abci/types" "github.com/sei-protocol/sei-chain/sei-tendermint/crypto" "github.com/sei-protocol/sei-chain/sei-tendermint/crypto/ed25519" @@ -432,3 +435,82 @@ func TestGigaRouter_FinalizeBlocks(t *testing.T) { }) require.NoError(t, 
err) } + +// txStub / blockStub / resultStub are minimal block.Block / block.Transaction +// / block.Result implementations for unit-testing GigaRouter.Tx in isolation +// — without spinning up the full consensus harness used by +// TestGigaRouter_FinalizeBlocks. +type txStub struct { + hash, bytes []byte + height uint64 + index uint32 +} + +func (t txStub) Hash() []byte { return t.hash } +func (t txStub) Bytes() []byte { return t.bytes } +func (t txStub) Result() ([]byte, bool) { return nil, false } +func (t txStub) Height() uint64 { return t.height } +func (t txStub) Index() uint32 { return t.index } + +type blockStub struct { + hash []byte + height uint64 + txs []block.Transaction +} + +func (b blockStub) Hash() []byte { return b.hash } +func (b blockStub) Height() uint64 { return b.height } +func (b blockStub) Time() time.Time { return time.Time{} } +func (b blockStub) Transactions() []block.Transaction { return b.txs } + +type resultStub struct{ b []byte } + +func (r resultStub) Bytes() []byte { return r.b } + +// TestGigaRouter_TxResultPending pins the contract from finding (1) of the +// branch review: a tx whose parent block has been written to BlockDB but +// whose execution results have not yet been attached must surface as +// ErrTxResultPending — never as a zero-result success that +// broadcast_tx_commit pollers would mistake for an executed tx. +func TestGigaRouter_TxResultPending(t *testing.T) { + ctx := t.Context() + + blockDB := memblockdb.NewMemBlockDB() + r := &GigaRouter{blockDB: blockDB} + + tx := txStub{ + hash: []byte("hash-of-tx-1"), + bytes: []byte("payload-1"), + height: 5, + index: 0, + } + blk := blockStub{ + hash: []byte("block-A"), + height: 5, + txs: []block.Transaction{tx}, + } + require.NoError(t, blockDB.WriteBlock(ctx, blk)) + + // Unknown tx hash → "not found" sentinel (distinct from pending). 
+ _, err := r.Tx(ctx, []byte("does-not-exist")) + require.True(t, err != nil, "expected error for unknown tx") + require.False(t, errors.Is(err, ErrTxResultPending), "unknown tx must not surface as pending") + + // Block written, results not yet attached → ErrTxResultPending. + _, err = r.Tx(ctx, tx.hash) + require.True(t, errors.Is(err, ErrTxResultPending), + "expected ErrTxResultPending after WriteBlock but before SetTransactionResults, got %v", err) + + // After SetTransactionResults, Tx returns the translated ResultTx. + wantCode := uint32(7) // arbitrary non-zero code: confirms the result actually round-trips. + resultBytes := utils.OrPanic1((&abci.ExecTxResult{Code: wantCode}).Marshal()) + require.NoError(t, blockDB.SetTransactionResults(ctx, blk.hash, []block.Result{resultStub{b: resultBytes}})) + + rt, err := r.Tx(ctx, tx.hash) + require.NoError(t, err, "Tx after SetTransactionResults") + require.Equal(t, tx.hash, []byte(rt.Hash)) + require.Equal(t, int64(5), rt.Height) + require.Equal(t, uint32(0), rt.Index) + require.Equal(t, tx.bytes, rt.Tx) + require.Equal(t, wantCode, rt.TxResult.Code) +} From a5091ed4f9d104b7c490fae20382327041dcdcd7 Mon Sep 17 00:00:00 2001 From: Wen Date: Thu, 7 May 2026 16:11:44 -0700 Subject: [PATCH 5/9] sei-db: dedup tx index by (txHash, blockHash); split tx body from per-instance result (CON-256) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes review finding (2): the same tx hash included in two different GlobalBlocks (different lanes producing the same tx) used to overwrite each other in mem_block_db's flat tx-by-hash map, and a Prune of the older block silently deleted the index entry that pointed at the still- retained newer one. The Transaction interface now carries only the invariant body (Hash + Bytes). 
Per-block-occurrence data (height, index, marshaled exec result) moves to Result, returned alongside Transaction by GetTransactionByHash: GetTransactionByHash(ctx, hash) (tx Transaction, results []Result, found bool, err error) `results` lists every recorded execution (one per block that has had SetTransactionResults called for it); pending entries are filtered out so callers get exactly the executions, never empty wrappers. The caller (GigaRouter.Tx here) decides which one is canonical: 1. Lowest-height execution with abci.CodeTypeOK (a tx is expected to succeed at most once across the chain — lowest height is just a deterministic tiebreaker). 2. Else highest-height failure (most recent attempt). 3. Else (found && len(results)==0) → ErrTxResultPending. mem_block_db keeps a per-tx-hash entry with a two-level map of {blockHash -> {height, index, bytes}}. WriteBlock registers a pending instance per (txHash, blockHash); SetTransactionResults attaches bytes; Prune removes only the per-block instance and drops the entry when the inner map empties. Other blocks containing the same tx hash stay reachable. New tests: - block_db_test.TestTransactionMultipleBlocks: shared tx hash across two blocks, both results reachable, pruning either leaves the other intact. - giga_router_test.TestGigaRouter_TxMultipleBlocks_PrefersSuccess: A=fail, B=success → Tx returns B regardless of insertion order. - giga_router_test.TestGigaRouter_TxMultipleBlocks_FallsBackToLatestFailure: both fail → Tx returns the highest-height failure. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- sei-db/ledger_db/block/block_db.go | 67 ++++--- .../block/block_db_test/block_db_test.go | 184 +++++++++++++----- .../block/blocksim/block_generator.go | 19 +- .../block/mem_block_db/mem_block_db.go | 145 ++++++++++---- .../block/mem_block_db/mem_block_db_test.go | 21 +- sei-tendermint/internal/p2p/giga_blockdb.go | 49 +++-- sei-tendermint/internal/p2p/giga_router.go | 80 ++++++-- .../internal/p2p/giga_router_test.go | 122 +++++++++--- 8 files changed, 481 insertions(+), 206 deletions(-) diff --git a/sei-db/ledger_db/block/block_db.go b/sei-db/ledger_db/block/block_db.go index f818a5d841..0771937cef 100644 --- a/sei-db/ledger_db/block/block_db.go +++ b/sei-db/ledger_db/block/block_db.go @@ -18,24 +18,15 @@ var ErrUnknownBlock = errors.New("block db: unknown block hash") // results slice doesn't match the number of transactions in the referenced block. var ErrResultCountMismatch = errors.New("block db: result count does not match transaction count") -// Transaction is the BlockDB's view of a single transaction inside a block: -// its hash, raw bytes, post-execution result, plus its position within the -// chain (height + index). Result returns ok=false until SetTransactionResults -// has been called for the parent block. +// Transaction is the BlockDB's view of a transaction's *body* — what's +// invariant across every block that includes it. Per-block-occurrence data +// (height, index, execution result) lives on Result, returned alongside the +// Transaction by GetTransactionByHash. type Transaction interface { // Hash returns the canonical transaction hash used for indexing. Hash() []byte // Bytes returns the raw, on-the-wire transaction bytes. Bytes() []byte - // Result returns the marshaled execution result and ok=true once it has - // been attached via SetTransactionResults. 
Returns (nil, false) for - // transactions whose parent block has been written but not yet had - // results recorded — callers should treat that as "not yet executed". - Result() (bytes []byte, ok bool) - // Height returns the height of the block this transaction belongs to. - Height() uint64 - // Index returns the position of this transaction within its block. - Index() uint32 } // Block is the BlockDB's view of a finalized block. The interface intentionally @@ -49,21 +40,28 @@ type Block interface { Height() uint64 // Time returns the block timestamp. Time() time.Time - // Transactions returns the block's transactions in order. Each Transaction - // must report Height() == this block's height and Index() == its position - // in the slice. Result() may be empty at WriteBlock time; results are - // supplied separately via SetTransactionResults. + // Transactions returns the block's transactions in order. Transactions() []Transaction } -// Result is the BlockDB's view of one transaction's post-execution result, -// supplied to SetTransactionResults after the application has executed the -// block. The interface keeps BlockDB chain-agnostic — callers wrap their -// concrete result types (e.g. abci.ExecTxResult) in a small adapter that -// returns the marshaled bytes. +// Result is the BlockDB's view of one transaction's post-execution outcome +// in a specific block: the marshaled execution result plus where it landed +// (block height + position in that block). Used both as the input to +// SetTransactionResults and as the per-occurrence value returned by +// GetTransactionByHash. +// +// The interface stays chain-agnostic — callers wrap their concrete result +// types (e.g. abci.ExecTxResult) in a small adapter. Bytes() is permitted +// to return the wire encoding lazily; backends that need to copy/index +// will call it during SetTransactionResults under the assumption it is +// inexpensive (typically a single proto Marshal). 
type Result interface { // Bytes returns the marshaled execution result for one transaction. Bytes() []byte + // Height returns the block height of the block that produced this result. + Height() uint64 + // Index returns the position of the transaction within that block. + Index() uint32 } // A database for storing finalized block and transaction data. @@ -82,11 +80,15 @@ type BlockDB interface { // SetTransactionResults attaches per-transaction execution results to a previously written // block, identified by its block hash. results must be the same length as the block's - // Transactions(); each entry corresponds positionally to the transaction at that index. + // Transactions(); each entry corresponds positionally to the transaction at that index, + // and its Height()/Index() must match the block's height and the position in this slice. // // Returns ErrUnknownBlock if no block with the given hash has been written, and // ErrResultCountMismatch if len(results) does not match the block's transaction count. // + // Calling SetTransactionResults a second time for the same block hash overwrites the + // previously attached results. + // // Like WriteBlock, this is async with respect to disk persistence; pair with Flush() // for crash durability. SetTransactionResults(ctx context.Context, blockHash []byte, results []Result) error @@ -106,8 +108,23 @@ type BlockDB interface { // Retrieves a block by its height. GetBlockByHeight(ctx context.Context, height uint64) (block Block, ok bool, err error) - // Retrieves a transaction by its hash. - GetTransactionByHash(ctx context.Context, hash []byte) (transaction Transaction, ok bool, err error) + // GetTransactionByHash returns the canonical transaction body and the list + // of recorded executions for that hash. 
Because the same tx body can be + // included in multiple blocks (different lanes producing the same tx), the + // API surfaces every recorded execution; the caller picks which is canonical + // for its purposes (e.g. preferring a successful execution). + // + // Returns: + // found=false unknown tx hash; tx and results are nil/empty. + // found=true, len(results)==0 tx exists in some block but no execution results + // have been attached yet (between WriteBlock and + // SetTransactionResults). + // found=true, len(results)>=1 one entry per block that has had results attached; + // order is unspecified. + // + // The returned Transaction's Hash and Bytes are the same regardless of + // which block included it (cryptographic hash collision aside). + GetTransactionByHash(ctx context.Context, hash []byte) (tx Transaction, results []Result, found bool, err error) // Schedules pruning for all blocks with a height less than the given height. Pruning is asynchronous, // and so this method does not provide any guarantees about when the pruning will complete. 
It is possible diff --git a/sei-db/ledger_db/block/block_db_test/block_db_test.go b/sei-db/ledger_db/block/block_db_test/block_db_test.go index 54cb233f76..555c62bc31 100644 --- a/sei-db/ledger_db/block/block_db_test/block_db_test.go +++ b/sei-db/ledger_db/block/block_db_test/block_db_test.go @@ -37,23 +37,22 @@ func newMemBlockDBBuilder() blockDBBuilder { } type testTx struct { - hash []byte - bytes []byte - result []byte - hasResult bool - height uint64 - index uint32 + hash []byte + bytes []byte } -func (t *testTx) Hash() []byte { return t.hash } -func (t *testTx) Bytes() []byte { return t.bytes } -func (t *testTx) Result() ([]byte, bool) { return t.result, t.hasResult } -func (t *testTx) Height() uint64 { return t.height } -func (t *testTx) Index() uint32 { return t.index } +func (t *testTx) Hash() []byte { return t.hash } +func (t *testTx) Bytes() []byte { return t.bytes } -type testResult struct{ bytes []byte } +type testResult struct { + bytes []byte + height uint64 + index uint32 +} -func (r testResult) Bytes() []byte { return r.bytes } +func (r testResult) Bytes() []byte { return r.bytes } +func (r testResult) Height() uint64 { return r.height } +func (r testResult) Index() uint32 { return r.index } type testBlock struct { hash []byte @@ -71,10 +70,8 @@ func makeBlock(height uint64, numTxs int) *testBlock { txs := make([]block.Transaction, numTxs) for i := 0; i < numTxs; i++ { txs[i] = &testTx{ - hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), - bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), - height: height, - index: uint32(i), //nolint:gosec + hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), + bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), } } return &testBlock{ @@ -84,6 +81,21 @@ func makeBlock(height uint64, numTxs int) *testBlock { } } +// makeResults builds a testResult per tx, populated with synthetic bytes +// + the canonical (height, index) for that block's tx slice. 
+func makeResults(blk *testBlock) []block.Result { + txs := blk.Transactions() + out := make([]block.Result, len(txs)) + for i := range txs { + out[i] = testResult{ + bytes: []byte(fmt.Sprintf("result-%d-%d", blk.height, i)), + height: blk.height, + index: uint32(i), //nolint:gosec + } + } + return out +} + func forEachBuilder(t *testing.T, fn func(t *testing.T, builder func(path string) (block.BlockDB, error))) { for _, b := range buildBuilders() { t.Run(b.name, func(t *testing.T) { @@ -136,12 +148,14 @@ func TestGetTransactionByHash(t *testing.T) { blk := makeBlock(1, 4) requireNoError(t, db.WriteBlock(ctx, blk)) + // Block written, no results attached yet — found=true with empty results. for _, tx := range blk.Transactions() { - got, ok, err := db.GetTransactionByHash(ctx, tx.Hash()) + gotTx, results, found, err := db.GetTransactionByHash(ctx, tx.Hash()) requireNoError(t, err) - requireTrue(t, ok, "expected transaction with hash %s", tx.Hash()) - requireBytesEqual(t, tx.Hash(), got.Hash(), "transaction hash") - requireBytesEqual(t, tx.Bytes(), got.Bytes(), "transaction data") + requireTrue(t, found, "expected tx %s found pre-results", tx.Hash()) + requireBytesEqual(t, tx.Hash(), gotTx.Hash(), "transaction hash") + requireBytesEqual(t, tx.Bytes(), gotTx.Bytes(), "transaction data") + requireTrue(t, len(results) == 0, "expected 0 results pre-SetTransactionResults, got %d", len(results)) } }) } @@ -170,9 +184,9 @@ func TestGetTransactionNotFound(t *testing.T) { requireNoError(t, err) defer db.Close(ctx) - _, ok, err := db.GetTransactionByHash(ctx, []byte("nonexistent")) + _, _, found, err := db.GetTransactionByHash(ctx, []byte("nonexistent")) requireNoError(t, err) - requireTrue(t, !ok, "expected no transaction with nonexistent hash") + requireTrue(t, !found, "expected no transaction with nonexistent hash") }) } @@ -208,37 +222,109 @@ func TestSetTransactionResults(t *testing.T) { blk := makeBlock(7, 3) requireNoError(t, db.WriteBlock(ctx, blk)) - // Pre-results: 
GetTransactionByHash returns the tx with Result ok=false. + // Pre-results: found=true, results empty. for _, tx := range blk.Transactions() { - got, ok, err := db.GetTransactionByHash(ctx, tx.Hash()) + gotTx, results, found, err := db.GetTransactionByHash(ctx, tx.Hash()) requireNoError(t, err) - requireTrue(t, ok, "expected tx pre-results") - _, hasResult := got.Result() - requireTrue(t, !hasResult, "expected Result ok=false before SetTransactionResults") - requireTrue(t, got.Height() == 7, "expected height carried through, got %d", got.Height()) - requireTrue(t, got.Index() == tx.Index(), "expected index carried through") + requireTrue(t, found, "expected tx %s found pre-results", tx.Hash()) + requireBytesEqual(t, tx.Bytes(), gotTx.Bytes(), "tx body bytes") + requireTrue(t, len(results) == 0, "expected 0 results pre-SetTransactionResults, got %d", len(results)) } // Attach results. - results := []block.Result{ - testResult{bytes: []byte("result-0")}, - testResult{bytes: []byte("result-1")}, - testResult{bytes: []byte("result-2")}, - } + results := makeResults(blk) requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), results)) - // Post-results: Result() returns (bytes, true). + // Post-results: results carries (bytes, height, index). 
for i, tx := range blk.Transactions() { - got, ok, err := db.GetTransactionByHash(ctx, tx.Hash()) + gotTx, gotResults, found, err := db.GetTransactionByHash(ctx, tx.Hash()) requireNoError(t, err) - requireTrue(t, ok, "expected tx post-results") - gotResult, hasResult := got.Result() - requireTrue(t, hasResult, "expected Result ok=true after SetTransactionResults") - requireBytesEqual(t, results[i].Bytes(), gotResult, fmt.Sprintf("tx[%d] result", i)) + requireTrue(t, found, "expected tx %s found post-results", tx.Hash()) + requireBytesEqual(t, tx.Bytes(), gotTx.Bytes(), "tx body bytes") + requireTrue(t, len(gotResults) == 1, "expected 1 result, got %d", len(gotResults)) + r := gotResults[0] + requireBytesEqual(t, results[i].Bytes(), r.Bytes(), fmt.Sprintf("tx[%d] result bytes", i)) + requireTrue(t, r.Height() == 7, "expected result height 7, got %d", r.Height()) + requireTrue(t, r.Index() == uint32(i), "expected result index %d, got %d", i, r.Index()) //nolint:gosec } }) } +// TestTransactionMultipleBlocks pins the (txHash, blockHash) dedup behavior: +// the same tx hash included in two different blocks is recorded as two +// separate Result entries. Both remain reachable while both blocks are +// retained; pruning either one leaves the other (and its result) +// reachable. Models the lane-block scenario where the same tx body +// appears in two different GlobalBlocks (one per lane). +func TestTransactionMultipleBlocks(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + // Two blocks at different heights with the same tx hash + bytes. 
+ const txHash = "shared-tx-hash" + const txBytes = "shared-tx-data" + shared := func() block.Transaction { + return &testTx{hash: []byte(txHash), bytes: []byte(txBytes)} + } + blkA := &testBlock{ + hash: []byte("block-A"), + height: 1, + txs: []block.Transaction{shared()}, + } + blkB := &testBlock{ + hash: []byte("block-B"), + height: 2, + txs: []block.Transaction{shared()}, + } + requireNoError(t, db.WriteBlock(ctx, blkA)) + requireNoError(t, db.WriteBlock(ctx, blkB)) + + // Both blocks present, no results yet → found, empty results. + _, results, found, err := db.GetTransactionByHash(ctx, []byte(txHash)) + requireNoError(t, err) + requireTrue(t, found, "expected found") + requireTrue(t, len(results) == 0, "expected 0 results pre-SetTransactionResults, got %d", len(results)) + + // Attach results: A gets "result-A", B gets "result-B". + requireNoError(t, db.SetTransactionResults(ctx, blkA.Hash(), []block.Result{ + testResult{bytes: []byte("result-A"), height: 1, index: 0}, + })) + requireNoError(t, db.SetTransactionResults(ctx, blkB.Hash(), []block.Result{ + testResult{bytes: []byte("result-B"), height: 2, index: 0}, + })) + + // Both results reachable. + _, results, found, err = db.GetTransactionByHash(ctx, []byte(txHash)) + requireNoError(t, err) + requireTrue(t, found, "expected found") + requireTrue(t, len(results) == 2, "expected 2 results, got %d", len(results)) + seen := map[string]uint64{} + for _, r := range results { + seen[string(r.Bytes())] = r.Height() + } + requireTrue(t, seen["result-A"] == 1, "expected result-A at height 1, got %v", seen["result-A"]) + requireTrue(t, seen["result-B"] == 2, "expected result-B at height 2, got %v", seen["result-B"]) + + // Prune block A; B's result remains reachable. 
+ requireNoError(t, db.Prune(ctx, 2)) + _, results, found, err = db.GetTransactionByHash(ctx, []byte(txHash)) + requireNoError(t, err) + requireTrue(t, found, "expected found after prune A") + requireTrue(t, len(results) == 1, "expected 1 result after prune A, got %d", len(results)) + requireBytesEqual(t, []byte("result-B"), results[0].Bytes(), "remaining result must be B") + requireTrue(t, results[0].Height() == 2, "remaining result height must be 2") + + // Prune block B; tx is now unknown (entire entry collected). + requireNoError(t, db.Prune(ctx, 3)) + _, _, found, err = db.GetTransactionByHash(ctx, []byte(txHash)) + requireNoError(t, err) + requireTrue(t, !found, "expected tx unknown after pruning all blocks containing it") + }) +} + func TestSetTransactionResultsErrors(t *testing.T) { forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { ctx := context.Background() @@ -253,7 +339,7 @@ func TestSetTransactionResultsErrors(t *testing.T) { // Mismatched count. 
blk := makeBlock(1, 2) requireNoError(t, db.WriteBlock(ctx, blk)) - err = db.SetTransactionResults(ctx, blk.Hash(), []block.Result{testResult{bytes: []byte("only-one")}}) + err = db.SetTransactionResults(ctx, blk.Hash(), []block.Result{testResult{bytes: []byte("only-one"), height: 1, index: 0}}) requireTrue(t, err != nil, "expected error for mismatched result count") }) } @@ -295,9 +381,9 @@ func TestPrunePreservesUnprunedTransactions(t *testing.T) { requireNoError(t, db.Prune(ctx, 2)) for _, tx := range survivingBlock.Transactions() { - _, ok, err := db.GetTransactionByHash(ctx, tx.Hash()) + _, _, found, err := db.GetTransactionByHash(ctx, tx.Hash()) requireNoError(t, err) - requireTrue(t, ok, "expected transaction %s to survive pruning", tx.Hash()) + requireTrue(t, found, "expected transaction %s to survive pruning", tx.Hash()) } }) } @@ -343,9 +429,9 @@ func TestCloseAndReopen(t *testing.T) { requireBlockEqual(t, blk, got) for _, tx := range blk.Transactions() { - gotTx, ok, err := db2.GetTransactionByHash(ctx, tx.Hash()) + gotTx, _, found, err := db2.GetTransactionByHash(ctx, tx.Hash()) requireNoError(t, err) - requireTrue(t, ok, "expected tx to survive close/reopen") + requireTrue(t, found, "expected tx to survive close/reopen") requireBytesEqual(t, tx.Bytes(), gotTx.Bytes(), "transaction data") } }) @@ -420,9 +506,9 @@ func TestBulkWriteAndQuery(t *testing.T) { requireBlockEqual(t, expected, byHash) for _, expectedTx := range expected.Transactions() { - gotTx, ok, err := db.GetTransactionByHash(ctx, expectedTx.Hash()) + gotTx, _, found, err := db.GetTransactionByHash(ctx, expectedTx.Hash()) requireNoError(t, err) - requireTrue(t, ok, "tx not found by hash %x (block height %d)", expectedTx.Hash(), expected.Height()) + requireTrue(t, found, "tx not found by hash %x (block height %d)", expectedTx.Hash(), expected.Height()) requireBytesEqual(t, expectedTx.Hash(), gotTx.Hash(), "tx hash") requireBytesEqual(t, expectedTx.Bytes(), gotTx.Bytes(), "tx data") } @@ 
-438,7 +524,7 @@ func makeRandomBlock(rng *crand.CannedRandom, height uint64, numTxs int) *testBl txHash := rng.Address('t', int64(height)*1000+int64(i), 32) txDataLen := 64 + int(rng.Int64Range(0, 512)) txData := copyBytes(rng.Bytes(txDataLen)) - txs[i] = &testTx{hash: txHash, bytes: txData, height: height, index: uint32(i)} //nolint:gosec + txs[i] = &testTx{hash: txHash, bytes: txData} } blockHash := rng.Address('b', int64(height), 32) diff --git a/sei-db/ledger_db/block/blocksim/block_generator.go b/sei-db/ledger_db/block/blocksim/block_generator.go index 2763830496..b08b6de190 100644 --- a/sei-db/ledger_db/block/blocksim/block_generator.go +++ b/sei-db/ledger_db/block/blocksim/block_generator.go @@ -15,17 +15,12 @@ const ( // genTx is a synthetic transaction that satisfies block.Transaction. type genTx struct { - hash []byte - bytes []byte - height uint64 - index uint32 + hash []byte + bytes []byte } -func (t *genTx) Hash() []byte { return t.hash } -func (t *genTx) Bytes() []byte { return t.bytes } -func (t *genTx) Result() ([]byte, bool) { return nil, false } -func (t *genTx) Height() uint64 { return t.height } -func (t *genTx) Index() uint32 { return t.index } +func (t *genTx) Hash() []byte { return t.hash } +func (t *genTx) Bytes() []byte { return t.bytes } // genBlock is a synthetic block that satisfies block.Block. 
extra is held to // simulate block-level metadata bytes — the BlockDB contract has no field for @@ -106,10 +101,8 @@ func (g *BlockGenerator) buildBlock() *genBlock { for i := uint64(0); i < g.config.TransactionsPerBlock; i++ { txID := int64(height)*int64(g.config.TransactionsPerBlock) + int64(i) //nolint:gosec txs[i] = &genTx{ - hash: g.rand.Address(txHashType, txID, int(g.config.TransactionHashSize)), //nolint:gosec - bytes: g.rand.Bytes(int(g.config.BytesPerTransaction)), //nolint:gosec - height: height, - index: uint32(i), //nolint:gosec + hash: g.rand.Address(txHashType, txID, int(g.config.TransactionHashSize)), //nolint:gosec + bytes: g.rand.Bytes(int(g.config.BytesPerTransaction)), //nolint:gosec } } diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go index 0bb3e3a4a6..e62fe3d153 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go @@ -8,20 +8,51 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" ) +// resultInstance holds the per-block-occurrence data for one tx hash: +// where the tx landed (height + index in that block) and, once +// SetTransactionResults has run, the marshaled execution result. bytes is +// nil while the entry is "pending" (block written, results not yet attached); +// composedResult on read filters those out. +type resultInstance struct { + height uint64 + index uint32 + bytes []byte // nil if no result attached yet +} + +// txEntry holds the invariant tx body once per hash, plus a per-block-hash +// map of resultInstance recording every block this tx appeared in. +type txEntry struct { + tx block.Transaction + instances map[string]*resultInstance // blockHash -> instance +} + +// composedResult adapts a resultInstance into the block.Result interface for +// the read path. It's value-typed and held briefly per call so allocations +// stay cheap. 
+type composedResult struct { + height uint64 + index uint32 + bytes []byte +} + +func (r composedResult) Bytes() []byte { return r.bytes } +func (r composedResult) Height() uint64 { return r.height } +func (r composedResult) Index() uint32 { return r.index } + // Shared backing store, keyed by path in test builders to simulate restarts. type memBlockDBData struct { mu sync.RWMutex blocksByHash map[string]block.Block blocksByHeight map[uint64]block.Block - txByHash map[string]block.Transaction - // txResultByHash holds result bytes set by SetTransactionResults. Kept - // separate from txByHash so writes (block) and result-attachment - // (post-execution) stay independent — a Transaction read before its - // block executes returns nil from Result() rather than blocking. - txResultByHash map[string][]byte - lowestHeight uint64 - highestHeight uint64 - hasBlocks bool + // txEntries is the two-level index: tx hash -> per-block instances. + // Same tx hash appearing in multiple blocks (different lanes producing + // the same tx) gets one entry per block in the inner map; pruning a + // single block only removes that block's instance and leaves siblings + // intact. + txEntries map[string]*txEntry + lowestHeight uint64 + highestHeight uint64 + hasBlocks bool } // An in-memory implementation of the BlockDB interface. 
Useful as a test fixture to sanity check @@ -36,8 +67,7 @@ func NewMemBlockDB() block.BlockDB { data: &memBlockDBData{ blocksByHash: make(map[string]block.Block), blocksByHeight: make(map[uint64]block.Block), - txByHash: make(map[string]block.Transaction), - txResultByHash: make(map[string][]byte), + txEntries: make(map[string]*txEntry), }, } } @@ -48,10 +78,27 @@ func (m *memBlockDB) WriteBlock(_ context.Context, blk block.Block) error { defer d.mu.Unlock() height := blk.Height() - d.blocksByHash[string(blk.Hash())] = blk + blockHashKey := string(blk.Hash()) + d.blocksByHash[blockHashKey] = blk d.blocksByHeight[height] = blk - for _, tx := range blk.Transactions() { - d.txByHash[string(tx.Hash())] = tx + for i, tx := range blk.Transactions() { + hashKey := string(tx.Hash()) + entry, ok := d.txEntries[hashKey] + if !ok { + // First time we've seen this tx — record the canonical body. + entry = &txEntry{ + tx: tx, + instances: make(map[string]*resultInstance), + } + d.txEntries[hashKey] = entry + } + // Register a pending instance for this block, even if we've recorded + // the same tx hash for another block already. SetTransactionResults + // will fill in bytes later. + entry.instances[blockHashKey] = &resultInstance{ + height: height, + index: uint32(i), //nolint:gosec // tx index fits in uint32 (block tx count is bounded). + } } if !d.hasBlocks { @@ -70,6 +117,10 @@ func (m *memBlockDB) WriteBlock(_ context.Context, blk block.Block) error { } func (m *memBlockDB) SetTransactionResults(_ context.Context, blockHash []byte, results []block.Result) error { + // Marshal happens via results[i].Bytes(). The Result interface contract + // permits this to be cheap (typically a single proto Marshal of an + // already-built message), so we call it inside the write lock without + // pre-buffering. 
d := m.data d.mu.Lock() defer d.mu.Unlock() @@ -82,11 +133,19 @@ func (m *memBlockDB) SetTransactionResults(_ context.Context, blockHash []byte, if len(txs) != len(results) { return fmt.Errorf("%w: block has %d txs, got %d results", block.ErrResultCountMismatch, len(txs), len(results)) } + blockHashKey := string(blockHash) for i, tx := range txs { - // Eager copy of the result bytes so callers can release the source - // adapter (and the underlying *abci.ExecTxResult) immediately after - // SetTransactionResults returns. - d.txResultByHash[string(tx.Hash())] = results[i].Bytes() + entry, ok := d.txEntries[string(tx.Hash())] + if !ok { + // Defensive: WriteBlock should have created this entry. If it + // didn't, the index is corrupted — surface loudly. + return fmt.Errorf("internal: tx index missing entry for tx %x in block %x", tx.Hash(), blockHash) + } + inst, ok := entry.instances[blockHashKey] + if !ok { + return fmt.Errorf("internal: tx index missing instance for tx %x in block %x", tx.Hash(), blockHash) + } + inst.bytes = results[i].Bytes() } return nil } @@ -113,28 +172,30 @@ func (m *memBlockDB) GetBlockByHeight(_ context.Context, height uint64) (block.B return blk, ok, nil } -// composedTx layers a separately-stored result on top of a stored Transaction. -// Hash/Bytes/Height/Index come from the original Transaction; Result reflects -// whether SetTransactionResults has run for the parent block. 
-type composedTx struct { - block.Transaction - result []byte - hasResult bool -} - -func (c composedTx) Result() ([]byte, bool) { return c.result, c.hasResult } - -func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (block.Transaction, bool, error) { +func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (block.Transaction, []block.Result, bool, error) { d := m.data d.mu.RLock() defer d.mu.RUnlock() - tx, ok := d.txByHash[string(hash)] + entry, ok := d.txEntries[string(hash)] if !ok { - return nil, false, nil + return nil, nil, false, nil + } + // Build the slice with only attached-result instances; pending entries + // (bytes==nil) are filtered out so callers get exactly the executions. + // Pre-size to len(instances) — typically 1, occasionally 2-3. + results := make([]block.Result, 0, len(entry.instances)) + for _, inst := range entry.instances { + if inst.bytes == nil { + continue + } + results = append(results, composedResult{ + height: inst.height, + index: inst.index, + bytes: inst.bytes, + }) } - result, hasResult := d.txResultByHash[string(hash)] - return composedTx{Transaction: tx, result: result, hasResult: hasResult}, true, nil + return entry.tx, results, true, nil } func (m *memBlockDB) Prune(_ context.Context, lowestHeightToKeep uint64) error { @@ -152,10 +213,20 @@ func (m *memBlockDB) Prune(_ context.Context, lowestHeightToKeep uint64) error { continue } delete(d.blocksByHeight, h) - delete(d.blocksByHash, string(blk.Hash())) + blockHashKey := string(blk.Hash()) + delete(d.blocksByHash, blockHashKey) for _, tx := range blk.Transactions() { - delete(d.txByHash, string(tx.Hash())) - delete(d.txResultByHash, string(tx.Hash())) + hashKey := string(tx.Hash()) + entry, ok := d.txEntries[hashKey] + if !ok { + continue + } + // Only remove the instance for the block being pruned; other + // blocks containing the same tx hash stay reachable. 
+ delete(entry.instances, blockHashKey) + if len(entry.instances) == 0 { + delete(d.txEntries, hashKey) + } } } diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go index 56504d514e..edec11cede 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db_test.go @@ -10,19 +10,12 @@ import ( ) type testTx struct { - hash []byte - bytes []byte - result []byte - hasResult bool - height uint64 - index uint32 + hash []byte + bytes []byte } -func (t *testTx) Hash() []byte { return t.hash } -func (t *testTx) Bytes() []byte { return t.bytes } -func (t *testTx) Result() ([]byte, bool) { return t.result, t.hasResult } -func (t *testTx) Height() uint64 { return t.height } -func (t *testTx) Index() uint32 { return t.index } +func (t *testTx) Hash() []byte { return t.hash } +func (t *testTx) Bytes() []byte { return t.bytes } type testBlock struct { hash []byte @@ -40,10 +33,8 @@ func makeBlock(height uint64, numTxs int) block.Block { txs := make([]block.Transaction, numTxs) for i := 0; i < numTxs; i++ { txs[i] = &testTx{ - hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), - bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), - height: height, - index: uint32(i), //nolint:gosec + hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), + bytes: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), } } return &testBlock{ diff --git a/sei-tendermint/internal/p2p/giga_blockdb.go b/sei-tendermint/internal/p2p/giga_blockdb.go index ceb8b4bd8a..534cf0f6c9 100644 --- a/sei-tendermint/internal/p2p/giga_blockdb.go +++ b/sei-tendermint/internal/p2p/giga_blockdb.go @@ -28,45 +28,39 @@ func (a globalBlockAdapter) Time() time.Time { return a.gb.Timestamp } func (a globalBlockAdapter) Transactions() []block.Transaction { txs := a.gb.Payload.Txs() - height := uint64(a.gb.GlobalNumber) out := make([]block.Transaction, len(txs)) for i, tx := range txs { out[i] = 
txAdapter{ - hash: tmhash.Sum(tx), - bytes: tx, - height: height, - index: uint32(i), //nolint:gosec + hash: tmhash.Sum(tx), + bytes: tx, } } return out } -// txAdapter wraps a single Autobahn tx + its CometBFT-style hash + its -// position so it satisfies block.Transaction. Result() is always nil at -// WriteBlock time — execution results are attached later via -// BlockDB.SetTransactionResults, and surfaced through mem_block_db's -// composedTx wrapper on read. +// txAdapter wraps a single Autobahn tx + its CometBFT-style hash so it +// satisfies block.Transaction. The interface only carries the invariant +// tx body — per-block-instance data (height, index, result) lives on +// block.Result, attached separately via SetTransactionResults. type txAdapter struct { - hash []byte - bytes []byte - height uint64 - index uint32 + hash []byte + bytes []byte } -func (t txAdapter) Hash() []byte { return t.hash } -func (t txAdapter) Bytes() []byte { return t.bytes } -func (t txAdapter) Result() ([]byte, bool) { return nil, false } -func (t txAdapter) Height() uint64 { return t.height } -func (t txAdapter) Index() uint32 { return t.index } +func (t txAdapter) Hash() []byte { return t.hash } +func (t txAdapter) Bytes() []byte { return t.bytes } -// execResultAdapter wraps *abci.ExecTxResult so it satisfies block.Result. -// Marshal happens lazily on Bytes(); the typical caller is mem_block_db's -// SetTransactionResults, which calls Bytes() exactly once and then drops -// the adapter. ExecTxResult is gogoproto-generated so it carries its own -// Marshal method and never fails on a well-formed message — we OrPanic to -// surface the impossible case loudly rather than silently dropping a result. +// execResultAdapter wraps *abci.ExecTxResult plus its block height + +// position so it satisfies block.Result. Marshal happens lazily on +// Bytes(); the typical caller is mem_block_db's SetTransactionResults, +// which calls Bytes() exactly once and then drops the adapter. 
+// ExecTxResult is gogoproto-generated so it carries its own Marshal +// method and never fails on a well-formed message — we OrPanic to surface +// the impossible case loudly rather than silently dropping a result. type execResultAdapter struct { - r *abci.ExecTxResult + r *abci.ExecTxResult + height uint64 + index uint32 } func (a execResultAdapter) Bytes() []byte { @@ -75,3 +69,6 @@ func (a execResultAdapter) Bytes() []byte { } return utils.OrPanic1(a.r.Marshal()) } + +func (a execResultAdapter) Height() uint64 { return a.height } +func (a execResultAdapter) Index() uint32 { return a.index } diff --git a/sei-tendermint/internal/p2p/giga_router.go b/sei-tendermint/internal/p2p/giga_router.go index 994c8153b1..80154fc8f9 100644 --- a/sei-tendermint/internal/p2p/giga_router.go +++ b/sei-tendermint/internal/p2p/giga_router.go @@ -135,8 +135,8 @@ func (r *GigaRouter) LastCommittedBlockNumber() int64 { } // ErrTxResultPending is returned by Tx when a transaction is known -// (its parent block has been written to BlockDB) but the per-tx execution -// result hasn't been attached yet — the window between WriteBlock and +// (its parent block has been written to BlockDB) but no execution +// result has been attached yet — the window between WriteBlock and // SetTransactionResults inside runExecute. Distinct from "not found" // because the tx is real and the caller should retry, not give up. // Callers that don't care can errors.Is-check to fold it into a generic @@ -154,32 +154,66 @@ var ErrTxResultPending = errors.New("transaction result not yet recorded") // req.Prove is intentionally not honored — Autobahn doesn't materialize // types.TxProof, and tooling that needs it falls back to the CometBFT path. // -// Returns ErrTxResultPending when the parent block exists but -// SetTransactionResults hasn't run for it yet. 
Returning a zero-result -// ResultTx in that window would be indistinguishable from a successful -// tx with Code=0 and no events, which would mislead broadcast_tx_commit -// pollers into thinking a not-yet-executed tx had succeeded. +// When the same tx hash was included in multiple blocks (different lanes +// producing the same tx), BlockDB returns every recorded execution; we +// pick the canonical one here. Order of preference: +// 1. The lowest-height execution with Code == abci.CodeTypeOK (a tx is +// expected to succeed at most once across the chain). +// 2. Otherwise the highest-height failure (most recent attempt). +// 3. If no executions are recorded but the tx hash is known to BlockDB, +// return ErrTxResultPending — distinguishes "may retry" from +// "definitely doesn't exist". func (r *GigaRouter) Tx(ctx context.Context, hash []byte) (*coretypes.ResultTx, error) { - tx, ok, err := r.blockDB.GetTransactionByHash(ctx, hash) + tx, results, found, err := r.blockDB.GetTransactionByHash(ctx, hash) if err != nil { return nil, fmt.Errorf("blockDB.GetTransactionByHash: %w", err) } - if !ok { + if !found { return nil, fmt.Errorf("tx (%X) not found", hash) } - rb, hasResult := tx.Result() - if !hasResult { + if len(results) == 0 { return nil, fmt.Errorf("tx (%X): %w", hash, ErrTxResultPending) } - var result abci.ExecTxResult - if err := result.Unmarshal(rb); err != nil { - return nil, fmt.Errorf("unmarshal tx result: %w", err) + + // Pick the canonical execution. Unmarshal each result once to read + // Code; the multi-result case is rare so the per-call cost is small. 
+ var ( + successful *abci.ExecTxResult + successRes block.Result + failure *abci.ExecTxResult + failureRes block.Result + ) + for _, res := range results { + var parsed abci.ExecTxResult + if err := parsed.Unmarshal(res.Bytes()); err != nil { + return nil, fmt.Errorf("unmarshal tx result (block height %d): %w", res.Height(), err) + } + if parsed.Code == abci.CodeTypeOK { + if successful == nil || res.Height() < successRes.Height() { + p := parsed + successful = &p + successRes = res + } + continue + } + if failure == nil || res.Height() > failureRes.Height() { + p := parsed + failure = &p + failureRes = res + } + } + + chosenResult := successful + chosenRes := successRes + if chosenResult == nil { + chosenResult = failure + chosenRes = failureRes } return &coretypes.ResultTx{ Hash: hash, - Height: utils.Clamp[int64](tx.Height()), - Index: tx.Index(), - TxResult: result, + Height: utils.Clamp[int64](chosenRes.Height()), + Index: chosenRes.Index(), + TxResult: *chosenResult, Tx: tx.Bytes(), }, nil } @@ -415,10 +449,18 @@ func (r *GigaRouter) runExecute(ctx context.Context) error { // above, so RPC consumers (env.Tx) can return them by tx hash. // Wrapping each *abci.ExecTxResult in execResultAdapter keeps // sei-db chain-agnostic — marshaling happens inside the adapter. + // Result.Height/Index reflect this block's height + the tx's + // position so per-block-instance metadata travels with the result + // (the same tx hash can land in different positions across lane + // blocks). blockHash := b.Header.Hash() results := make([]block.Result, len(txResults)) - for i, r := range txResults { - results[i] = execResultAdapter{r: r} + for i, txResult := range txResults { + results[i] = execResultAdapter{ + r: txResult, + height: uint64(b.GlobalNumber), + index: uint32(i), //nolint:gosec // tx index fits in uint32 (block tx count is bounded). 
+ } } if err := r.blockDB.SetTransactionResults(ctx, blockHash.Bytes(), results); err != nil { return fmt.Errorf("r.blockDB.SetTransactionResults(%v): %w", n, err) diff --git a/sei-tendermint/internal/p2p/giga_router_test.go b/sei-tendermint/internal/p2p/giga_router_test.go index d739c9cbe7..6d9cb06e08 100644 --- a/sei-tendermint/internal/p2p/giga_router_test.go +++ b/sei-tendermint/internal/p2p/giga_router_test.go @@ -436,21 +436,16 @@ func TestGigaRouter_FinalizeBlocks(t *testing.T) { require.NoError(t, err) } -// txStub / blockStub / resultStub are minimal block.Block / block.Transaction -// / block.Result implementations for unit-testing GigaRouter.Tx in isolation -// — without spinning up the full consensus harness used by -// TestGigaRouter_FinalizeBlocks. +// txStub / blockStub / resultStub are minimal block.Transaction / +// block.Block / block.Result implementations for unit-testing +// GigaRouter.Tx in isolation — without spinning up the full consensus +// harness used by TestGigaRouter_FinalizeBlocks. 
type txStub struct { hash, bytes []byte - height uint64 - index uint32 } -func (t txStub) Hash() []byte { return t.hash } -func (t txStub) Bytes() []byte { return t.bytes } -func (t txStub) Result() ([]byte, bool) { return nil, false } -func (t txStub) Height() uint64 { return t.height } -func (t txStub) Index() uint32 { return t.index } +func (t txStub) Hash() []byte { return t.hash } +func (t txStub) Bytes() []byte { return t.bytes } type blockStub struct { hash []byte @@ -463,13 +458,26 @@ func (b blockStub) Height() uint64 { return b.height } func (b blockStub) Time() time.Time { return time.Time{} } func (b blockStub) Transactions() []block.Transaction { return b.txs } -type resultStub struct{ b []byte } +type resultStub struct { + b []byte + height uint64 + index uint32 +} -func (r resultStub) Bytes() []byte { return r.b } +func (r resultStub) Bytes() []byte { return r.b } +func (r resultStub) Height() uint64 { return r.height } +func (r resultStub) Index() uint32 { return r.index } + +// marshaledExecResult is a tiny helper that returns a marshaled +// abci.ExecTxResult with the given Code — saves repetitive OrPanic1 +// boilerplate in the per-test setup. +func marshaledExecResult(code uint32) []byte { + return utils.OrPanic1((&abci.ExecTxResult{Code: code}).Marshal()) +} -// TestGigaRouter_TxResultPending pins the contract from finding (1) of the -// branch review: a tx whose parent block has been written to BlockDB but -// whose execution results have not yet been attached must surface as +// TestGigaRouter_TxResultPending pins the contract from review finding +// (1): a tx whose parent block has been written to BlockDB but whose +// execution results have not yet been attached must surface as // ErrTxResultPending — never as a zero-result success that // broadcast_tx_commit pollers would mistake for an executed tx. 
func TestGigaRouter_TxResultPending(t *testing.T) { @@ -479,10 +487,8 @@ func TestGigaRouter_TxResultPending(t *testing.T) { r := &GigaRouter{blockDB: blockDB} tx := txStub{ - hash: []byte("hash-of-tx-1"), - bytes: []byte("payload-1"), - height: 5, - index: 0, + hash: []byte("hash-of-tx-1"), + bytes: []byte("payload-1"), } blk := blockStub{ hash: []byte("block-A"), @@ -503,8 +509,9 @@ func TestGigaRouter_TxResultPending(t *testing.T) { // After SetTransactionResults, Tx returns the translated ResultTx. wantCode := uint32(7) // arbitrary non-zero code: confirms the result actually round-trips. - resultBytes := utils.OrPanic1((&abci.ExecTxResult{Code: wantCode}).Marshal()) - require.NoError(t, blockDB.SetTransactionResults(ctx, blk.hash, []block.Result{resultStub{b: resultBytes}})) + require.NoError(t, blockDB.SetTransactionResults(ctx, blk.hash, []block.Result{ + resultStub{b: marshaledExecResult(wantCode), height: 5, index: 0}, + })) rt, err := r.Tx(ctx, tx.hash) require.NoError(t, err, "Tx after SetTransactionResults") @@ -514,3 +521,74 @@ func TestGigaRouter_TxResultPending(t *testing.T) { require.Equal(t, tx.bytes, rt.Tx) require.Equal(t, wantCode, rt.TxResult.Code) } + +// TestGigaRouter_TxMultipleBlocks_PrefersSuccess pins review finding (2): +// the same tx hash included in two different blocks must keep both +// executions reachable, and Tx() must canonicalize on the successful +// one regardless of insertion order. +func TestGigaRouter_TxMultipleBlocks_PrefersSuccess(t *testing.T) { + ctx := t.Context() + + blockDB := memblockdb.NewMemBlockDB() + r := &GigaRouter{blockDB: blockDB} + + const txHash = "shared-hash" + const txBytes = "shared-data" + shared := func() block.Transaction { + return txStub{hash: []byte(txHash), bytes: []byte(txBytes)} + } + // A is written first and fails; B is written second and succeeds. + // The "successful wins" rule must beat insertion order. 
+ blkA := blockStub{hash: []byte("block-A"), height: 11, txs: []block.Transaction{shared()}} + blkB := blockStub{hash: []byte("block-B"), height: 22, txs: []block.Transaction{shared()}} + require.NoError(t, blockDB.WriteBlock(ctx, blkA)) + require.NoError(t, blockDB.WriteBlock(ctx, blkB)) + + const failCode = uint32(7) + require.NoError(t, blockDB.SetTransactionResults(ctx, blkA.hash, []block.Result{ + resultStub{b: marshaledExecResult(failCode), height: 11, index: 0}, + })) + require.NoError(t, blockDB.SetTransactionResults(ctx, blkB.hash, []block.Result{ + resultStub{b: marshaledExecResult(abci.CodeTypeOK), height: 22, index: 0}, + })) + + rt, err := r.Tx(ctx, []byte(txHash)) + require.NoError(t, err) + require.Equal(t, int64(22), rt.Height, "expected canonical execution from successful block") + require.Equal(t, abci.CodeTypeOK, rt.TxResult.Code) +} + +// TestGigaRouter_TxMultipleBlocks_FallsBackToLatestFailure pins the +// "no successful execution" branch of the selection rule: when every +// recorded execution is a failure, Tx() returns the highest-height +// failure (the most recent attempt). 
+func TestGigaRouter_TxMultipleBlocks_FallsBackToLatestFailure(t *testing.T) { + ctx := t.Context() + + blockDB := memblockdb.NewMemBlockDB() + r := &GigaRouter{blockDB: blockDB} + + const txHash = "shared-hash" + const txBytes = "shared-data" + shared := func() block.Transaction { + return txStub{hash: []byte(txHash), bytes: []byte(txBytes)} + } + blkA := blockStub{hash: []byte("block-A"), height: 11, txs: []block.Transaction{shared()}} + blkB := blockStub{hash: []byte("block-B"), height: 22, txs: []block.Transaction{shared()}} + require.NoError(t, blockDB.WriteBlock(ctx, blkA)) + require.NoError(t, blockDB.WriteBlock(ctx, blkB)) + + const failA = uint32(5) + const failB = uint32(7) + require.NoError(t, blockDB.SetTransactionResults(ctx, blkA.hash, []block.Result{ + resultStub{b: marshaledExecResult(failA), height: 11, index: 0}, + })) + require.NoError(t, blockDB.SetTransactionResults(ctx, blkB.hash, []block.Result{ + resultStub{b: marshaledExecResult(failB), height: 22, index: 0}, + })) + + rt, err := r.Tx(ctx, []byte(txHash)) + require.NoError(t, err) + require.Equal(t, int64(22), rt.Height, "expected highest-height failure") + require.Equal(t, failB, rt.TxResult.Code) +} From ca88c1a8ec98e3ba629a810c81a5f67912e0c73b Mon Sep 17 00:00:00 2001 From: Wen Date: Thu, 7 May 2026 17:13:14 -0700 Subject: [PATCH 6/9] =?UTF-8?q?sei-db,=20giga:=20review=20cleanup=20?= =?UTF-8?q?=E2=80=94=20interface=20docs,=20perf,=20comment=20freshness=20(?= =?UTF-8?q?CON-256)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addresses review findings 3-5 plus the doc gaps flagged on the Block/Result interfaces: (3) Stale comment in giga_router.go referenced data.State's prior blockHashes hash index — the index was removed in commit 1. Rewritten to describe the current contract (BlockDB, RPC unknown- hash semantics) without the dangling reference. 
(4) globalBlockAdapter.Transactions() used to allocate a fresh []block.Transaction and re-sha256 every payload tx on every call. mem_block_db calls it on WriteBlock, SetTransactionResults, and Prune — the Prune call happens under the write lock. Switched to a newGlobalBlockAdapter constructor that builds the slice once and caches it; Transactions() now returns the cached slice. (5) mem_block_db.SetTransactionResults marshaled each result via Result.Bytes() inside the write lock. For a 1000-tx block that's ~MB of proto Marshal blocking every concurrent reader. Pre-marshal happens before acquiring the lock; the lock now holds only the map writes. The error paths (unknown block, count mismatch) waste the pre-marshal but are exceptional. Doc updates: - Block.Transactions: contract pinned ("must be cheap to call repeatedly; backends may call it more than once"). - Result.Bytes: lifecycle pinned ("backends call it exactly once per result inside SetTransactionResults and cache the bytes"). New test: TestSetTransactionResultsOverwrites — exercises the documented "second call replaces" semantics, useful for callers that re-execute on recovery. Co-Authored-By: Claude Opus 4.7 (1M context) --- sei-db/ledger_db/block/block_db.go | 21 +++++++---- .../block/block_db_test/block_db_test.go | 37 +++++++++++++++++++ .../block/mem_block_db/mem_block_db.go | 20 ++++++---- sei-tendermint/internal/p2p/giga_blockdb.go | 33 +++++++++++------ sei-tendermint/internal/p2p/giga_router.go | 21 ++++++----- 5 files changed, 97 insertions(+), 35 deletions(-) diff --git a/sei-db/ledger_db/block/block_db.go b/sei-db/ledger_db/block/block_db.go index 0771937cef..a0b7544b9e 100644 --- a/sei-db/ledger_db/block/block_db.go +++ b/sei-db/ledger_db/block/block_db.go @@ -31,8 +31,12 @@ type Transaction interface { // Block is the BlockDB's view of a finalized block. 
The interface intentionally // exposes only what BlockDB itself needs to index and serve reads — backends -// must not assume any particular concrete implementation. Methods returning -// slices may allocate; callers that index repeatedly should cache the result. +// must not assume any particular concrete implementation. +// +// Backends are permitted to call Transactions() multiple times across the +// block's lifetime in storage (WriteBlock, SetTransactionResults validation, +// Prune). Implementations that pay a non-trivial cost per call (allocation, +// hashing) should memoize the result at construction. type Block interface { // Hash returns the canonical block hash used for indexing. Hash() []byte @@ -40,7 +44,8 @@ type Block interface { Height() uint64 // Time returns the block timestamp. Time() time.Time - // Transactions returns the block's transactions in order. + // Transactions returns the block's transactions in order. Must be cheap + // to call repeatedly — backends may call it more than once per block. Transactions() []Transaction } @@ -51,10 +56,12 @@ type Block interface { // GetTransactionByHash. // // The interface stays chain-agnostic — callers wrap their concrete result -// types (e.g. abci.ExecTxResult) in a small adapter. Bytes() is permitted -// to return the wire encoding lazily; backends that need to copy/index -// will call it during SetTransactionResults under the assumption it is -// inexpensive (typically a single proto Marshal). +// types (e.g. abci.ExecTxResult) in a small adapter. +// +// Bytes() may run a non-trivial proto Marshal; backends call it exactly +// once per result inside SetTransactionResults and cache the bytes for +// the lifetime of the entry. Adapters can therefore be cheap value types +// that defer Marshal until Bytes() is called and then can be discarded. type Result interface { // Bytes returns the marshaled execution result for one transaction. 
Bytes() []byte diff --git a/sei-db/ledger_db/block/block_db_test/block_db_test.go b/sei-db/ledger_db/block/block_db_test/block_db_test.go index 555c62bc31..75ad235158 100644 --- a/sei-db/ledger_db/block/block_db_test/block_db_test.go +++ b/sei-db/ledger_db/block/block_db_test/block_db_test.go @@ -325,6 +325,43 @@ func TestTransactionMultipleBlocks(t *testing.T) { }) } +// TestSetTransactionResultsOverwrites pins the documented "second call +// overwrites" behavior — useful for callers that re-execute a block on +// recovery and expect the latest results to win. +func TestSetTransactionResultsOverwrites(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + blk := makeBlock(3, 2) + requireNoError(t, db.WriteBlock(ctx, blk)) + + // First attach: "old-N". + first := []block.Result{ + testResult{bytes: []byte("old-0"), height: 3, index: 0}, + testResult{bytes: []byte("old-1"), height: 3, index: 1}, + } + requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), first)) + + // Second attach: "new-N" — must replace. 
+ second := []block.Result{ + testResult{bytes: []byte("new-0"), height: 3, index: 0}, + testResult{bytes: []byte("new-1"), height: 3, index: 1}, + } + requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), second)) + + for i, tx := range blk.Transactions() { + _, results, found, err := db.GetTransactionByHash(ctx, tx.Hash()) + requireNoError(t, err) + requireTrue(t, found, "expected tx %s found", tx.Hash()) + requireTrue(t, len(results) == 1, "expected 1 result, got %d", len(results)) + requireBytesEqual(t, second[i].Bytes(), results[0].Bytes(), fmt.Sprintf("tx[%d] result must reflect overwrite", i)) + } + }) +} + func TestSetTransactionResultsErrors(t *testing.T) { forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { ctx := context.Background() diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go index e62fe3d153..b4b57ead04 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go @@ -117,10 +117,16 @@ func (m *memBlockDB) WriteBlock(_ context.Context, blk block.Block) error { } func (m *memBlockDB) SetTransactionResults(_ context.Context, blockHash []byte, results []block.Result) error { - // Marshal happens via results[i].Bytes(). The Result interface contract - // permits this to be cheap (typically a single proto Marshal of an - // already-built message), so we call it inside the write lock without - // pre-buffering. + // Pre-marshal each result outside the lock. Result.Bytes() may run a + // proto Marshal — for a 1000-tx block that's ~MB of CPU work, which + // we don't want to do under the write lock blocking every reader. The + // extra cost is wasted on the rare error paths (unknown block, + // count mismatch) but those are exceptional. 
+ bytesByIdx := make([][]byte, len(results)) + for i, r := range results { + bytesByIdx[i] = r.Bytes() + } + d := m.data d.mu.Lock() defer d.mu.Unlock() @@ -130,8 +136,8 @@ func (m *memBlockDB) SetTransactionResults(_ context.Context, blockHash []byte, return fmt.Errorf("%w: %x", block.ErrUnknownBlock, blockHash) } txs := blk.Transactions() - if len(txs) != len(results) { - return fmt.Errorf("%w: block has %d txs, got %d results", block.ErrResultCountMismatch, len(txs), len(results)) + if len(txs) != len(bytesByIdx) { + return fmt.Errorf("%w: block has %d txs, got %d results", block.ErrResultCountMismatch, len(txs), len(bytesByIdx)) } blockHashKey := string(blockHash) for i, tx := range txs { @@ -145,7 +151,7 @@ func (m *memBlockDB) SetTransactionResults(_ context.Context, blockHash []byte, if !ok { return fmt.Errorf("internal: tx index missing instance for tx %x in block %x", tx.Hash(), blockHash) } - inst.bytes = results[i].Bytes() + inst.bytes = bytesByIdx[i] } return nil } diff --git a/sei-tendermint/internal/p2p/giga_blockdb.go b/sei-tendermint/internal/p2p/giga_blockdb.go index 534cf0f6c9..eb4dc94868 100644 --- a/sei-tendermint/internal/p2p/giga_blockdb.go +++ b/sei-tendermint/internal/p2p/giga_blockdb.go @@ -13,8 +13,27 @@ import ( // globalBlockAdapter wraps *atypes.GlobalBlock so it satisfies block.Block // without leaking sei-db into autobahn/types. Per-tx hashes use // tmhash.Sum (sha256), matching CometBFT's tx-hash convention. +// +// txs is computed eagerly in newGlobalBlockAdapter and cached for the +// lifetime of the adapter. mem_block_db calls Transactions() multiple +// times (WriteBlock, SetTransactionResults validation, Prune); without +// the cache each call would re-allocate the slice and re-sha256 every +// payload tx — under the write lock, on the Prune path. 
type globalBlockAdapter struct { - gb *atypes.GlobalBlock + gb *atypes.GlobalBlock + txs []block.Transaction +} + +func newGlobalBlockAdapter(gb *atypes.GlobalBlock) globalBlockAdapter { + src := gb.Payload.Txs() + txs := make([]block.Transaction, len(src)) + for i, tx := range src { + txs[i] = txAdapter{ + hash: tmhash.Sum(tx), + bytes: tx, + } + } + return globalBlockAdapter{gb: gb, txs: txs} } func (a globalBlockAdapter) Hash() []byte { @@ -26,17 +45,7 @@ func (a globalBlockAdapter) Height() uint64 { return uint64(a.gb.GlobalNumber) } func (a globalBlockAdapter) Time() time.Time { return a.gb.Timestamp } -func (a globalBlockAdapter) Transactions() []block.Transaction { - txs := a.gb.Payload.Txs() - out := make([]block.Transaction, len(txs)) - for i, tx := range txs { - out[i] = txAdapter{ - hash: tmhash.Sum(tx), - bytes: tx, - } - } - return out -} +func (a globalBlockAdapter) Transactions() []block.Transaction { return a.txs } // txAdapter wraps a single Autobahn tx + its CometBFT-style hash so it // satisfies block.Transaction. The interface only carries the invariant diff --git a/sei-tendermint/internal/p2p/giga_router.go b/sei-tendermint/internal/p2p/giga_router.go index 80154fc8f9..1deb9a110f 100644 --- a/sei-tendermint/internal/p2p/giga_router.go +++ b/sei-tendermint/internal/p2p/giga_router.go @@ -55,13 +55,16 @@ type GigaRouter struct { service *giga.Service poolIn *giga.Pool[NodePublicKey, rpc.Server[giga.API]] poolOut *giga.Pool[NodePublicKey, rpc.Client[giga.API]] - // blockDB indexes finalized blocks by hash. Populated synchronously by - // runExecute right before each block is handed to executeBlock; read by - // BlockByHash. Today's instance is mem_block_db (in-memory), so it does - // not survive process restarts — but neither does data.State's prior - // hash index, and the read path is best-effort (CometBFT semantics for - // unknown hash is &ResultBlock{Block: nil}). 
Restart-safe repopulation - // belongs to a follow-up that wires a persistent BlockDB. + // blockDB indexes finalized blocks by hash and tracks per-tx execution + // results. Populated by runExecute: WriteBlock lands just before each + // block is handed to executeBlock; SetTransactionResults follows once + // FinalizeBlock returns. Read by BlockByHash and Tx. + // + // Today's instance is mem_block_db (in-memory), so it does not survive + // process restarts — RPC semantics treat that as "unknown hash" + // (BlockByHash returns &ResultBlock{Block: nil}; Tx returns + // "tx not found"). Restart-safe repopulation belongs to a follow-up + // that wires a persistent BlockDB. blockDB block.BlockDB // lastCommitQCRecv is subscribed once at construction and reused for the @@ -254,7 +257,7 @@ func (r *GigaRouter) BlockByNumber(ctx context.Context, n atypes.GlobalBlockNumb } return nil, fmt.Errorf("data.GlobalBlock(%v): %w", n, err) } - return r.translateBlock(globalBlockAdapter{gb: gb}), nil + return r.translateBlock(newGlobalBlockAdapter(gb)), nil } // BlockByHash returns the finalized global block keyed by Autobahn block- @@ -438,7 +441,7 @@ func (r *GigaRouter) runExecute(ctx context.Context) error { // BlockByHash sees the block from this point forward. The data // layer's WAL remains the primary durability story; BlockDB is the // hash index, not the source of truth on restart. 
- if err := r.blockDB.WriteBlock(ctx, globalBlockAdapter{gb: b}); err != nil { + if err := r.blockDB.WriteBlock(ctx, newGlobalBlockAdapter(b)); err != nil { return fmt.Errorf("r.blockDB.WriteBlock(%v): %w", n, err) } commitResp, txResults, err := r.executeBlock(ctx, b) From 5fd87e6a0f9ba5b4fda3a78ed4b07bb6cdc400f3 Mon Sep 17 00:00:00 2001 From: Wen Date: Thu, 7 May 2026 17:52:21 -0700 Subject: [PATCH 7/9] =?UTF-8?q?sei-db/mem:=20tighten=20BlockDB=20invariant?= =?UTF-8?q?s=20=E2=80=94=20idempotency,=20collision=20check,=20determinist?= =?UTF-8?q?ic=20reads=20(CON-272)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addresses the second-round review findings on mem_block_db: (1) WriteBlock idempotency. A second WriteBlock for the same block hash used to silently overwrite the entire instances map for every tx in the block — including any results SetTransactionResults had already attached. WriteBlock now no-ops on duplicate blockHash. (2) Deterministic Tx selection on ties. mem_block_db.GetTransactionByHash iterated entry.instances in random Go map order, so when GigaRouter.Tx needed a tie-breaker between two executions at the same height the chosen winner depended on iteration order — different across calls, different across nodes. Sort the inner map keys by blockHash so the returned slice is stable. With the existing GigaRouter.Tx selection (lowest-height success / highest-height failure), the tie-break is now "first lexicographic blockHash wins." (3) Read isolation. composedResult was a value copy of the internal resultInstance, but the bytes []byte shared the underlying array with the storage slot. A subsequent SetTransactionResults overwrite of the same instance would mutate what the caller was reading. Deep-copy bytes on read so the returned Result is immune. While here, drop composedResult entirely — it had identical fields to resultInstance, so the latter now implements block.Result directly. 
(6) Tx body collision detection. New ErrTxHashCollision sentinel. WriteBlock validates that any pre-existing entry for a tx hash has matching bytes; mismatch refuses the entire write (two-pass validate-then-mutate, so partial state isn't left behind on rejection). Catches a bug class — wrong-hash-fn, malformed input — that would otherwise silently keep the first-writer's bytes. New tests: - TestWriteBlockIdempotent: re-WriteBlock preserves attached results. - TestWriteBlockTxHashCollision: mismatched second-block bytes are rejected; partial state isn't left behind. - TestGetTransactionByHashDeterministicOrder: 3 blocks at same height with same tx hash, repeated reads return the same order. - TestGetTransactionByHashReadIsolation: an earlier-read Result is immune to a later SetTransactionResults overwrite. Co-Authored-By: Claude Opus 4.7 (1M context) --- sei-db/ledger_db/block/block_db.go | 7 + .../block/block_db_test/block_db_test.go | 150 ++++++++++++++++++ .../block/mem_block_db/mem_block_db.go | 93 +++++++---- 3 files changed, 219 insertions(+), 31 deletions(-) diff --git a/sei-db/ledger_db/block/block_db.go b/sei-db/ledger_db/block/block_db.go index a0b7544b9e..f8f9633aa0 100644 --- a/sei-db/ledger_db/block/block_db.go +++ b/sei-db/ledger_db/block/block_db.go @@ -18,6 +18,13 @@ var ErrUnknownBlock = errors.New("block db: unknown block hash") // results slice doesn't match the number of transactions in the referenced block. var ErrResultCountMismatch = errors.New("block db: result count does not match transaction count") +// ErrTxHashCollision is returned by WriteBlock when a tx hash that was already +// recorded under a different (txBytes) is offered again with mismatched bytes — +// i.e. two distinct tx bodies hashing to the same value. Cryptographically near- +// impossible for sha256, but a defensive check that catches bug classes (e.g. +// the wrong hashing function being applied somewhere upstream) without cost. 
+var ErrTxHashCollision = errors.New("block db: tx hash collision (different bytes for same hash)") + // Transaction is the BlockDB's view of a transaction's *body* — what's // invariant across every block that includes it. Per-block-occurrence data // (height, index, execution result) lives on Result, returned alongside the diff --git a/sei-db/ledger_db/block/block_db_test/block_db_test.go b/sei-db/ledger_db/block/block_db_test/block_db_test.go index 75ad235158..bcac543b01 100644 --- a/sei-db/ledger_db/block/block_db_test/block_db_test.go +++ b/sei-db/ledger_db/block/block_db_test/block_db_test.go @@ -362,6 +362,156 @@ func TestSetTransactionResultsOverwrites(t *testing.T) { }) } +// TestWriteBlockIdempotent pins the contract that calling WriteBlock a +// second time for the same blockHash is a silent no-op — does NOT wipe +// any results already attached via SetTransactionResults. Without this +// the second WriteBlock would silently corrupt the index by re-creating +// pending instances on top of recorded ones (review finding 1). +func TestWriteBlockIdempotent(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + blk := makeBlock(4, 2) + requireNoError(t, db.WriteBlock(ctx, blk)) + requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), makeResults(blk))) + + // Second WriteBlock for the same block — must not destroy results. 
+ requireNoError(t, db.WriteBlock(ctx, blk)) + + for i, tx := range blk.Transactions() { + _, results, found, err := db.GetTransactionByHash(ctx, tx.Hash()) + requireNoError(t, err) + requireTrue(t, found, "expected tx %s found after re-WriteBlock", tx.Hash()) + requireTrue(t, len(results) == 1, "expected 1 result after re-WriteBlock, got %d", len(results)) + requireBytesEqual(t, []byte(fmt.Sprintf("result-%d-%d", 4, i)), results[0].Bytes(), fmt.Sprintf("tx[%d] result must survive re-WriteBlock", i)) + } + }) +} + +// TestWriteBlockTxHashCollision pins the defensive collision check from +// review finding 6: writing a second block whose tx hash matches a +// previously-written tx but with different bytes is rejected loudly, +// rather than silently keeping the first-writer's bytes. +func TestWriteBlockTxHashCollision(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + blkA := &testBlock{ + hash: []byte("block-A"), + height: 1, + txs: []block.Transaction{ + &testTx{hash: []byte("h"), bytes: []byte("v1")}, + }, + } + blkB := &testBlock{ + hash: []byte("block-B"), + height: 2, + txs: []block.Transaction{ + &testTx{hash: []byte("h"), bytes: []byte("v2")}, + }, + } + requireNoError(t, db.WriteBlock(ctx, blkA)) + err = db.WriteBlock(ctx, blkB) + requireTrue(t, err != nil, "expected ErrTxHashCollision for second block with mismatched bytes") + + // Block B must not have been recorded — partial state from a + // failed validation would corrupt blocksByHash. 
+ _, ok, err := db.GetBlockByHash(ctx, blkB.Hash()) + requireNoError(t, err) + requireTrue(t, !ok, "block B must not be present after collision rejection") + }) +} + +// TestGetTransactionByHashDeterministicOrder pins the sort-by-blockHash +// behavior on the read path: with multiple instances, the returned +// slice must be in stable order across repeated calls — otherwise +// downstream selection that ties on Height() would non-deterministically +// flip between RPC calls (review finding 2). +func TestGetTransactionByHashDeterministicOrder(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + const txHash = "shared" + const txBytes = "data" + shared := func() block.Transaction { + return &testTx{hash: []byte(txHash), bytes: []byte(txBytes)} + } + // Three blocks at the same height carrying the same tx — exercises + // the tie-breaker path. Block hashes intentionally chosen so that + // lexicographic order doesn't match insertion order. + blkB := &testBlock{hash: []byte("bbb"), height: 5, txs: []block.Transaction{shared()}} + blkA := &testBlock{hash: []byte("aaa"), height: 5, txs: []block.Transaction{shared()}} + blkC := &testBlock{hash: []byte("ccc"), height: 5, txs: []block.Transaction{shared()}} + requireNoError(t, db.WriteBlock(ctx, blkB)) + requireNoError(t, db.WriteBlock(ctx, blkA)) + requireNoError(t, db.WriteBlock(ctx, blkC)) + + requireNoError(t, db.SetTransactionResults(ctx, blkB.Hash(), []block.Result{testResult{bytes: []byte("rB"), height: 5, index: 0}})) + requireNoError(t, db.SetTransactionResults(ctx, blkA.Hash(), []block.Result{testResult{bytes: []byte("rA"), height: 5, index: 0}})) + requireNoError(t, db.SetTransactionResults(ctx, blkC.Hash(), []block.Result{testResult{bytes: []byte("rC"), height: 5, index: 0}})) + + // Repeatedly read; results must be in the same order each time. 
+ var first [][]byte + for iter := 0; iter < 10; iter++ { + _, results, found, err := db.GetTransactionByHash(ctx, []byte(txHash)) + requireNoError(t, err) + requireTrue(t, found, "expected tx found") + requireTrue(t, len(results) == 3, "expected 3 results, got %d", len(results)) + gotOrder := make([][]byte, len(results)) + for i, r := range results { + gotOrder[i] = r.Bytes() + } + if iter == 0 { + first = gotOrder + continue + } + for i := range gotOrder { + requireBytesEqual(t, first[i], gotOrder[i], fmt.Sprintf("iter %d position %d", iter, i)) + } + } + }) +} + +// TestGetTransactionByHashReadIsolation pins review finding 3: a Result +// returned by an earlier call must not be mutated by a later +// SetTransactionResults overwrite (the documented "second call +// replaces" behavior). Without the deep-copy of bytes on read, the +// caller's Result.Bytes() would observe the new value retroactively. +func TestGetTransactionByHashReadIsolation(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + blk := makeBlock(7, 1) + requireNoError(t, db.WriteBlock(ctx, blk)) + requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), []block.Result{ + testResult{bytes: []byte("first"), height: 7, index: 0}, + })) + + _, results, _, err := db.GetTransactionByHash(ctx, blk.Transactions()[0].Hash()) + requireNoError(t, err) + requireTrue(t, len(results) == 1, "expected 1 result") + held := results[0] + + // Overwrite — caller's earlier read must not be mutated. 
+ requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), []block.Result{ + testResult{bytes: []byte("second"), height: 7, index: 0}, + })) + requireBytesEqual(t, []byte("first"), held.Bytes(), "earlier-read Result must not observe overwrite") + }) +} + func TestSetTransactionResultsErrors(t *testing.T) { forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { ctx := context.Background() diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go index b4b57ead04..0167551fcc 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go @@ -1,24 +1,32 @@ package memblockdb import ( + "bytes" "context" "fmt" + "sort" "sync" "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" ) -// resultInstance holds the per-block-occurrence data for one tx hash: -// where the tx landed (height + index in that block) and, once -// SetTransactionResults has run, the marshaled execution result. bytes is -// nil while the entry is "pending" (block written, results not yet attached); -// composedResult on read filters those out. +// resultInstance is the per-block-occurrence record for one tx hash. It +// carries the location (height + position in the block) plus the marshaled +// execution result (nil while the entry is "pending" — block written but +// SetTransactionResults not yet called). The same struct value satisfies +// block.Result on read; callers receive a fresh copy with bytes deep-copied +// so subsequent SetTransactionResults overwrites can't be observed +// retroactively. 
type resultInstance struct { height uint64 index uint32 bytes []byte // nil if no result attached yet } +func (r resultInstance) Bytes() []byte { return r.bytes } +func (r resultInstance) Height() uint64 { return r.height } +func (r resultInstance) Index() uint32 { return r.index } + // txEntry holds the invariant tx body once per hash, plus a per-block-hash // map of resultInstance recording every block this tx appeared in. type txEntry struct { @@ -26,19 +34,6 @@ type txEntry struct { instances map[string]*resultInstance // blockHash -> instance } -// composedResult adapts a resultInstance into the block.Result interface for -// the read path. It's value-typed and held briefly per call so allocations -// stay cheap. -type composedResult struct { - height uint64 - index uint32 - bytes []byte -} - -func (r composedResult) Bytes() []byte { return r.bytes } -func (r composedResult) Height() uint64 { return r.height } -func (r composedResult) Index() uint32 { return r.index } - // Shared backing store, keyed by path in test builders to simulate restarts. type memBlockDBData struct { mu sync.RWMutex @@ -77,24 +72,47 @@ func (m *memBlockDB) WriteBlock(_ context.Context, blk block.Block) error { d.mu.Lock() defer d.mu.Unlock() - height := blk.Height() blockHashKey := string(blk.Hash()) + // Idempotent on duplicate: a second WriteBlock for the same block hash + // would re-create resultInstance entries with bytes=nil, silently + // destroying anything SetTransactionResults already attached. Skip. + if _, exists := d.blocksByHash[blockHashKey]; exists { + return nil + } + height := blk.Height() + txs := blk.Transactions() + + // First pass: validate every tx body against any pre-existing entry for + // the same hash. A mismatch surfaces a tx-hash collision (two distinct + // bodies hashing to the same value) — refuse the entire write rather + // than partially mutate state. 
+ for _, tx := range txs { + entry, ok := d.txEntries[string(tx.Hash())] + if !ok { + continue + } + if !bytes.Equal(entry.tx.Bytes(), tx.Bytes()) { + return fmt.Errorf("%w: tx %x in block %x", block.ErrTxHashCollision, tx.Hash(), blk.Hash()) + } + } + + // Second pass: actually write. d.blocksByHash[blockHashKey] = blk d.blocksByHeight[height] = blk - for i, tx := range blk.Transactions() { + for i, tx := range txs { hashKey := string(tx.Hash()) entry, ok := d.txEntries[hashKey] if !ok { - // First time we've seen this tx — record the canonical body. entry = &txEntry{ tx: tx, instances: make(map[string]*resultInstance), } d.txEntries[hashKey] = entry } - // Register a pending instance for this block, even if we've recorded - // the same tx hash for another block already. SetTransactionResults - // will fill in bytes later. + // Register a pending instance for this block. SetTransactionResults + // fills bytes later. The (txHash, blockHash) keying means the same + // tx hash in another block keeps its own instance; pruning one + // block doesn't disturb others. entry.instances[blockHashKey] = &resultInstance{ height: height, index: uint32(i), //nolint:gosec // tx index fits in uint32 (block tx count is bounded). @@ -187,18 +205,31 @@ func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (block if !ok { return nil, nil, false, nil } - // Build the slice with only attached-result instances; pending entries - // (bytes==nil) are filtered out so callers get exactly the executions. - // Pre-size to len(instances) — typically 1, occasionally 2-3. - results := make([]block.Result, 0, len(entry.instances)) - for _, inst := range entry.instances { + // Sort by blockHash so the returned slice has deterministic order + // across calls — Go map iteration is randomized, and downstream + // selection (e.g. GigaRouter.Tx tie-breaking on equal heights) + // depends on stable input order to return the same Result for the + // same query. 
+ keys := make([]string, 0, len(entry.instances)) + for k := range entry.instances { + keys = append(keys, k) + } + sort.Strings(keys) + results := make([]block.Result, 0, len(keys)) + for _, k := range keys { + inst := entry.instances[k] if inst.bytes == nil { continue } - results = append(results, composedResult{ + // Deep-copy bytes so a later SetTransactionResults overwrite of + // the same instance can't mutate what the caller is reading. + // The struct-value copy below shares the slice header otherwise. + bytesCopy := make([]byte, len(inst.bytes)) + copy(bytesCopy, inst.bytes) + results = append(results, resultInstance{ height: inst.height, index: inst.index, - bytes: inst.bytes, + bytes: bytesCopy, }) } return entry.tx, results, true, nil From 93b95b3a413ad29bfad62ec5976193de0c86ff97 Mon Sep 17 00:00:00 2001 From: Wen Date: Thu, 7 May 2026 18:07:15 -0700 Subject: [PATCH 8/9] sei-db, giga: drop dead defense, doc honesty, follow-up TODOs (CON-272) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Third-round review cleanup, focused on removing dead defensive code and documenting the trade-offs we decided to defer rather than fix: GetTransactionByHash deep-copy was dead defense. The comment claimed the deep-copy isolated callers from a later SetTransactionResults overwrite — but SetTransactionResults reassigns inst.bytes (slice header swap) rather than mutating bytes in place, so Go's value-copy of the resultInstance struct already isolates the caller's slice header. Removed the make+copy; updated the resultInstance type doc and TestGetTransactionByHashReadIsolation comment to describe what provides isolation now (and to call out that the test still catches the regression if a future change makes SetTransactionResults mutate in place). ErrTxHashCollision doc no longer claims "without cost." 
A rejected write means the corrupted tx hash is permanently poisoned — every future legitimate block reusing the hash will also fail until pruned out. Doc now flags this as operator-attention territory rather than something to retry. ErrTxResultPending doc is honest about the dead-process retry case: on the happy path retry lands in milliseconds, on the unhappy path (executeBlock errored, runExecute exited) the result will never land — operators will see the sentinel forever for any tx in the orphaned block. Follow-up TODOs left in code (deferred to subsequent PRs): - giga_router.go blockDB field: BlockDB DI through GigaRouterConfig; blockDB.Prune wiring (mem_block_db grows without bound today). - giga_blockdb.go globalBlockAdapter.Hash(): memoize parallel to txs cache. - mem_block_db.go memBlockDB type: -race concurrency test. Co-Authored-By: Claude Opus 4.7 (1M context) --- sei-db/ledger_db/block/block_db.go | 11 +++++-- .../block/block_db_test/block_db_test.go | 12 ++++--- .../block/mem_block_db/mem_block_db.go | 32 +++++++++++-------- sei-tendermint/internal/p2p/giga_blockdb.go | 5 +++ sei-tendermint/internal/p2p/giga_router.go | 25 ++++++++++++--- 5 files changed, 61 insertions(+), 24 deletions(-) diff --git a/sei-db/ledger_db/block/block_db.go b/sei-db/ledger_db/block/block_db.go index f8f9633aa0..a723cc410d 100644 --- a/sei-db/ledger_db/block/block_db.go +++ b/sei-db/ledger_db/block/block_db.go @@ -21,8 +21,15 @@ var ErrResultCountMismatch = errors.New("block db: result count does not match t // ErrTxHashCollision is returned by WriteBlock when a tx hash that was already // recorded under a different (txBytes) is offered again with mismatched bytes — // i.e. two distinct tx bodies hashing to the same value. Cryptographically near- -// impossible for sha256, but a defensive check that catches bug classes (e.g. -// the wrong hashing function being applied somewhere upstream) without cost. 
+// impossible for sha256, but a defensive check that catches bug classes such as +// the wrong hashing function being applied somewhere upstream. +// +// Caveat: rejecting the entire write means a single corrupted writer can +// permanently poison the index for that hash — every future legitimate block +// reusing the same hash will also fail until the bad entry is pruned out. This +// is intentional (better to halt than silently keep first-writer's bytes), but +// callers should treat ErrTxHashCollision as a hard failure that needs +// operator attention rather than retry. var ErrTxHashCollision = errors.New("block db: tx hash collision (different bytes for same hash)") // Transaction is the BlockDB's view of a transaction's *body* — what's diff --git a/sei-db/ledger_db/block/block_db_test/block_db_test.go b/sei-db/ledger_db/block/block_db_test/block_db_test.go index bcac543b01..6527f259fc 100644 --- a/sei-db/ledger_db/block/block_db_test/block_db_test.go +++ b/sei-db/ledger_db/block/block_db_test/block_db_test.go @@ -481,11 +481,15 @@ func TestGetTransactionByHashDeterministicOrder(t *testing.T) { }) } -// TestGetTransactionByHashReadIsolation pins review finding 3: a Result -// returned by an earlier call must not be mutated by a later +// TestGetTransactionByHashReadIsolation pins the contract that a Result +// returned by an earlier call is not mutated by a later // SetTransactionResults overwrite (the documented "second call -// replaces" behavior). Without the deep-copy of bytes on read, the -// caller's Result.Bytes() would observe the new value retroactively. +// replaces" behavior). Today this isolation comes from +// SetTransactionResults reassigning the stored slice header rather than +// mutating bytes in place — Go's value-copy of the result struct then +// keeps the caller's slice header pointing at the old backing array. +// If a future change makes SetTransactionResults mutate in place, this +// test catches the regression. 
func TestGetTransactionByHashReadIsolation(t *testing.T) { forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { ctx := context.Background() diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go index 0167551fcc..989234d2b2 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go @@ -14,9 +14,13 @@ import ( // carries the location (height + position in the block) plus the marshaled // execution result (nil while the entry is "pending" — block written but // SetTransactionResults not yet called). The same struct value satisfies -// block.Result on read; callers receive a fresh copy with bytes deep-copied -// so subsequent SetTransactionResults overwrites can't be observed -// retroactively. +// block.Result on read; GetTransactionByHash returns a value-copy so the +// caller's bytes slice header is independent of the storage's. A later +// SetTransactionResults reassigns inst.bytes (does not mutate it in +// place), so the caller's earlier read is naturally isolated by Go's +// slice-header-copy semantics — no defensive deep-copy needed here. +// (If a future caller wants to mutate the returned bytes in place, they +// must copy first.) type resultInstance struct { height uint64 index uint32 @@ -52,6 +56,11 @@ type memBlockDBData struct { // An in-memory implementation of the BlockDB interface. Useful as a test fixture to sanity check // test flows. +// +// TODO(blockdb): add a -race concurrency test — every public method's lock +// shape (WriteBlock + SetTransactionResults under write lock; Get* under +// read lock; two-pass validate-then-mutate in WriteBlock) is currently +// verified only by inspection. 
type memBlockDB struct { data *memBlockDBData } @@ -221,16 +230,13 @@ func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (block if inst.bytes == nil { continue } - // Deep-copy bytes so a later SetTransactionResults overwrite of - // the same instance can't mutate what the caller is reading. - // The struct-value copy below shares the slice header otherwise. - bytesCopy := make([]byte, len(inst.bytes)) - copy(bytesCopy, inst.bytes) - results = append(results, resultInstance{ - height: inst.height, - index: inst.index, - bytes: bytesCopy, - }) + // Value-copy the resultInstance: caller gets a fresh slice header + // pointing at the same backing array. Isolation from a later + // SetTransactionResults is provided by the fact that + // SetTransactionResults reassigns inst.bytes (rather than mutating + // it in place), so the caller's slice header keeps pointing at + // the old array. See the resultInstance type doc. + results = append(results, *inst) } return entry.tx, results, true, nil } diff --git a/sei-tendermint/internal/p2p/giga_blockdb.go b/sei-tendermint/internal/p2p/giga_blockdb.go index eb4dc94868..892ead3987 100644 --- a/sei-tendermint/internal/p2p/giga_blockdb.go +++ b/sei-tendermint/internal/p2p/giga_blockdb.go @@ -37,6 +37,11 @@ func newGlobalBlockAdapter(gb *atypes.GlobalBlock) globalBlockAdapter { } func (a globalBlockAdapter) Hash() []byte { + // TODO(autobahn): memoize parallel to txs — Hash() is called multiple + // times per block (mem_block_db's WriteBlock, runExecute's + // SetTransactionResults call site, BlockByHash translation). Each call + // re-runs the proto marshal + sha256 over the header. Not hot today + // but trivial to cache when we revisit. 
h := a.gb.Header.Hash() return h.Bytes() } diff --git a/sei-tendermint/internal/p2p/giga_router.go b/sei-tendermint/internal/p2p/giga_router.go index 1deb9a110f..e2ef08732f 100644 --- a/sei-tendermint/internal/p2p/giga_router.go +++ b/sei-tendermint/internal/p2p/giga_router.go @@ -63,8 +63,16 @@ type GigaRouter struct { // Today's instance is mem_block_db (in-memory), so it does not survive // process restarts — RPC semantics treat that as "unknown hash" // (BlockByHash returns &ResultBlock{Block: nil}; Tx returns - // "tx not found"). Restart-safe repopulation belongs to a follow-up - // that wires a persistent BlockDB. + // "tx not found"). + // + // TODO(autobahn): make BlockDB injectable via GigaRouterConfig (today + // it's hard-coded to mem_block_db.NewMemBlockDB() in NewGigaRouter, + // and unit tests reach into this unexported field). Will land + // alongside the persistent backend follow-up. + // + // TODO(autobahn): wire blockDB.Prune from runExecute. Today only + // data.PruneBefore runs; mem_block_db grows without bound across the + // chain's lifetime and a long-running process will OOM. blockDB block.BlockDB // lastCommitQCRecv is subscribed once at construction and reused for the @@ -141,9 +149,16 @@ func (r *GigaRouter) LastCommittedBlockNumber() int64 { // (its parent block has been written to BlockDB) but no execution // result has been attached yet — the window between WriteBlock and // SetTransactionResults inside runExecute. Distinct from "not found" -// because the tx is real and the caller should retry, not give up. -// Callers that don't care can errors.Is-check to fold it into a generic -// "try again" flow. +// because the tx is real. +// +// On the happy path the caller can retry and the result will land in +// milliseconds. 
On the unhappy path (executeBlock errored, runExecute +// exited, process is shutting down) the result will never land and +// retry never succeeds — operators inspecting a dead node via RPC will +// see this sentinel forever for any tx in the orphaned block. +// +// Callers that don't care about the distinction can errors.Is-check +// to fold it into a generic "try again" flow. var ErrTxResultPending = errors.New("transaction result not yet recorded") // Tx returns the finalized transaction with the given hash translated into From fe4fb27c1d6f2486f5ffacad456ef488ce6d701b Mon Sep 17 00:00:00 2001 From: Wen Date: Sat, 9 May 2026 15:07:20 -0700 Subject: [PATCH 9/9] sei-db, giga: scope BlockDB to block-only per Giga Tx Query proposal (CON-272) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Giga Transaction Query Architecture proposal makes the BlockDB / Receipt Store split explicit: BlockDB is block-storage-only; canonical txHash → execution result lookup belongs on a separate Receipt Store unified for EVM and Cosmos txs. This commit pulls all the tx-by-hash work out of BlockDB to align. Removed from sei-db/ledger_db/block: - block.Result interface - BlockDB.GetTransactionByHash, BlockDB.SetTransactionResults - ErrUnknownBlock, ErrResultCountMismatch, ErrTxHashCollision - mem_block_db's two-level txEntries / resultInstance index, the sort-by-blockHash read order, the pre-marshal-outside-lock optimization, the tx-body collision check. Removed from sei-tendermint/internal/p2p: - execResultAdapter - GigaRouter.Tx - ErrTxResultPending - executeBlock's []*abci.ExecTxResult return value - The runExecute SetTransactionResults call Removed from sei-tendermint/internal/rpc/core/tx.go: - The env.Tx delegate to GigaRouter.Tx. /tx now reverts to the legacy CometBFT path under Autobahn (returns "querying disabled" since EventSinks are empty); a TODO points at the Receipt Store follow-up. 
Removed from giga_router_test.go: - txStub / blockStub / resultStub fixtures - TestGigaRouter_TxResultPending - TestGigaRouter_TxMultipleBlocks_PrefersSuccess - TestGigaRouter_TxMultipleBlocks_FallsBackToLatestFailure - The Tx round-trip block inside TestGigaRouter_FinalizeBlocks Removed from block_db_test: - TestSetTransactionResults*, TestTransactionMultipleBlocks, TestGetTransactionByHash*, TestWriteBlockTxHashCollision. TestWriteBlockIdempotent stays — block-level idempotency still applies and is worth pinning. What remains (the original PR scope): - block.Block / block.Transaction interfaces, BlockDB block-only API - mem_block_db with blockHash/height indexes + idempotent WriteBlock - globalBlockAdapter / txAdapter (memoized at construction) - runExecute calls WriteBlock before executeBlock - GigaRouter.BlockByHash delegates to BlockDB.GetBlockByHash - translateBlock unifies BlockByNumber + BlockByHash translation - data.State.blockHashes map + GlobalBlockByHash removed (the map hack from #3310) Net diff: ~950 lines removed. Co-Authored-By: Claude Opus 4.7 (1M context) --- sei-db/ledger_db/block/block_db.go | 149 ++----- .../block/block_db_test/block_db_test.go | 411 +----------------- .../block/mem_block_db/mem_block_db.go | 184 +------- sei-tendermint/internal/p2p/giga_blockdb.go | 43 +- sei-tendermint/internal/p2p/giga_router.go | 145 +----- .../internal/p2p/giga_router_test.go | 177 -------- sei-tendermint/internal/rpc/core/tx.go | 13 +- 7 files changed, 88 insertions(+), 1034 deletions(-) diff --git a/sei-db/ledger_db/block/block_db.go b/sei-db/ledger_db/block/block_db.go index a723cc410d..1dc0eb81aa 100644 --- a/sei-db/ledger_db/block/block_db.go +++ b/sei-db/ledger_db/block/block_db.go @@ -10,34 +10,15 @@ import ( // when the database contains no blocks. var ErrNoBlocks = errors.New("block db: no blocks") -// ErrUnknownBlock is returned by SetTransactionResults when the referenced -// block hash has not been written to the database. 
-var ErrUnknownBlock = errors.New("block db: unknown block hash") - -// ErrResultCountMismatch is returned by SetTransactionResults when the supplied -// results slice doesn't match the number of transactions in the referenced block. -var ErrResultCountMismatch = errors.New("block db: result count does not match transaction count") - -// ErrTxHashCollision is returned by WriteBlock when a tx hash that was already -// recorded under a different (txBytes) is offered again with mismatched bytes — -// i.e. two distinct tx bodies hashing to the same value. Cryptographically near- -// impossible for sha256, but a defensive check that catches bug classes such as -// the wrong hashing function being applied somewhere upstream. -// -// Caveat: rejecting the entire write means a single corrupted writer can -// permanently poison the index for that hash — every future legitimate block -// reusing the same hash will also fail until the bad entry is pruned out. This -// is intentional (better to halt than silently keep first-writer's bytes), but -// callers should treat ErrTxHashCollision as a hard failure that needs -// operator attention rather than retry. -var ErrTxHashCollision = errors.New("block db: tx hash collision (different bytes for same hash)") - -// Transaction is the BlockDB's view of a transaction's *body* — what's -// invariant across every block that includes it. Per-block-occurrence data -// (height, index, execution result) lives on Result, returned alongside the -// Transaction by GetTransactionByHash. +// Transaction is the BlockDB's view of a transaction inside a block: its +// hash plus its raw bytes. BlockDB itself is block-storage-only — it does +// not index transactions by hash. 
Per the canonical-receipt-lookup design, +// tx-by-hash routing belongs in a separate Receipt Store; BlockDB exposes +// per-tx Hash() so a Receipt Store (or any other caller) can iterate +// `Block.Transactions()` and register its own (txHash → block, index) +// mapping at WriteBlock time. type Transaction interface { - // Hash returns the canonical transaction hash used for indexing. + // Hash returns the canonical transaction hash. Hash() []byte // Bytes returns the raw, on-the-wire transaction bytes. Bytes() []byte @@ -48,9 +29,8 @@ type Transaction interface { // must not assume any particular concrete implementation. // // Backends are permitted to call Transactions() multiple times across the -// block's lifetime in storage (WriteBlock, SetTransactionResults validation, -// Prune). Implementations that pay a non-trivial cost per call (allocation, -// hashing) should memoize the result at construction. +// block's lifetime in storage. Implementations that pay a non-trivial cost +// per call (allocation, hashing) should memoize the result at construction. type Block interface { // Hash returns the canonical block hash used for indexing. Hash() []byte @@ -63,102 +43,53 @@ type Block interface { Transactions() []Transaction } -// Result is the BlockDB's view of one transaction's post-execution outcome -// in a specific block: the marshaled execution result plus where it landed -// (block height + position in that block). Used both as the input to -// SetTransactionResults and as the per-occurrence value returned by -// GetTransactionByHash. -// -// The interface stays chain-agnostic — callers wrap their concrete result -// types (e.g. abci.ExecTxResult) in a small adapter. -// -// Bytes() may run a non-trivial proto Marshal; backends call it exactly -// once per result inside SetTransactionResults and cache the bytes for -// the lifetime of the entry. 
Adapters can therefore be cheap value types -// that defer Marshal until Bytes() is called and then can be discarded. -type Result interface { - // Bytes returns the marshaled execution result for one transaction. - Bytes() []byte - // Height returns the block height of the block that produced this result. - Height() uint64 - // Index returns the position of the transaction within that block. - Index() uint32 -} - -// A database for storing finalized block and transaction data. +// A database for storing finalized blocks. Block-only — the canonical +// "transaction by hash → execution result" lookup belongs in a separate +// Receipt Store (see the Giga Transaction Query proposal); a future +// Receipt Store reads tx bodies out of BlockDB by (blockHash, index) +// once it has resolved a hash. // -// This store is fully threadsafe. All writes are atomic (that is, after a crash you will either see the write or -// you will not see it at all, i.e. partial writes are not possible). Multiple writes are not atomic with respect -// to each other, meaning if you write A then B and crash, you may observe B but not A (only possible when sharding -// is enabled). Within a single session, read-your-writes consistency is provided. +// This store is fully threadsafe. All writes are atomic (after a crash +// you will either see the write or you will not see it at all, i.e. +// partial writes are not possible). Multiple writes are not atomic with +// respect to each other, meaning if you write A then B and crash, you +// may observe B but not A. Within a single session, read-your-writes +// consistency is provided. type BlockDB interface { - // Write a block to the database. + // WriteBlock writes a block to the database. Idempotent on duplicate + // block hash: a second WriteBlock for the same blockHash is a no-op, + // not an error. // - // This method may return immediately and does not necessarily wait for the block to be written to disk. 
- // Call Flush() if you need to wait until the block is written to disk. + // This method may return immediately and does not necessarily wait for + // the block to be written to disk. Call Flush() if you need to wait. WriteBlock(ctx context.Context, block Block) error - // SetTransactionResults attaches per-transaction execution results to a previously written - // block, identified by its block hash. results must be the same length as the block's - // Transactions(); each entry corresponds positionally to the transaction at that index, - // and its Height()/Index() must match the block's height and the position in this slice. - // - // Returns ErrUnknownBlock if no block with the given hash has been written, and - // ErrResultCountMismatch if len(results) does not match the block's transaction count. - // - // Calling SetTransactionResults a second time for the same block hash overwrites the - // previously attached results. - // - // Like WriteBlock, this is async with respect to disk persistence; pair with Flush() - // for crash durability. - SetTransactionResults(ctx context.Context, blockHash []byte, results []Result) error - - // Blocks until all pending writes are flushed to disk. Any call to WriteBlock issued before calling Flush() - // will be crash-durable after Flush() returns. Calls to WriteBlock() made concurrently with Flush() may or - // may not be crash-durable after Flush() returns (but are otherwise eventually durable). - // - // It is not required to call Flush() in order to ensure data is written to disk. The database asyncronously - // pushes data down to disk even if Flush() is never called. Flush() just allows you to syncronize an external - // goroutine with the database's internal write loop. + // Flush blocks until all pending writes are durable. WriteBlocks issued + // before calling Flush() will be crash-durable after Flush() returns. 
+ // Concurrent WriteBlocks may or may not be durable after Flush() + // returns (but are otherwise eventually durable). Flush(ctx context.Context) error - // Retrieves a block by its hash. + // GetBlockByHash retrieves a block by its hash. GetBlockByHash(ctx context.Context, hash []byte) (block Block, ok bool, err error) - // Retrieves a block by its height. + // GetBlockByHeight retrieves a block by its height. GetBlockByHeight(ctx context.Context, height uint64) (block Block, ok bool, err error) - // GetTransactionByHash returns the canonical transaction body and the list - // of recorded executions for that hash. Because the same tx body can be - // included in multiple blocks (different lanes producing the same tx), the - // API surfaces every recorded execution; the caller picks which is canonical - // for its purposes (e.g. preferring a successful execution). - // - // Returns: - // found=false unknown tx hash; tx and results are nil/empty. - // found=true, len(results)==0 tx exists in some block but no execution results - // have been attached yet (between WriteBlock and - // SetTransactionResults). - // found=true, len(results)>=1 one entry per block that has had results attached; - // order is unspecified. - // - // The returned Transaction's Hash and Bytes are the same regardless of - // which block included it (cryptographic hash collision aside). - GetTransactionByHash(ctx context.Context, hash []byte) (tx Transaction, results []Result, found bool, err error) - - // Schedules pruning for all blocks with a height less than the given height. Pruning is asynchronous, - // and so this method does not provide any guarantees about when the pruning will complete. It is possible - // that some data will not be pruned if the database is closed before the pruning is scheduled. + // Prune schedules pruning of all blocks with height < lowestHeightToKeep. + // Pruning is asynchronous; this method does not guarantee when it will + // complete. 
Some data may not be pruned if the database is closed before + // pruning is scheduled. Prune(ctx context.Context, lowestHeightToKeep uint64) error - // Retrieves the lowest block height in the database. + // GetLowestBlockHeight returns the lowest block height in the database. GetLowestBlockHeight(ctx context.Context) (uint64, error) - // Retrieves the highest block height in the database. + // GetHighestBlockHeight returns the highest block height in the database. GetHighestBlockHeight(ctx context.Context) (uint64, error) - // Closes the database and releases any resources. Any in-flight writes are fully flushed to disk before this - // method returns. + // Close shuts the database down and releases any resources. Any in-flight + // writes are fully flushed to disk before this method returns. Close(ctx context.Context) error } diff --git a/sei-db/ledger_db/block/block_db_test/block_db_test.go b/sei-db/ledger_db/block/block_db_test/block_db_test.go index 6527f259fc..626ba20771 100644 --- a/sei-db/ledger_db/block/block_db_test/block_db_test.go +++ b/sei-db/ledger_db/block/block_db_test/block_db_test.go @@ -44,16 +44,6 @@ type testTx struct { func (t *testTx) Hash() []byte { return t.hash } func (t *testTx) Bytes() []byte { return t.bytes } -type testResult struct { - bytes []byte - height uint64 - index uint32 -} - -func (r testResult) Bytes() []byte { return r.bytes } -func (r testResult) Height() uint64 { return r.height } -func (r testResult) Index() uint32 { return r.index } - type testBlock struct { hash []byte height uint64 @@ -81,21 +71,6 @@ func makeBlock(height uint64, numTxs int) *testBlock { } } -// makeResults builds a testResult per tx, populated with synthetic bytes -// + the canonical (height, index) for that block's tx slice. 
-func makeResults(blk *testBlock) []block.Result { - txs := blk.Transactions() - out := make([]block.Result, len(txs)) - for i := range txs { - out[i] = testResult{ - bytes: []byte(fmt.Sprintf("result-%d-%d", blk.height, i)), - height: blk.height, - index: uint32(i), //nolint:gosec - } - } - return out -} - func forEachBuilder(t *testing.T, fn func(t *testing.T, builder func(path string) (block.BlockDB, error))) { for _, b := range buildBuilders() { t.Run(b.name, func(t *testing.T) { @@ -138,28 +113,6 @@ func TestWriteAndGetBlockByHash(t *testing.T) { }) } -func TestGetTransactionByHash(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - blk := makeBlock(1, 4) - requireNoError(t, db.WriteBlock(ctx, blk)) - - // Block written, no results attached yet — found=true with empty results. - for _, tx := range blk.Transactions() { - gotTx, results, found, err := db.GetTransactionByHash(ctx, tx.Hash()) - requireNoError(t, err) - requireTrue(t, found, "expected tx %s found pre-results", tx.Hash()) - requireBytesEqual(t, tx.Hash(), gotTx.Hash(), "transaction hash") - requireBytesEqual(t, tx.Bytes(), gotTx.Bytes(), "transaction data") - requireTrue(t, len(results) == 0, "expected 0 results pre-SetTransactionResults, got %d", len(results)) - } - }) -} - func TestGetBlockNotFound(t *testing.T) { forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { ctx := context.Background() @@ -177,19 +130,6 @@ func TestGetBlockNotFound(t *testing.T) { }) } -func TestGetTransactionNotFound(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - _, _, found, err := db.GetTransactionByHash(ctx, []byte("nonexistent")) - requireNoError(t, err) - 
requireTrue(t, !found, "expected no transaction with nonexistent hash") - }) -} - func TestMultipleBlocks(t *testing.T) { forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { ctx := context.Background() @@ -212,161 +152,8 @@ func TestMultipleBlocks(t *testing.T) { }) } -func TestSetTransactionResults(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - blk := makeBlock(7, 3) - requireNoError(t, db.WriteBlock(ctx, blk)) - - // Pre-results: found=true, results empty. - for _, tx := range blk.Transactions() { - gotTx, results, found, err := db.GetTransactionByHash(ctx, tx.Hash()) - requireNoError(t, err) - requireTrue(t, found, "expected tx %s found pre-results", tx.Hash()) - requireBytesEqual(t, tx.Bytes(), gotTx.Bytes(), "tx body bytes") - requireTrue(t, len(results) == 0, "expected 0 results pre-SetTransactionResults, got %d", len(results)) - } - - // Attach results. - results := makeResults(blk) - requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), results)) - - // Post-results: results carries (bytes, height, index). 
- for i, tx := range blk.Transactions() { - gotTx, gotResults, found, err := db.GetTransactionByHash(ctx, tx.Hash()) - requireNoError(t, err) - requireTrue(t, found, "expected tx %s found post-results", tx.Hash()) - requireBytesEqual(t, tx.Bytes(), gotTx.Bytes(), "tx body bytes") - requireTrue(t, len(gotResults) == 1, "expected 1 result, got %d", len(gotResults)) - r := gotResults[0] - requireBytesEqual(t, results[i].Bytes(), r.Bytes(), fmt.Sprintf("tx[%d] result bytes", i)) - requireTrue(t, r.Height() == 7, "expected result height 7, got %d", r.Height()) - requireTrue(t, r.Index() == uint32(i), "expected result index %d, got %d", i, r.Index()) //nolint:gosec - } - }) -} - -// TestTransactionMultipleBlocks pins the (txHash, blockHash) dedup behavior: -// the same tx hash included in two different blocks is recorded as two -// separate Result entries. Both remain reachable while both blocks are -// retained; pruning either one leaves the other (and its result) -// reachable. Models the lane-block scenario where the same tx body -// appears in two different GlobalBlocks (one per lane). -func TestTransactionMultipleBlocks(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - // Two blocks at different heights with the same tx hash + bytes. - const txHash = "shared-tx-hash" - const txBytes = "shared-tx-data" - shared := func() block.Transaction { - return &testTx{hash: []byte(txHash), bytes: []byte(txBytes)} - } - blkA := &testBlock{ - hash: []byte("block-A"), - height: 1, - txs: []block.Transaction{shared()}, - } - blkB := &testBlock{ - hash: []byte("block-B"), - height: 2, - txs: []block.Transaction{shared()}, - } - requireNoError(t, db.WriteBlock(ctx, blkA)) - requireNoError(t, db.WriteBlock(ctx, blkB)) - - // Both blocks present, no results yet → found, empty results. 
- _, results, found, err := db.GetTransactionByHash(ctx, []byte(txHash)) - requireNoError(t, err) - requireTrue(t, found, "expected found") - requireTrue(t, len(results) == 0, "expected 0 results pre-SetTransactionResults, got %d", len(results)) - - // Attach results: A gets "result-A", B gets "result-B". - requireNoError(t, db.SetTransactionResults(ctx, blkA.Hash(), []block.Result{ - testResult{bytes: []byte("result-A"), height: 1, index: 0}, - })) - requireNoError(t, db.SetTransactionResults(ctx, blkB.Hash(), []block.Result{ - testResult{bytes: []byte("result-B"), height: 2, index: 0}, - })) - - // Both results reachable. - _, results, found, err = db.GetTransactionByHash(ctx, []byte(txHash)) - requireNoError(t, err) - requireTrue(t, found, "expected found") - requireTrue(t, len(results) == 2, "expected 2 results, got %d", len(results)) - seen := map[string]uint64{} - for _, r := range results { - seen[string(r.Bytes())] = r.Height() - } - requireTrue(t, seen["result-A"] == 1, "expected result-A at height 1, got %v", seen["result-A"]) - requireTrue(t, seen["result-B"] == 2, "expected result-B at height 2, got %v", seen["result-B"]) - - // Prune block A; B's result remains reachable. - requireNoError(t, db.Prune(ctx, 2)) - _, results, found, err = db.GetTransactionByHash(ctx, []byte(txHash)) - requireNoError(t, err) - requireTrue(t, found, "expected found after prune A") - requireTrue(t, len(results) == 1, "expected 1 result after prune A, got %d", len(results)) - requireBytesEqual(t, []byte("result-B"), results[0].Bytes(), "remaining result must be B") - requireTrue(t, results[0].Height() == 2, "remaining result height must be 2") - - // Prune block B; tx is now unknown (entire entry collected). 
- requireNoError(t, db.Prune(ctx, 3)) - _, _, found, err = db.GetTransactionByHash(ctx, []byte(txHash)) - requireNoError(t, err) - requireTrue(t, !found, "expected tx unknown after pruning all blocks containing it") - }) -} - -// TestSetTransactionResultsOverwrites pins the documented "second call -// overwrites" behavior — useful for callers that re-execute a block on -// recovery and expect the latest results to win. -func TestSetTransactionResultsOverwrites(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - blk := makeBlock(3, 2) - requireNoError(t, db.WriteBlock(ctx, blk)) - - // First attach: "old-N". - first := []block.Result{ - testResult{bytes: []byte("old-0"), height: 3, index: 0}, - testResult{bytes: []byte("old-1"), height: 3, index: 1}, - } - requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), first)) - - // Second attach: "new-N" — must replace. - second := []block.Result{ - testResult{bytes: []byte("new-0"), height: 3, index: 0}, - testResult{bytes: []byte("new-1"), height: 3, index: 1}, - } - requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), second)) - - for i, tx := range blk.Transactions() { - _, results, found, err := db.GetTransactionByHash(ctx, tx.Hash()) - requireNoError(t, err) - requireTrue(t, found, "expected tx %s found", tx.Hash()) - requireTrue(t, len(results) == 1, "expected 1 result, got %d", len(results)) - requireBytesEqual(t, second[i].Bytes(), results[0].Bytes(), fmt.Sprintf("tx[%d] result must reflect overwrite", i)) - } - }) -} - -// TestWriteBlockIdempotent pins the contract that calling WriteBlock a -// second time for the same blockHash is a silent no-op — does NOT wipe -// any results already attached via SetTransactionResults. 
Without this -// the second WriteBlock would silently corrupt the index by re-creating -// pending instances on top of recorded ones (review finding 1). +// TestWriteBlockIdempotent pins the contract that re-writing the same +// block hash is a silent no-op rather than an error or an overwrite. func TestWriteBlockIdempotent(t *testing.T) { forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { ctx := context.Background() @@ -376,162 +163,13 @@ func TestWriteBlockIdempotent(t *testing.T) { blk := makeBlock(4, 2) requireNoError(t, db.WriteBlock(ctx, blk)) - requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), makeResults(blk))) - - // Second WriteBlock for the same block — must not destroy results. + // Second WriteBlock for the same block hash — must not error. requireNoError(t, db.WriteBlock(ctx, blk)) - for i, tx := range blk.Transactions() { - _, results, found, err := db.GetTransactionByHash(ctx, tx.Hash()) - requireNoError(t, err) - requireTrue(t, found, "expected tx %s found after re-WriteBlock", tx.Hash()) - requireTrue(t, len(results) == 1, "expected 1 result after re-WriteBlock, got %d", len(results)) - requireBytesEqual(t, []byte(fmt.Sprintf("result-%d-%d", 4, i)), results[0].Bytes(), fmt.Sprintf("tx[%d] result must survive re-WriteBlock", i)) - } - }) -} - -// TestWriteBlockTxHashCollision pins the defensive collision check from -// review finding 6: writing a second block whose tx hash matches a -// previously-written tx but with different bytes is rejected loudly, -// rather than silently keeping the first-writer's bytes. 
-func TestWriteBlockTxHashCollision(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - blkA := &testBlock{ - hash: []byte("block-A"), - height: 1, - txs: []block.Transaction{ - &testTx{hash: []byte("h"), bytes: []byte("v1")}, - }, - } - blkB := &testBlock{ - hash: []byte("block-B"), - height: 2, - txs: []block.Transaction{ - &testTx{hash: []byte("h"), bytes: []byte("v2")}, - }, - } - requireNoError(t, db.WriteBlock(ctx, blkA)) - err = db.WriteBlock(ctx, blkB) - requireTrue(t, err != nil, "expected ErrTxHashCollision for second block with mismatched bytes") - - // Block B must not have been recorded — partial state from a - // failed validation would corrupt blocksByHash. - _, ok, err := db.GetBlockByHash(ctx, blkB.Hash()) - requireNoError(t, err) - requireTrue(t, !ok, "block B must not be present after collision rejection") - }) -} - -// TestGetTransactionByHashDeterministicOrder pins the sort-by-blockHash -// behavior on the read path: with multiple instances, the returned -// slice must be in stable order across repeated calls — otherwise -// downstream selection that ties on Height() would non-deterministically -// flip between RPC calls (review finding 2). -func TestGetTransactionByHashDeterministicOrder(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - const txHash = "shared" - const txBytes = "data" - shared := func() block.Transaction { - return &testTx{hash: []byte(txHash), bytes: []byte(txBytes)} - } - // Three blocks at the same height carrying the same tx — exercises - // the tie-breaker path. Block hashes intentionally chosen so that - // lexicographic order doesn't match insertion order. 
- blkB := &testBlock{hash: []byte("bbb"), height: 5, txs: []block.Transaction{shared()}} - blkA := &testBlock{hash: []byte("aaa"), height: 5, txs: []block.Transaction{shared()}} - blkC := &testBlock{hash: []byte("ccc"), height: 5, txs: []block.Transaction{shared()}} - requireNoError(t, db.WriteBlock(ctx, blkB)) - requireNoError(t, db.WriteBlock(ctx, blkA)) - requireNoError(t, db.WriteBlock(ctx, blkC)) - - requireNoError(t, db.SetTransactionResults(ctx, blkB.Hash(), []block.Result{testResult{bytes: []byte("rB"), height: 5, index: 0}})) - requireNoError(t, db.SetTransactionResults(ctx, blkA.Hash(), []block.Result{testResult{bytes: []byte("rA"), height: 5, index: 0}})) - requireNoError(t, db.SetTransactionResults(ctx, blkC.Hash(), []block.Result{testResult{bytes: []byte("rC"), height: 5, index: 0}})) - - // Repeatedly read; results must be in the same order each time. - var first [][]byte - for iter := 0; iter < 10; iter++ { - _, results, found, err := db.GetTransactionByHash(ctx, []byte(txHash)) - requireNoError(t, err) - requireTrue(t, found, "expected tx found") - requireTrue(t, len(results) == 3, "expected 3 results, got %d", len(results)) - gotOrder := make([][]byte, len(results)) - for i, r := range results { - gotOrder[i] = r.Bytes() - } - if iter == 0 { - first = gotOrder - continue - } - for i := range gotOrder { - requireBytesEqual(t, first[i], gotOrder[i], fmt.Sprintf("iter %d position %d", iter, i)) - } - } - }) -} - -// TestGetTransactionByHashReadIsolation pins the contract that a Result -// returned by an earlier call is not mutated by a later -// SetTransactionResults overwrite (the documented "second call -// replaces" behavior). Today this isolation comes from -// SetTransactionResults reassigning the stored slice header rather than -// mutating bytes in place — Go's value-copy of the result struct then -// keeps the caller's slice header pointing at the old backing array. 
-// If a future change makes SetTransactionResults mutate in place, this -// test catches the regression. -func TestGetTransactionByHashReadIsolation(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - blk := makeBlock(7, 1) - requireNoError(t, db.WriteBlock(ctx, blk)) - requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), []block.Result{ - testResult{bytes: []byte("first"), height: 7, index: 0}, - })) - - _, results, _, err := db.GetTransactionByHash(ctx, blk.Transactions()[0].Hash()) - requireNoError(t, err) - requireTrue(t, len(results) == 1, "expected 1 result") - held := results[0] - - // Overwrite — caller's earlier read must not be mutated. - requireNoError(t, db.SetTransactionResults(ctx, blk.Hash(), []block.Result{ - testResult{bytes: []byte("second"), height: 7, index: 0}, - })) - requireBytesEqual(t, []byte("first"), held.Bytes(), "earlier-read Result must not observe overwrite") - }) -} - -func TestSetTransactionResultsErrors(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) + got, ok, err := db.GetBlockByHash(ctx, blk.Hash()) requireNoError(t, err) - defer db.Close(ctx) - - // Unknown block hash. - err = db.SetTransactionResults(ctx, []byte("nonexistent"), nil) - requireTrue(t, err != nil, "expected error for unknown block hash") - - // Mismatched count. 
- blk := makeBlock(1, 2) - requireNoError(t, db.WriteBlock(ctx, blk)) - err = db.SetTransactionResults(ctx, blk.Hash(), []block.Result{testResult{bytes: []byte("only-one"), height: 1, index: 0}}) - requireTrue(t, err != nil, "expected error for mismatched result count") + requireTrue(t, ok, "expected block still present after re-write") + requireBlockEqual(t, blk, got) }) } @@ -557,28 +195,6 @@ func TestPrunePreservesUnprunedBlocks(t *testing.T) { }) } -func TestPrunePreservesUnprunedTransactions(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - survivingBlock := makeBlock(2, 3) - requireNoError(t, db.WriteBlock(ctx, makeBlock(1, 1))) - requireNoError(t, db.WriteBlock(ctx, survivingBlock)) - - requireNoError(t, db.Flush(ctx)) - requireNoError(t, db.Prune(ctx, 2)) - - for _, tx := range survivingBlock.Transactions() { - _, _, found, err := db.GetTransactionByHash(ctx, tx.Hash()) - requireNoError(t, err) - requireTrue(t, found, "expected transaction %s to survive pruning", tx.Hash()) - } - }) -} - func TestPruneDoesNotError(t *testing.T) { forEachBuilder(t, func(t *testing.T, builder func(string) (block.BlockDB, error)) { ctx := context.Background() @@ -618,13 +234,6 @@ func TestCloseAndReopen(t *testing.T) { requireNoError(t, err) requireTrue(t, ok, "expected block to survive close/reopen") requireBlockEqual(t, blk, got) - - for _, tx := range blk.Transactions() { - gotTx, _, found, err := db2.GetTransactionByHash(ctx, tx.Hash()) - requireNoError(t, err) - requireTrue(t, found, "expected tx to survive close/reopen") - requireBytesEqual(t, tx.Bytes(), gotTx.Bytes(), "transaction data") - } }) } @@ -695,14 +304,6 @@ func TestBulkWriteAndQuery(t *testing.T) { requireNoError(t, err) requireTrue(t, ok, "block not found by hash at height %d", expected.Height()) requireBlockEqual(t, expected, byHash) - - for 
_, expectedTx := range expected.Transactions() { - gotTx, _, found, err := db.GetTransactionByHash(ctx, expectedTx.Hash()) - requireNoError(t, err) - requireTrue(t, found, "tx not found by hash %x (block height %d)", expectedTx.Hash(), expected.Height()) - requireBytesEqual(t, expectedTx.Hash(), gotTx.Hash(), "tx hash") - requireBytesEqual(t, expectedTx.Bytes(), gotTx.Bytes(), "tx data") - } } }) } diff --git a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go index 989234d2b2..b3f1422fde 100644 --- a/sei-db/ledger_db/block/mem_block_db/mem_block_db.go +++ b/sei-db/ledger_db/block/mem_block_db/mem_block_db.go @@ -1,65 +1,27 @@ package memblockdb import ( - "bytes" "context" - "fmt" - "sort" "sync" "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" ) -// resultInstance is the per-block-occurrence record for one tx hash. It -// carries the location (height + position in the block) plus the marshaled -// execution result (nil while the entry is "pending" — block written but -// SetTransactionResults not yet called). The same struct value satisfies -// block.Result on read; GetTransactionByHash returns a value-copy so the -// caller's bytes slice header is independent of the storage's. A later -// SetTransactionResults reassigns inst.bytes (does not mutate it in -// place), so the caller's earlier read is naturally isolated by Go's -// slice-header-copy semantics — no defensive deep-copy needed here. -// (If a future caller wants to mutate the returned bytes in place, they -// must copy first.) 
-type resultInstance struct { - height uint64 - index uint32 - bytes []byte // nil if no result attached yet -} - -func (r resultInstance) Bytes() []byte { return r.bytes } -func (r resultInstance) Height() uint64 { return r.height } -func (r resultInstance) Index() uint32 { return r.index } - -// txEntry holds the invariant tx body once per hash, plus a per-block-hash -// map of resultInstance recording every block this tx appeared in. -type txEntry struct { - tx block.Transaction - instances map[string]*resultInstance // blockHash -> instance -} - // Shared backing store, keyed by path in test builders to simulate restarts. type memBlockDBData struct { mu sync.RWMutex blocksByHash map[string]block.Block blocksByHeight map[uint64]block.Block - // txEntries is the two-level index: tx hash -> per-block instances. - // Same tx hash appearing in multiple blocks (different lanes producing - // the same tx) gets one entry per block in the inner map; pruning a - // single block only removes that block's instance and leaves siblings - // intact. - txEntries map[string]*txEntry - lowestHeight uint64 - highestHeight uint64 - hasBlocks bool + lowestHeight uint64 + highestHeight uint64 + hasBlocks bool } -// An in-memory implementation of the BlockDB interface. Useful as a test fixture to sanity check -// test flows. +// An in-memory implementation of the BlockDB interface. Useful as a test +// fixture and as a development-time backend until a persistent BlockDB lands. // // TODO(blockdb): add a -race concurrency test — every public method's lock -// shape (WriteBlock + SetTransactionResults under write lock; Get* under -// read lock; two-pass validate-then-mutate in WriteBlock) is currently +// shape (WriteBlock under write lock; Get* under read lock) is currently // verified only by inspection. 
type memBlockDB struct { data *memBlockDBData @@ -71,7 +33,6 @@ func NewMemBlockDB() block.BlockDB { data: &memBlockDBData{ blocksByHash: make(map[string]block.Block), blocksByHeight: make(map[uint64]block.Block), - txEntries: make(map[string]*txEntry), }, } } @@ -82,51 +43,14 @@ func (m *memBlockDB) WriteBlock(_ context.Context, blk block.Block) error { defer d.mu.Unlock() blockHashKey := string(blk.Hash()) - // Idempotent on duplicate: a second WriteBlock for the same block hash - // would re-create resultInstance entries with bytes=nil, silently - // destroying anything SetTransactionResults already attached. Skip. + // Idempotent on duplicate: treat a second WriteBlock for the same block + // hash as a no-op rather than overwriting indexes. if _, exists := d.blocksByHash[blockHashKey]; exists { return nil } height := blk.Height() - txs := blk.Transactions() - - // First pass: validate every tx body against any pre-existing entry for - // the same hash. A mismatch surfaces a tx-hash collision (two distinct - // bodies hashing to the same value) — refuse the entire write rather - // than partially mutate state. - for _, tx := range txs { - entry, ok := d.txEntries[string(tx.Hash())] - if !ok { - continue - } - if !bytes.Equal(entry.tx.Bytes(), tx.Bytes()) { - return fmt.Errorf("%w: tx %x in block %x", block.ErrTxHashCollision, tx.Hash(), blk.Hash()) - } - } - - // Second pass: actually write. d.blocksByHash[blockHashKey] = blk d.blocksByHeight[height] = blk - for i, tx := range txs { - hashKey := string(tx.Hash()) - entry, ok := d.txEntries[hashKey] - if !ok { - entry = &txEntry{ - tx: tx, - instances: make(map[string]*resultInstance), - } - d.txEntries[hashKey] = entry - } - // Register a pending instance for this block. SetTransactionResults - // fills bytes later. The (txHash, blockHash) keying means the same - // tx hash in another block keeps its own instance; pruning one - // block doesn't disturb others. 
- entry.instances[blockHashKey] = &resultInstance{ - height: height, - index: uint32(i), //nolint:gosec // tx index fits in uint32 (block tx count is bounded). - } - } if !d.hasBlocks { d.lowestHeight = height @@ -143,46 +67,6 @@ func (m *memBlockDB) WriteBlock(_ context.Context, blk block.Block) error { return nil } -func (m *memBlockDB) SetTransactionResults(_ context.Context, blockHash []byte, results []block.Result) error { - // Pre-marshal each result outside the lock. Result.Bytes() may run a - // proto Marshal — for a 1000-tx block that's ~MB of CPU work, which - // we don't want to do under the write lock blocking every reader. The - // extra cost is wasted on the rare error paths (unknown block, - // count mismatch) but those are exceptional. - bytesByIdx := make([][]byte, len(results)) - for i, r := range results { - bytesByIdx[i] = r.Bytes() - } - - d := m.data - d.mu.Lock() - defer d.mu.Unlock() - - blk, ok := d.blocksByHash[string(blockHash)] - if !ok { - return fmt.Errorf("%w: %x", block.ErrUnknownBlock, blockHash) - } - txs := blk.Transactions() - if len(txs) != len(bytesByIdx) { - return fmt.Errorf("%w: block has %d txs, got %d results", block.ErrResultCountMismatch, len(txs), len(bytesByIdx)) - } - blockHashKey := string(blockHash) - for i, tx := range txs { - entry, ok := d.txEntries[string(tx.Hash())] - if !ok { - // Defensive: WriteBlock should have created this entry. If it - // didn't, the index is corrupted — surface loudly. 
- return fmt.Errorf("internal: tx index missing entry for tx %x in block %x", tx.Hash(), blockHash) - } - inst, ok := entry.instances[blockHashKey] - if !ok { - return fmt.Errorf("internal: tx index missing instance for tx %x in block %x", tx.Hash(), blockHash) - } - inst.bytes = bytesByIdx[i] - } - return nil -} - func (m *memBlockDB) Flush(_ context.Context) error { return nil } @@ -205,42 +89,6 @@ func (m *memBlockDB) GetBlockByHeight(_ context.Context, height uint64) (block.B return blk, ok, nil } -func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (block.Transaction, []block.Result, bool, error) { - d := m.data - d.mu.RLock() - defer d.mu.RUnlock() - - entry, ok := d.txEntries[string(hash)] - if !ok { - return nil, nil, false, nil - } - // Sort by blockHash so the returned slice has deterministic order - // across calls — Go map iteration is randomized, and downstream - // selection (e.g. GigaRouter.Tx tie-breaking on equal heights) - // depends on stable input order to return the same Result for the - // same query. - keys := make([]string, 0, len(entry.instances)) - for k := range entry.instances { - keys = append(keys, k) - } - sort.Strings(keys) - results := make([]block.Result, 0, len(keys)) - for _, k := range keys { - inst := entry.instances[k] - if inst.bytes == nil { - continue - } - // Value-copy the resultInstance: caller gets a fresh slice header - // pointing at the same backing array. Isolation from a later - // SetTransactionResults is provided by the fact that - // SetTransactionResults reassigns inst.bytes (rather than mutating - // it in place), so the caller's slice header keeps pointing at - // the old array. See the resultInstance type doc. 
- results = append(results, *inst) - } - return entry.tx, results, true, nil -} - func (m *memBlockDB) Prune(_ context.Context, lowestHeightToKeep uint64) error { d := m.data d.mu.Lock() @@ -256,21 +104,7 @@ func (m *memBlockDB) Prune(_ context.Context, lowestHeightToKeep uint64) error { continue } delete(d.blocksByHeight, h) - blockHashKey := string(blk.Hash()) - delete(d.blocksByHash, blockHashKey) - for _, tx := range blk.Transactions() { - hashKey := string(tx.Hash()) - entry, ok := d.txEntries[hashKey] - if !ok { - continue - } - // Only remove the instance for the block being pruned; other - // blocks containing the same tx hash stay reachable. - delete(entry.instances, blockHashKey) - if len(entry.instances) == 0 { - delete(d.txEntries, hashKey) - } - } + delete(d.blocksByHash, string(blk.Hash())) } if lowestHeightToKeep > d.highestHeight { diff --git a/sei-tendermint/internal/p2p/giga_blockdb.go b/sei-tendermint/internal/p2p/giga_blockdb.go index 892ead3987..19e197e808 100644 --- a/sei-tendermint/internal/p2p/giga_blockdb.go +++ b/sei-tendermint/internal/p2p/giga_blockdb.go @@ -4,10 +4,8 @@ import ( "time" "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" - abci "github.com/sei-protocol/sei-chain/sei-tendermint/abci/types" "github.com/sei-protocol/sei-chain/sei-tendermint/crypto/tmhash" atypes "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/types" - "github.com/sei-protocol/sei-chain/sei-tendermint/libs/utils" ) // globalBlockAdapter wraps *atypes.GlobalBlock so it satisfies block.Block @@ -16,9 +14,9 @@ import ( // // txs is computed eagerly in newGlobalBlockAdapter and cached for the // lifetime of the adapter. mem_block_db calls Transactions() multiple -// times (WriteBlock, SetTransactionResults validation, Prune); without -// the cache each call would re-allocate the slice and re-sha256 every -// payload tx — under the write lock, on the Prune path. 
+// times (WriteBlock, Prune); without the cache each call would +// re-allocate the slice and re-sha256 every payload tx — under the +// write lock, on the Prune path. type globalBlockAdapter struct { gb *atypes.GlobalBlock txs []block.Transaction @@ -38,10 +36,8 @@ func newGlobalBlockAdapter(gb *atypes.GlobalBlock) globalBlockAdapter { func (a globalBlockAdapter) Hash() []byte { // TODO(autobahn): memoize parallel to txs — Hash() is called multiple - // times per block (mem_block_db's WriteBlock, runExecute's - // SetTransactionResults call site, BlockByHash translation). Each call - // re-runs the proto marshal + sha256 over the header. Not hot today - // but trivial to cache when we revisit. + // times per block. Each call re-runs the proto marshal + sha256 over + // the header. Not hot today but trivial to cache when we revisit. h := a.gb.Header.Hash() return h.Bytes() } @@ -53,9 +49,9 @@ func (a globalBlockAdapter) Time() time.Time { return a.gb.Timestamp } func (a globalBlockAdapter) Transactions() []block.Transaction { return a.txs } // txAdapter wraps a single Autobahn tx + its CometBFT-style hash so it -// satisfies block.Transaction. The interface only carries the invariant -// tx body — per-block-instance data (height, index, result) lives on -// block.Result, attached separately via SetTransactionResults. +// satisfies block.Transaction. The interface carries only the invariant +// tx body — BlockDB doesn't index by tx hash; per-tx execution results +// belong on a future Receipt Store, not here. type txAdapter struct { hash []byte bytes []byte @@ -63,26 +59,3 @@ type txAdapter struct { func (t txAdapter) Hash() []byte { return t.hash } func (t txAdapter) Bytes() []byte { return t.bytes } - -// execResultAdapter wraps *abci.ExecTxResult plus its block height + -// position so it satisfies block.Result. 
Marshal happens lazily on -// Bytes(); the typical caller is mem_block_db's SetTransactionResults, -// which calls Bytes() exactly once and then drops the adapter. -// ExecTxResult is gogoproto-generated so it carries its own Marshal -// method and never fails on a well-formed message — we OrPanic to surface -// the impossible case loudly rather than silently dropping a result. -type execResultAdapter struct { - r *abci.ExecTxResult - height uint64 - index uint32 -} - -func (a execResultAdapter) Bytes() []byte { - if a.r == nil { - return nil - } - return utils.OrPanic1(a.r.Marshal()) -} - -func (a execResultAdapter) Height() uint64 { return a.height } -func (a execResultAdapter) Index() uint32 { return a.index } diff --git a/sei-tendermint/internal/p2p/giga_router.go b/sei-tendermint/internal/p2p/giga_router.go index e2ef08732f..c58805b2e5 100644 --- a/sei-tendermint/internal/p2p/giga_router.go +++ b/sei-tendermint/internal/p2p/giga_router.go @@ -55,15 +55,16 @@ type GigaRouter struct { service *giga.Service poolIn *giga.Pool[NodePublicKey, rpc.Server[giga.API]] poolOut *giga.Pool[NodePublicKey, rpc.Client[giga.API]] - // blockDB indexes finalized blocks by hash and tracks per-tx execution - // results. Populated by runExecute: WriteBlock lands just before each - // block is handed to executeBlock; SetTransactionResults follows once - // FinalizeBlock returns. Read by BlockByHash and Tx. + // blockDB indexes finalized blocks by hash and height. Populated by + // runExecute via WriteBlock just before each block is handed to + // executeBlock; read by BlockByHash. BlockDB is block-storage-only — + // per-tx execution results (and the txHash → result lookup) belong + // on a future Receipt Store, not here, per the Giga Transaction + // Query proposal. 
// // Today's instance is mem_block_db (in-memory), so it does not survive // process restarts — RPC semantics treat that as "unknown hash" - // (BlockByHash returns &ResultBlock{Block: nil}; Tx returns - // "tx not found"). + // (BlockByHash returns &ResultBlock{Block: nil}). // // TODO(autobahn): make BlockDB injectable via GigaRouterConfig (today // it's hard-coded to mem_block_db.NewMemBlockDB() in NewGigaRouter, @@ -145,97 +146,6 @@ func (r *GigaRouter) LastCommittedBlockNumber() int64 { return int64(gr.Next) - 1 // nolint:gosec // gr.Next is uint64 but bounded by actual chain height. } -// ErrTxResultPending is returned by Tx when a transaction is known -// (its parent block has been written to BlockDB) but no execution -// result has been attached yet — the window between WriteBlock and -// SetTransactionResults inside runExecute. Distinct from "not found" -// because the tx is real. -// -// On the happy path the caller can retry and the result will land in -// milliseconds. On the unhappy path (executeBlock errored, runExecute -// exited, process is shutting down) the result will never land and -// retry never succeeds — operators inspecting a dead node via RPC will -// see this sentinel forever for any tx in the orphaned block. -// -// Callers that don't care about the distinction can errors.Is-check -// to fold it into a generic "try again" flow. -var ErrTxResultPending = errors.New("transaction result not yet recorded") - -// Tx returns the finalized transaction with the given hash translated into -// the CometBFT coretypes.ResultTx shape. Mirrors BlockByHash: the RPC layer -// (env.Tx) just delegates here when Autobahn is active, keeping the -// abci.ExecTxResult unmarshal and ResultTx assembly inside the giga -// package. Match CometBFT semantics for unknown hashes — return an error -// rather than nil — since callers (broadcast_tx_commit polling, ops -// tooling) already handle that error explicitly. 
-// -// req.Prove is intentionally not honored — Autobahn doesn't materialize -// types.TxProof, and tooling that needs it falls back to the CometBFT path. -// -// When the same tx hash was included in multiple blocks (different lanes -// producing the same tx), BlockDB returns every recorded execution; we -// pick the canonical one here. Order of preference: -// 1. The lowest-height execution with Code == abci.CodeTypeOK (a tx is -// expected to succeed at most once across the chain). -// 2. Otherwise the highest-height failure (most recent attempt). -// 3. If no executions are recorded but the tx hash is known to BlockDB, -// return ErrTxResultPending — distinguishes "may retry" from -// "definitely doesn't exist". -func (r *GigaRouter) Tx(ctx context.Context, hash []byte) (*coretypes.ResultTx, error) { - tx, results, found, err := r.blockDB.GetTransactionByHash(ctx, hash) - if err != nil { - return nil, fmt.Errorf("blockDB.GetTransactionByHash: %w", err) - } - if !found { - return nil, fmt.Errorf("tx (%X) not found", hash) - } - if len(results) == 0 { - return nil, fmt.Errorf("tx (%X): %w", hash, ErrTxResultPending) - } - - // Pick the canonical execution. Unmarshal each result once to read - // Code; the multi-result case is rare so the per-call cost is small. 
- var ( - successful *abci.ExecTxResult - successRes block.Result - failure *abci.ExecTxResult - failureRes block.Result - ) - for _, res := range results { - var parsed abci.ExecTxResult - if err := parsed.Unmarshal(res.Bytes()); err != nil { - return nil, fmt.Errorf("unmarshal tx result (block height %d): %w", res.Height(), err) - } - if parsed.Code == abci.CodeTypeOK { - if successful == nil || res.Height() < successRes.Height() { - p := parsed - successful = &p - successRes = res - } - continue - } - if failure == nil || res.Height() > failureRes.Height() { - p := parsed - failure = &p - failureRes = res - } - } - - chosenResult := successful - chosenRes := successRes - if chosenResult == nil { - chosenResult = failure - chosenRes = failureRes - } - return &coretypes.ResultTx{ - Hash: hash, - Height: utils.Clamp[int64](chosenRes.Height()), - Index: chosenRes.Index(), - TxResult: *chosenResult, - Tx: tx.Bytes(), - }, nil -} - // MaxGasPerBlock returns the producer's configured max gas per block (int64). // Thin pass-through to producer.Config.MaxGasPerBlockI64 — the clamp logic // lives there. 
Exposed at the GigaRouter level so the RPC layer can populate @@ -333,7 +243,7 @@ func (r *GigaRouter) translateBlock(b block.Block) *coretypes.ResultBlock { } } -func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (*abci.ResponseCommit, []*abci.ExecTxResult, error) { +func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (*abci.ResponseCommit, error) { app := r.cfg.TxMempool.App() hash := b.Header.Hash() var proposerAddress types.Address @@ -343,7 +253,7 @@ func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (* proposer := slices.MinFunc(vals, func(a, b abci.ValidatorUpdate) int { return a.PubKey.Compare(b.PubKey) }) key, err := crypto.PubKeyFromProto(proposer.PubKey) if err != nil { - return nil, nil, fmt.Errorf("crypto.PubKeyFromProto(): %w", err) + return nil, fmt.Errorf("crypto.PubKeyFromProto(): %w", err) } proposerAddress = key.Address() } @@ -370,14 +280,14 @@ func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (* }).ToProto(), }) if err != nil { - return nil, nil, fmt.Errorf("r.cfg.App.FinalizeBlock(): %w", err) + return nil, fmt.Errorf("r.cfg.App.FinalizeBlock(): %w", err) } if err := r.data.PushAppHash(ctx, b.GlobalNumber, resp.AppHash); err != nil { - return nil, nil, fmt.Errorf("r.data.PushAppHash(%v): %w", b.GlobalNumber, err) + return nil, fmt.Errorf("r.data.PushAppHash(%v): %w", b.GlobalNumber, err) } commitResp, err := app.Commit(ctx) if err != nil { - return nil, nil, fmt.Errorf("r.cfg.App.Commit(): %w", err) + return nil, fmt.Errorf("r.cfg.App.Commit(): %w", err) } blockTxs := make(types.Txs, len(b.Payload.Txs())) for i, tx := range b.Payload.Txs() { @@ -396,9 +306,9 @@ func (r *GigaRouter) executeBlock(ctx context.Context, b *atypes.GlobalBlock) (* false, ) if err != nil { - return nil, nil, fmt.Errorf("r.cfg.TxMempool.Update(%v): %w", b.GlobalNumber, err) + return nil, fmt.Errorf("r.cfg.TxMempool.Update(%v): %w", b.GlobalNumber, err) } - return 
commitResp, resp.TxResults, nil + return commitResp, nil } func (r *GigaRouter) runExecute(ctx context.Context) error { @@ -455,34 +365,17 @@ func (r *GigaRouter) runExecute(ctx context.Context) error { // read-your-writes within this process, so any concurrent RPC // BlockByHash sees the block from this point forward. The data // layer's WAL remains the primary durability story; BlockDB is the - // hash index, not the source of truth on restart. + // block-by-hash index, not the source of truth on restart. Per-tx + // execution results are NOT recorded here — those belong on a + // future Receipt Store (see the Giga Transaction Query proposal), + // not on BlockDB. if err := r.blockDB.WriteBlock(ctx, newGlobalBlockAdapter(b)); err != nil { return fmt.Errorf("r.blockDB.WriteBlock(%v): %w", n, err) } - commitResp, txResults, err := r.executeBlock(ctx, b) + commitResp, err := r.executeBlock(ctx, b) if err != nil { return fmt.Errorf("r.executeBlock(%v): %w", n, err) } - // Attach per-tx execution results to the BlockDB entry written - // above, so RPC consumers (env.Tx) can return them by tx hash. - // Wrapping each *abci.ExecTxResult in execResultAdapter keeps - // sei-db chain-agnostic — marshaling happens inside the adapter. - // Result.Height/Index reflect this block's height + the tx's - // position so per-block-instance metadata travels with the result - // (the same tx hash can land in different positions across lane - // blocks). - blockHash := b.Header.Hash() - results := make([]block.Result, len(txResults)) - for i, txResult := range txResults { - results[i] = execResultAdapter{ - r: txResult, - height: uint64(b.GlobalNumber), - index: uint32(i), //nolint:gosec // tx index fits in uint32 (block tx count is bounded). 
- } - } - if err := r.blockDB.SetTransactionResults(ctx, blockHash.Bytes(), results); err != nil { - return fmt.Errorf("r.blockDB.SetTransactionResults(%v): %w", n, err) - } pruneBefore, ok := utils.SafeCast[atypes.GlobalBlockNumber](commitResp.RetainHeight) if !ok { return fmt.Errorf("invalid commitResp.RetainHeight = %v", commitResp.RetainHeight) diff --git a/sei-tendermint/internal/p2p/giga_router_test.go b/sei-tendermint/internal/p2p/giga_router_test.go index 6d9cb06e08..b665310c26 100644 --- a/sei-tendermint/internal/p2p/giga_router_test.go +++ b/sei-tendermint/internal/p2p/giga_router_test.go @@ -4,7 +4,6 @@ import ( "context" "crypto/sha256" "encoding/json" - "errors" "fmt" "net/netip" "slices" @@ -14,12 +13,9 @@ import ( dbm "github.com/tendermint/tm-db" "golang.org/x/time/rate" - "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block" - memblockdb "github.com/sei-protocol/sei-chain/sei-db/ledger_db/block/mem_block_db" abci "github.com/sei-protocol/sei-chain/sei-tendermint/abci/types" "github.com/sei-protocol/sei-chain/sei-tendermint/crypto" "github.com/sei-protocol/sei-chain/sei-tendermint/crypto/ed25519" - "github.com/sei-protocol/sei-chain/sei-tendermint/crypto/tmhash" "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/consensus" "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/producer" atypes "github.com/sei-protocol/sei-chain/sei-tendermint/internal/autobahn/types" @@ -391,22 +387,6 @@ func TestGigaRouter_FinalizeBlocks(t *testing.T) { rbh, err := giga.BlockByHash(ctx, hashKey) require.NoError(t, err, "router[%v].BlockByHash(%x)", i, rb.BlockID.Hash) require.Equal(t, rb, rbh, "router[%v].BlockByHash(%x) ≠ BlockByNumber(%v)", i, rb.BlockID.Hash, committed) - // Covers GigaRouter.Tx — BlockDB-backed tx-by-hash lookup that - // env.Tx delegates to under Autobahn. 
For every tx in the just- - // fetched block we verify the round-trip carries hash/height/ - // index/bytes faithfully and that TxResult was attached by - // SetTransactionResults (Code is the meaningful no-fixture - // signal: testApp returns Code=0 for accepted txs). - for j, tx := range rb.Block.Data.Txs { - txHash := tmhash.Sum(tx) - rt, err := giga.Tx(ctx, txHash) - require.NoError(t, err, "router[%v].Tx(block=%v tx[%v])", i, committed, j) - require.Equal(t, txHash, []byte(rt.Hash), "router[%v].Tx hash", i) - require.Equal(t, committed, rt.Height, "router[%v].Tx height", i) - require.Equal(t, uint32(j), rt.Index, "router[%v].Tx index", i) //nolint:gosec - require.Equal(t, []byte(tx), rt.Tx, "router[%v].Tx tx bytes", i) - require.Equal(t, uint32(0), rt.TxResult.Code, "router[%v].Tx code", i) - } } // Payload.Txs round-trips: for every retained block, the txs the // data layer holds (GlobalBlock.Payload.Txs) must equal the txs @@ -435,160 +415,3 @@ func TestGigaRouter_FinalizeBlocks(t *testing.T) { }) require.NoError(t, err) } - -// txStub / blockStub / resultStub are minimal block.Transaction / -// block.Block / block.Result implementations for unit-testing -// GigaRouter.Tx in isolation — without spinning up the full consensus -// harness used by TestGigaRouter_FinalizeBlocks. 
-type txStub struct { - hash, bytes []byte -} - -func (t txStub) Hash() []byte { return t.hash } -func (t txStub) Bytes() []byte { return t.bytes } - -type blockStub struct { - hash []byte - height uint64 - txs []block.Transaction -} - -func (b blockStub) Hash() []byte { return b.hash } -func (b blockStub) Height() uint64 { return b.height } -func (b blockStub) Time() time.Time { return time.Time{} } -func (b blockStub) Transactions() []block.Transaction { return b.txs } - -type resultStub struct { - b []byte - height uint64 - index uint32 -} - -func (r resultStub) Bytes() []byte { return r.b } -func (r resultStub) Height() uint64 { return r.height } -func (r resultStub) Index() uint32 { return r.index } - -// marshaledExecResult is a tiny helper that returns a marshaled -// abci.ExecTxResult with the given Code — saves repetitive OrPanic1 -// boilerplate in the per-test setup. -func marshaledExecResult(code uint32) []byte { - return utils.OrPanic1((&abci.ExecTxResult{Code: code}).Marshal()) -} - -// TestGigaRouter_TxResultPending pins the contract from review finding -// (1): a tx whose parent block has been written to BlockDB but whose -// execution results have not yet been attached must surface as -// ErrTxResultPending — never as a zero-result success that -// broadcast_tx_commit pollers would mistake for an executed tx. -func TestGigaRouter_TxResultPending(t *testing.T) { - ctx := t.Context() - - blockDB := memblockdb.NewMemBlockDB() - r := &GigaRouter{blockDB: blockDB} - - tx := txStub{ - hash: []byte("hash-of-tx-1"), - bytes: []byte("payload-1"), - } - blk := blockStub{ - hash: []byte("block-A"), - height: 5, - txs: []block.Transaction{tx}, - } - require.NoError(t, blockDB.WriteBlock(ctx, blk)) - - // Unknown tx hash → "not found" sentinel (distinct from pending). 
- _, err := r.Tx(ctx, []byte("does-not-exist")) - require.True(t, err != nil, "expected error for unknown tx") - require.False(t, errors.Is(err, ErrTxResultPending), "unknown tx must not surface as pending") - - // Block written, results not yet attached → ErrTxResultPending. - _, err = r.Tx(ctx, tx.hash) - require.True(t, errors.Is(err, ErrTxResultPending), - "expected ErrTxResultPending after WriteBlock but before SetTransactionResults, got %v", err) - - // After SetTransactionResults, Tx returns the translated ResultTx. - wantCode := uint32(7) // arbitrary non-zero code: confirms the result actually round-trips. - require.NoError(t, blockDB.SetTransactionResults(ctx, blk.hash, []block.Result{ - resultStub{b: marshaledExecResult(wantCode), height: 5, index: 0}, - })) - - rt, err := r.Tx(ctx, tx.hash) - require.NoError(t, err, "Tx after SetTransactionResults") - require.Equal(t, tx.hash, []byte(rt.Hash)) - require.Equal(t, int64(5), rt.Height) - require.Equal(t, uint32(0), rt.Index) - require.Equal(t, tx.bytes, rt.Tx) - require.Equal(t, wantCode, rt.TxResult.Code) -} - -// TestGigaRouter_TxMultipleBlocks_PrefersSuccess pins review finding (2): -// the same tx hash included in two different blocks must keep both -// executions reachable, and Tx() must canonicalize on the successful -// one regardless of insertion order. -func TestGigaRouter_TxMultipleBlocks_PrefersSuccess(t *testing.T) { - ctx := t.Context() - - blockDB := memblockdb.NewMemBlockDB() - r := &GigaRouter{blockDB: blockDB} - - const txHash = "shared-hash" - const txBytes = "shared-data" - shared := func() block.Transaction { - return txStub{hash: []byte(txHash), bytes: []byte(txBytes)} - } - // A is written first and fails; B is written second and succeeds. - // The "successful wins" rule must beat insertion order. 
- blkA := blockStub{hash: []byte("block-A"), height: 11, txs: []block.Transaction{shared()}} - blkB := blockStub{hash: []byte("block-B"), height: 22, txs: []block.Transaction{shared()}} - require.NoError(t, blockDB.WriteBlock(ctx, blkA)) - require.NoError(t, blockDB.WriteBlock(ctx, blkB)) - - const failCode = uint32(7) - require.NoError(t, blockDB.SetTransactionResults(ctx, blkA.hash, []block.Result{ - resultStub{b: marshaledExecResult(failCode), height: 11, index: 0}, - })) - require.NoError(t, blockDB.SetTransactionResults(ctx, blkB.hash, []block.Result{ - resultStub{b: marshaledExecResult(abci.CodeTypeOK), height: 22, index: 0}, - })) - - rt, err := r.Tx(ctx, []byte(txHash)) - require.NoError(t, err) - require.Equal(t, int64(22), rt.Height, "expected canonical execution from successful block") - require.Equal(t, abci.CodeTypeOK, rt.TxResult.Code) -} - -// TestGigaRouter_TxMultipleBlocks_FallsBackToLatestFailure pins the -// "no successful execution" branch of the selection rule: when every -// recorded execution is a failure, Tx() returns the highest-height -// failure (the most recent attempt). 
-func TestGigaRouter_TxMultipleBlocks_FallsBackToLatestFailure(t *testing.T) { - ctx := t.Context() - - blockDB := memblockdb.NewMemBlockDB() - r := &GigaRouter{blockDB: blockDB} - - const txHash = "shared-hash" - const txBytes = "shared-data" - shared := func() block.Transaction { - return txStub{hash: []byte(txHash), bytes: []byte(txBytes)} - } - blkA := blockStub{hash: []byte("block-A"), height: 11, txs: []block.Transaction{shared()}} - blkB := blockStub{hash: []byte("block-B"), height: 22, txs: []block.Transaction{shared()}} - require.NoError(t, blockDB.WriteBlock(ctx, blkA)) - require.NoError(t, blockDB.WriteBlock(ctx, blkB)) - - const failA = uint32(5) - const failB = uint32(7) - require.NoError(t, blockDB.SetTransactionResults(ctx, blkA.hash, []block.Result{ - resultStub{b: marshaledExecResult(failA), height: 11, index: 0}, - })) - require.NoError(t, blockDB.SetTransactionResults(ctx, blkB.hash, []block.Result{ - resultStub{b: marshaledExecResult(failB), height: 22, index: 0}, - })) - - rt, err := r.Tx(ctx, []byte(txHash)) - require.NoError(t, err) - require.Equal(t, int64(22), rt.Height, "expected highest-height failure") - require.Equal(t, failB, rt.TxResult.Code) -} diff --git a/sei-tendermint/internal/rpc/core/tx.go b/sei-tendermint/internal/rpc/core/tx.go index a8ab69a437..41e6490c95 100644 --- a/sei-tendermint/internal/rpc/core/tx.go +++ b/sei-tendermint/internal/rpc/core/tx.go @@ -17,14 +17,13 @@ import ( // transaction is in the mempool, invalidated, or was not sent in the first // place. // More: https://docs.tendermint.com/master/rpc/#/Info/tx +// +// TODO(autobahn): once the Receipt Store is wired (canonical +// txHash → execution result lookup unified for EVM + Cosmos txs, per +// the Giga Transaction Query proposal), route this handler through +// it instead of the legacy EventSinks. The current behavior under +// Autobahn is "querying disabled" because EventSinks aren't populated. 
func (env *Environment) Tx(ctx context.Context, req *coretypes.RequestTx) (*coretypes.ResultTx, error) { - // Autobahn path: legacy EventSinks aren't populated under giga; delegate - // to GigaRouter.Tx, which reads the BlockDB populated by runExecute and - // returns a fully-translated ResultTx (mirrors how BlockByHash routes). - if r, ok := env.gigaRouter().Get(); ok { - return r.Tx(ctx, req.Hash) - } - // if index is disabled, return error if !indexer.KVSinkEnabled(env.EventSinks) { return nil, errors.New("transaction querying is disabled due to no kvEventSink")