221 changes: 221 additions & 0 deletions encoding/codecv9.go
Contributor:
Remember that we need to bump the da-codec version in both rollup-relayer and l2geth (I think we previously forgot about l2geth).

@yiweichi (Member), Nov 17, 2025:
blob-uploader depends on da-codec as well. Do we already have a plan to update blob-uploader to support codecv9?

Member Author:
Added in this PR: scroll-tech/scroll#1752

@@ -0,0 +1,221 @@
package encoding

import (
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/log"
)

// DACodecV9 updates the implementation of the base function checkCompressedDataCompatibility
// to use the V9 compatibility check (checkCompressedDataCompatibilityV9) instead of the previous V7 version.
//
// As per Go's rules for shadowing methods with struct embedding, we need to override
// all methods that (transitively) call checkCompressedDataCompatibility:
// - checkCompressedDataCompatibility (updated to use V9)
// - constructBlob (calls checkCompressedDataCompatibility)
// - NewDABatch (calls constructBlob)
// - CheckChunkCompressedDataCompatibility (calls CheckBatchCompressedDataCompatibility)
// - CheckBatchCompressedDataCompatibility (calls checkCompressedDataCompatibility)
// - estimateL1CommitBatchSizeAndBlobSize (calls checkCompressedDataCompatibility)
// - EstimateChunkL1CommitBatchSizeAndBlobSize (calls estimateL1CommitBatchSizeAndBlobSize)
// - EstimateBatchL1CommitBatchSizeAndBlobSize (calls estimateL1CommitBatchSizeAndBlobSize)

type DACodecV9 struct {
	DACodecV8
}

func NewDACodecV9() *DACodecV9 {
	v := CodecV9
	return &DACodecV9{
		DACodecV8: DACodecV8{
			DACodecV7: DACodecV7{forcedVersion: &v},
		},
	}
}

// checkCompressedDataCompatibility checks the compressed data compatibility for a batch.
// It constructs a blob payload, compresses the data, and checks the compressed data compatibility.
// flag checkLength indicates whether to check the length of the compressed data against the original data.
// If checkLength is true, this function returns if compression is needed based on the compressed data's length, which is used when doing batch bytes encoding.
// If checkLength is false, this function returns the result of the compatibility check, which is used when determining the chunk and batch contents.
func (d *DACodecV9) checkCompressedDataCompatibility(payloadBytes []byte, checkLength bool) ([]byte, bool, error) {
	compressedPayloadBytes, err := d.CompressScrollBatchBytes(payloadBytes)
	if err != nil {
		return nil, false, fmt.Errorf("failed to compress blob payload: %w", err)
	}

	if err = checkCompressedDataCompatibilityV9(compressedPayloadBytes); err != nil {
		log.Warn("Compressed data compatibility check failed", "err", err, "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
		return nil, false, nil
	}

	// check if compressed data is bigger or equal to the original data -> no need to compress
	if checkLength && len(compressedPayloadBytes) >= len(payloadBytes) {
		log.Warn("Compressed data is bigger or equal to the original data", "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
		return nil, false, nil
	}

	return compressedPayloadBytes, true, nil
}

// NewDABatch creates a DABatch including blob from the provided Batch.
func (d *DACodecV9) NewDABatch(batch *Batch) (DABatch, error) {
	if len(batch.Blocks) == 0 {
		return nil, errors.New("batch must contain at least one block")
	}

	if err := checkBlocksBatchVSChunksConsistency(batch); err != nil {
		return nil, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
	}

	blob, blobVersionedHash, blobBytes, challengeDigest, err := d.constructBlob(batch)
	if err != nil {
		return nil, fmt.Errorf("failed to construct blob: %w", err)
	}

	daBatch, err := newDABatchV7(d.Version(), batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes, challengeDigest)
	if err != nil {
		return nil, fmt.Errorf("failed to construct DABatch: %w", err)
	}

	return daBatch, nil
}

func (d *DACodecV9) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []byte, common.Hash, error) {
	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)

	payloadBytes, err := d.constructBlobPayload(batch)
	if err != nil {
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to construct blob payload: %w", err)
	}

	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
	if err != nil {
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}

	isCompressedFlag := uint8(0x0)
	if enableCompression {
		isCompressedFlag = 0x1
		payloadBytes = compressedPayloadBytes
	}

	sizeSlice := encodeSize3Bytes(uint32(len(payloadBytes)))

	blobBytes[blobEnvelopeV7OffsetVersion] = uint8(d.Version())
	copy(blobBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag], sizeSlice)
	blobBytes[blobEnvelopeV7OffsetCompressedFlag] = isCompressedFlag
	blobBytes = append(blobBytes, payloadBytes...)

	if len(blobBytes) > maxEffectiveBlobBytes {
		log.Error("ConstructBlob: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes))
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("blob exceeds maximum size: got %d, allowed %d", len(blobBytes), maxEffectiveBlobBytes)
	}

	// convert raw data to BLSFieldElements
	blob, err := makeBlobCanonical(blobBytes)
	if err != nil {
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to convert blobBytes to canonical form: %w", err)
	}

	// compute blob versioned hash
	c, err := kzg4844.BlobToCommitment(blob)
	if err != nil {
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to create blob commitment: %w", err)
	}
	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)

	// compute challenge digest for codecv7, different from previous versions,
	// the blob bytes are padded to the max effective blob size, which is 131072 / 32 * 31 due to the blob encoding
	paddedBlobBytes := make([]byte, maxEffectiveBlobBytes)
	copy(paddedBlobBytes, blobBytes)

	challengeDigest := crypto.Keccak256Hash(crypto.Keccak256(paddedBlobBytes), blobVersionedHash[:])

	return blob, blobVersionedHash, blobBytes, challengeDigest, nil
}

// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
func (d *DACodecV9) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
	// filling the needed fields for the batch used in the check
	b := &Batch{
		Chunks:                 []*Chunk{c},
		PrevL1MessageQueueHash: c.PrevL1MessageQueueHash,
		PostL1MessageQueueHash: c.PostL1MessageQueueHash,
		Blocks:                 c.Blocks,
	}

	return d.CheckBatchCompressedDataCompatibility(b)
}

// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
func (d *DACodecV9) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
	if len(b.Blocks) == 0 {
		return false, errors.New("batch must contain at least one block")
	}

	if err := checkBlocksBatchVSChunksConsistency(b); err != nil {
		return false, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
	}

	payloadBytes, err := d.constructBlobPayload(b)
	if err != nil {
		return false, fmt.Errorf("failed to construct blob payload: %w", err)
	}

	// This check is only used for sanity checks. If the check fails, it means that the compression did not work as expected.
	// rollup-relayer will try popping the last chunk of the batch (or last block of the chunk when in proposing chunks) and try again to see if it works as expected.
	// Since length check is used for DA and proving efficiency, it does not need to be checked here.
	_, compatible, err := d.checkCompressedDataCompatibility(payloadBytes, false /* checkLength */)
	if err != nil {
		return false, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}

	return compatible, nil
}

func (d *DACodecV9) estimateL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
	if len(batch.Blocks) == 0 {
		return 0, 0, errors.New("batch must contain at least one block")
	}

	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)

	payloadBytes, err := d.constructBlobPayload(batch)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to construct blob payload: %w", err)
	}

	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}

	if enableCompression {
		blobBytes = append(blobBytes, compressedPayloadBytes...)
	} else {
		blobBytes = append(blobBytes, payloadBytes...)
	}

	return blobEnvelopeV7OffsetPayload + uint64(len(payloadBytes)), calculatePaddedBlobSize(uint64(len(blobBytes))), nil
}

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk.
func (d *DACodecV9) EstimateChunkL1CommitBatchSizeAndBlobSize(chunk *Chunk) (uint64, uint64, error) {
	return d.estimateL1CommitBatchSizeAndBlobSize(&Batch{
		Blocks:                 chunk.Blocks,
		PrevL1MessageQueueHash: chunk.PrevL1MessageQueueHash,
		PostL1MessageQueueHash: chunk.PostL1MessageQueueHash,
	})
}

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch.
func (d *DACodecV9) EstimateBatchL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
	return d.estimateL1CommitBatchSizeAndBlobSize(batch)
}
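A note for reviewers less familiar with Go's embedding semantics: the comment block at the top of codecv9.go is the whole reason so many methods are re-declared in this file. Promoted methods are plain composition, not virtual dispatch, so a method inherited from the embedded type keeps calling the embedded type's own helpers even if the outer type shadows them. A minimal, self-contained sketch (the type and method names below are invented for illustration and are not part of this PR):

```go
package main

import "fmt"

type v8 struct{}

func (v8) check() string { return "v8 check" }

// build calls check through a v8 receiver, so it can only ever resolve to v8.check.
func (c v8) build() string { return c.check() }

type v9 struct{ v8 }

// v9 shadows check.
func (v9) check() string { return "v9 check" }

// The promoted build would still call v8.check, so v9 re-declares build as well.
func (c v9) build() string { return c.check() }

func main() {
	c := v9{}
	fmt.Println(c.v8.build()) // "v8 check": the embedded implementation is unaffected
	fmt.Println(c.build())    // "v9 check": only because build was re-declared on v9
}
```

This mirrors why checkCompressedDataCompatibility, constructBlob, NewDABatch, the chunk/batch compatibility checks, and the estimation helpers are all overridden on DACodecV9 even though their bodies are nearly identical to the V8 versions.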
67 changes: 63 additions & 4 deletions encoding/da.go
@@ -509,6 +509,61 @@ func checkCompressedDataCompatibilityV7(data []byte) error {
	return nil
}

// Sanity check if the compressed data (v9) is compatible with our circuit.
// If we conclude that the data could not be decompressed, then we will
// commit it uncompressed instead.
func checkCompressedDataCompatibilityV9(data []byte) error {
	if len(data) < 16 {
		return fmt.Errorf("too small size (0x%x), what is it?", data)
	}

	fheader := data[0]
	// it is not the encoding type we expected in our zstd header
	if fheader&63 != 32 {
		return fmt.Errorf("unexpected header type (%x)", fheader)
	}

	// skip content size
	switch fheader >> 6 {
	case 0:
		data = data[2:]
	case 1:
		data = data[3:]
	case 2:
		data = data[5:]
	case 3:
		data = data[9:]
	default:
		panic("impossible")
	}

	isLast := false
	// scan each block until done
	for len(data) > 3 && !isLast {
		isLast = (data[0] & 1) == 1
		blkType := (data[0] >> 1) & 3
		var blkSize uint
		if blkType == 1 { // RLE Block
			blkSize = 1
		} else {
			if blkType == 3 {
				return fmt.Errorf("encounter reserved block type at %v", data)
			}
			blkSize = (uint(data[2])*65536 + uint(data[1])*256 + uint(data[0])) >> 3
		}
		if len(data) < 3+int(blkSize) {
			return fmt.Errorf("wrong data len {%d}, expect min {%d}", len(data), 3+blkSize)
		}
		data = data[3+blkSize:]
	}

	if !isLast {
		return fmt.Errorf("unexpected end before last block")
	}

	return nil
}

// makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
// The canonical blob representation is a 32-byte array where every 31 bytes are prepended with 1 zero byte.
// The kzg4844.Blob is a 4096-byte array, thus 0s are padded to the end of the array.
@@ -768,8 +823,10 @@ func GetHardforkName(config *params.ChainConfig, blockHeight, blockTimestamp uin
return "euclid"
} else if !config.IsFeynman(blockTimestamp) {
return "euclidV2"
} else {
} else if !config.IsGalileo(blockTimestamp) {
return "feynman"
} else {
return "galileo"
}
}

@@ -791,8 +848,10 @@ func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uin
		return CodecV6
	} else if !config.IsFeynman(blockTimestamp) {
		return CodecV7
-	} else {
+	} else if !config.IsGalileo(blockTimestamp) {
		return CodecV8
+	} else {
+		return CodecV9
	}
}

@@ -821,7 +880,7 @@ func GetChunkEnableCompression(codecVersion CodecVersion, chunk *Chunk) (bool, e
		return false, nil
	case CodecV2, CodecV3:
		return true, nil
-	case CodecV4, CodecV5, CodecV6, CodecV7, CodecV8:
+	case CodecV4, CodecV5, CodecV6, CodecV7, CodecV8, CodecV9:
		return CheckChunkCompressedDataCompatibility(chunk, codecVersion)
	default:
		return false, fmt.Errorf("unsupported codec version: %v", codecVersion)
@@ -835,7 +894,7 @@ func GetBatchEnableCompression(codecVersion CodecVersion, batch *Batch) (bool, e
		return false, nil
	case CodecV2, CodecV3:
		return true, nil
-	case CodecV4, CodecV5, CodecV6, CodecV7, CodecV8:
+	case CodecV4, CodecV5, CodecV6, CodecV7, CodecV8, CodecV9:
		return CheckBatchCompressedDataCompatibility(batch, codecVersion)
	default:
		return false, fmt.Errorf("unsupported codec version: %v", codecVersion)
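For context on the new checkCompressedDataCompatibilityV9 above: the arithmetic follows the zstd frame format, where each block starts with a 3-byte little-endian header whose bit 0 is the last-block flag, bits 1-2 are the block type, and bits 3-23 are the block size (an RLE block stores a single payload byte regardless of its declared size). A small standalone sketch of just that field extraction; the helper name is ours and not part of this PR:

```go
package main

import "fmt"

// decodeBlockHeader mirrors the field extraction in checkCompressedDataCompatibilityV9:
// the 3-byte zstd block header is little-endian; bit 0 flags the last block,
// bits 1-2 give the block type, and bits 3-23 give the block size. For an
// RLE block (type 1) only one payload byte is stored, so the amount to skip is 1.
func decodeBlockHeader(h [3]byte) (isLast bool, blkType uint, skip uint) {
	v := uint(h[2])*65536 + uint(h[1])*256 + uint(h[0])
	isLast = v&1 == 1
	blkType = (v >> 1) & 3
	if blkType == 1 {
		skip = 1
	} else {
		skip = v >> 3
	}
	return
}

func main() {
	// 0x000021 = 0b100001: last-block flag set, block type 0 (raw), block size 4
	fmt.Println(decodeBlockHeader([3]byte{0x21, 0x00, 0x00})) // true 0 4
}
```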
7 changes: 6 additions & 1 deletion encoding/interfaces.go
@@ -94,6 +94,7 @@ const (
	CodecV6
	CodecV7
	CodecV8
+	CodecV9
)

// CodecFromVersion returns the appropriate codec for the given version.
@@ -117,14 +118,18 @@ func CodecFromVersion(version CodecVersion) (Codec, error) {
		return &DACodecV7{}, nil
	case CodecV8:
		return NewDACodecV8(), nil
+	case CodecV9:
+		return NewDACodecV9(), nil
	default:
		return nil, fmt.Errorf("unsupported codec version: %v", version)
	}
}

// CodecFromConfig determines and returns the appropriate codec based on chain configuration, block number, and timestamp.
func CodecFromConfig(chainCfg *params.ChainConfig, startBlockNumber *big.Int, startBlockTimestamp uint64) Codec {
-	if chainCfg.IsFeynman(startBlockTimestamp) {
+	if chainCfg.IsGalileo(startBlockTimestamp) {
+		return NewDACodecV9()
+	} else if chainCfg.IsFeynman(startBlockTimestamp) {
		return NewDACodecV8()
	} else if chainCfg.IsEuclidV2(startBlockTimestamp) {
		return &DACodecV7{}
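Taken together with the da.go changes, the wiring above means Galileo-era blocks resolve to the V9 codec. A rough sketch of how a caller might pick it up; the module import path and the exact exported surface of the Codec interface are assumptions based on this diff rather than verified against the repository:

```go
package example

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/da-codec/encoding" // assumed module path
	"github.com/scroll-tech/go-ethereum/params"
)

// buildDABatch sketches one plausible call path through the new wiring:
// resolve the codec from the chain config (Galileo resolves to CodecV9),
// run the batch-level compatibility check, then build the DA batch.
func buildDABatch(cfg *params.ChainConfig, blockNum *big.Int, blockTime uint64, batch *encoding.Batch) (encoding.DABatch, error) {
	codec := encoding.CodecFromConfig(cfg, blockNum, blockTime)

	compatible, err := codec.CheckBatchCompressedDataCompatibility(batch)
	if err != nil {
		return nil, fmt.Errorf("compatibility check failed: %w", err)
	}
	if !compatible {
		return nil, fmt.Errorf("batch not compatible under codec %v", codec.Version())
	}

	return codec.NewDABatch(batch)
}
```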