Merge pull request #2383 from OffchainLabs/das-fix-l1SyncService
l1SyncService should sync full not chunked payload
Tristan-Wilson authored Jun 14, 2024
2 parents 52cbd28 + ead80cf commit be6fd2f
Showing 1 changed file with 12 additions and 27 deletions.
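
At a glance: the sync service previously recovered a batch while recording the preimage chunks it touched and wrote each recorded chunk to the sync target; after this change it writes the single recovered payload. A minimal sketch of the new write path follows (the trimmed-down StorageService interface and the helper below are illustrative assumptions, not the das package's actual types):

package main

// Sketch of the post-fix write path. StorageService here is an assumed,
// simplified stand-in for the das package's storage interface.

import (
    "context"
    "fmt"
    "time"
)

type StorageService interface {
    Put(ctx context.Context, data []byte, expiration uint64) error
}

// storeRecoveredPayload mirrors what processBatchDelivered now does with the
// value returned by RecoverPayloadFromDasBatch: one Put of the full payload,
// rather than one Put per recorded preimage chunk.
func storeRecoveredPayload(ctx context.Context, syncTo StorageService, payload []byte, retention time.Duration) error {
    if payload == nil {
        return nil // nothing was recovered, nothing to store
    }
    storeUntil := uint64(time.Now().Add(retention).Unix())
    return syncTo.Put(ctx, payload, storeUntil)
}

type logStorage struct{}

func (logStorage) Put(_ context.Context, data []byte, expiration uint64) error {
    fmt.Printf("stored %d bytes until %d\n", len(data), expiration)
    return nil
}

func main() {
    _ = storeRecoveredPayload(context.Background(), logStorage{}, []byte("recovered batch payload"), time.Hour)
}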
das/syncing_fallback_storage.go (12 additions, 27 deletions)
@@ -6,7 +6,6 @@ package das
 import (
     "context"
     "encoding/binary"
-    "errors"
     "fmt"
     "math"
     "math/big"
@@ -57,7 +56,6 @@ func init() {
 }
 
 type SyncToStorageConfig struct {
-    CheckAlreadyExists bool `koanf:"check-already-exists"`
     Eager bool `koanf:"eager"`
     EagerLowerBoundBlock uint64 `koanf:"eager-lower-bound-block"`
     RetentionPeriod time.Duration `koanf:"retention-period"`
@@ -68,7 +66,6 @@ type SyncToStorageConfig struct {
 }
 
 var DefaultSyncToStorageConfig = SyncToStorageConfig{
-    CheckAlreadyExists: true,
     Eager: false,
     EagerLowerBoundBlock: 0,
     RetentionPeriod: time.Duration(math.MaxInt64),
@@ -79,7 +76,6 @@ var DefaultSyncToStorageConfig = SyncToStorageConfig{
 }
 
 func SyncToStorageConfigAddOptions(prefix string, f *flag.FlagSet) {
-    f.Bool(prefix+".check-already-exists", DefaultSyncToStorageConfig.CheckAlreadyExists, "check if the data already exists in this DAS's storage. Must be disabled for fast sync with an IPFS backend")
     f.Bool(prefix+".eager", DefaultSyncToStorageConfig.Eager, "eagerly sync batch data to this DAS's storage from the rest endpoints, using L1 as the index of batch data hashes; otherwise only sync lazily")
     f.Uint64(prefix+".eager-lower-bound-block", DefaultSyncToStorageConfig.EagerLowerBoundBlock, "when eagerly syncing, start indexing forward from this L1 block. Only used if there is no sync state")
     f.Uint64(prefix+".parent-chain-blocks-per-read", DefaultSyncToStorageConfig.ParentChainBlocksPerRead, "when eagerly syncing, max l1 blocks to read per poll")
@@ -106,7 +102,9 @@ type l1SyncService struct {
     lastBatchAcc common.Hash
 }
 
-const nextBlockNoFilename = "nextBlockNumber"
+// The original syncing process had a bug, so the file was renamed to cause any mirrors
+// in the wild to re-sync from their configured starting block number.
+const nextBlockNoFilename = "nextBlockNumberV2"
 
 func readSyncStateOrDefault(syncDir string, dflt uint64) uint64 {
     if syncDir == "" {
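
Why renaming the state file forces a re-sync: mirrors persist the next parent-chain block to scan in this file, and with a new filename no prior state is found, so syncing restarts from the configured eager-lower-bound-block. An assumed sketch of that bookkeeping follows; the real readSyncStateOrDefault lives in this file and may differ in error handling and on-disk format:

package main

import (
    "encoding/binary"
    "fmt"
    "os"
    "path/filepath"
)

const nextBlockNoFilename = "nextBlockNumberV2"

// readSyncStateOrDefault returns the next parent-chain block to scan. With the
// filename bumped to V2, an existing mirror finds no state file and falls back
// to dflt (its eager-lower-bound-block), re-syncing from there.
func readSyncStateOrDefault(syncDir string, dflt uint64) uint64 {
    if syncDir == "" {
        return dflt
    }
    data, err := os.ReadFile(filepath.Join(syncDir, nextBlockNoFilename))
    if err != nil || len(data) < 8 {
        return dflt
    }
    return binary.BigEndian.Uint64(data[:8])
}

// writeSyncState records progress so a restart resumes where the last poll stopped.
func writeSyncState(syncDir string, nextBlock uint64) error {
    buf := make([]byte, 8)
    binary.BigEndian.PutUint64(buf, nextBlock)
    return os.WriteFile(filepath.Join(syncDir, nextBlockNoFilename), buf, 0o600)
}

func main() {
    dir, _ := os.MkdirTemp("", "sync-state")
    defer os.RemoveAll(dir)

    fmt.Println(readSyncStateOrDefault(dir, 19_000_000)) // no state yet: falls back to 19000000
    _ = writeSyncState(dir, 19_000_123)
    fmt.Println(readSyncStateOrDefault(dir, 19_000_000)) // resumes at 19000123
}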
@@ -212,31 +210,18 @@ func (s *l1SyncService) processBatchDelivered(ctx context.Context, batchDeliveredLog
     binary.BigEndian.PutUint64(header[32:40], deliveredEvent.AfterDelayedMessagesRead.Uint64())
 
     data = append(header, data...)
-    preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte)
-    preimageRecorder := daprovider.RecordPreimagesTo(preimages)
-    if _, err = daprovider.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimageRecorder, true); err != nil {
-        if errors.Is(err, daprovider.ErrSeqMsgValidation) {
-            log.Error(err.Error())
-        } else {
-            log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data)
-            return err
-        }
-    }
-    for _, preimages := range preimages {
-        for hash, contents := range preimages {
-            var err error
-            if s.config.CheckAlreadyExists {
-                _, err = s.syncTo.GetByHash(ctx, hash)
-            }
-            if err == nil || errors.Is(err, ErrNotFound) {
-                if err := s.syncTo.Put(ctx, contents, storeUntil); err != nil {
-                    return err
-                }
-            } else {
-                return err
-            }
-        }
-    }
+    var payload []byte
+    if payload, err = daprovider.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, nil, true); err != nil {
+        log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data)
+        return err
+    }
+
+    if payload != nil {
+        if err := s.syncTo.Put(ctx, payload, storeUntil); err != nil {
+            return err
+        }
+    }
 
     seqNumber := deliveredEvent.BatchSequenceNumber
     if seqNumber == nil {
         seqNumber = common.Big0
