Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
73 commits
Select commit Hold shift + click to select a range
f3c7603
add ChainID parameter to Header storage
tim-barry Dec 1, 2025
0fe4007
update cluster mutator/snapshot tests
tim-barry Dec 2, 2025
467b732
update header generation in cluster builder tests
tim-barry Dec 2, 2025
585c0e9
update mock usage in epochmgr tests
tim-barry Dec 3, 2025
6d9c389
enable TestExtend_WithReferenceBlockFromClusterChain
tim-barry Dec 3, 2025
927b229
fix FinalizedAncestryLookup during cluster switchover
tim-barry Dec 3, 2025
81ffdeb
Use appropriate height index for header storage
tim-barry Dec 3, 2025
99a2498
Merge branch 'master' into tim/4204-split-header-storage-by-chainid
tim-barry Dec 3, 2025
6253765
Merge branch 'master' into tim/4204-split-header-storage-by-chainid
tim-barry Dec 4, 2025
a22fd0b
introduce sentinel error for incorrect header chain
tim-barry Dec 8, 2025
ee80525
update default ChainID for cluster block fixture in tests
tim-barry Dec 8, 2025
2ecfee1
update tests
tim-barry Dec 8, 2025
6fe94ec
move IsClusterChain to a method on ChainID
tim-barry Dec 12, 2025
2fbe0b5
Add NewClusterHeaders constructor and make chainID checks more explicit
tim-barry Dec 12, 2025
f16ccbf
add chain-specific lock checks for header insertion
tim-barry Dec 12, 2025
c24be76
update determineChainID() and document expected errors
tim-barry Dec 12, 2025
e397124
Fix bug in populateFinalizedAncestryLookup and remove workaround
tim-barry Dec 12, 2025
f53d361
clarify chainID is for consensus
tim-barry Dec 13, 2025
a93fcfa
address some TODOs for initializing storage during util commands
tim-barry Dec 15, 2025
62d9ee0
remove completed TODO
tim-barry Dec 15, 2025
5708e46
rewrite determineChainID() for clarity; move preInitFns before ChainI…
tim-barry Dec 15, 2025
6134b48
use only necessary storage interfaces in read-badger commands
tim-barry Dec 16, 2025
c1af7a8
fixed typos
AlexHentschel Dec 18, 2025
9e506b1
added deprecation notice
AlexHentschel Dec 18, 2025
789ab94
fix reference block in collection finalizer test
tim-barry Dec 17, 2025
3c57826
Add operation.InsertClusterHeader
tim-barry Dec 17, 2025
0e0e37a
update required lock for operation.InsertHeader
tim-barry Dec 17, 2025
dae7d73
rename+move IsClusterID next to CanonicalClusterID; use regex
tim-barry Dec 18, 2025
a6cc833
Consistently use valid/canonical cluster chainIDs in tests
tim-barry Dec 18, 2025
c0bdbac
Merge branch 'master' into tim/4204-split-header-storage-by-chainid
tim-barry Dec 18, 2025
a6a98a6
Apply suggestions from code review
tim-barry Dec 18, 2025
7737e6b
fix lint
tim-barry Dec 18, 2025
d2f2846
rename to `clusterHeaders` where appropriate
tim-barry Dec 18, 2025
8c2f842
continue rename of clusterHeaders vars/fields
tim-barry Dec 19, 2025
d0dd8fd
remove unused field in collection builder test
tim-barry Dec 19, 2025
8ffd89a
update doc comments for Headers storage
tim-barry Dec 19, 2025
3b53515
add some tests for storing/retrieving on headers with wrong chain
tim-barry Dec 20, 2025
6613eda
update error returns for storage.Blocks
tim-barry Dec 20, 2025
ccaa0d1
clarify Headers.ByView not available for cluster blocks
tim-barry Jan 5, 2026
7e28024
test lock holding requirements for storing headers
tim-barry Jan 6, 2026
b1d4cbb
use canonical ClusterIDs in channels_test package
tim-barry Jan 6, 2026
cf5cdc8
add unit test for IsCanonicalClusterID
tim-barry Jan 6, 2026
2a4240a
minor: use unittest.WithLocks in header test
tim-barry Jan 6, 2026
de3e77f
Add IncompleteStateError for missing data required by protocol
tim-barry Jan 6, 2026
7bad925
use exceptions when failing to retrieve ChainID on startup
tim-barry Jan 6, 2026
ceb47b1
rename GetChainIDFromLatestFinalizedHeader -> GetChainID
tim-barry Jan 6, 2026
d49895a
Update storage/store/headers.go
tim-barry Jan 6, 2026
9cfaddd
Merge branch 'master' into tim/4204-split-header-storage-by-chainid
tim-barry Jan 7, 2026
0819804
post-merge: update util cmd for storage changes
tim-barry Jan 7, 2026
d4c6bcc
fix: cluster ref height range for transaction deduplication
tim-barry Jan 7, 2026
00b68f9
minor fixes
tim-barry Jan 7, 2026
ce7f73f
replace panic with error return in NewHeaders storage initialization
tim-barry Jan 7, 2026
c63ad7e
ensure we halt and surface error instead of bootstrapping if state is…
tim-barry Jan 8, 2026
5aec0c8
update comments for epoch first/final height caching
tim-barry Jan 8, 2026
7796d9a
update comments for GetLatestFinalizedHeader
tim-barry Jan 8, 2026
e9eedc4
minor documentation extensions
AlexHentschel Jan 13, 2026
ad56c4f
headers documentation
AlexHentschel Jan 13, 2026
187dbf7
`blocks` documentation
AlexHentschel Jan 13, 2026
355b43f
minor doc polishing
AlexHentschel Jan 13, 2026
e4d1235
Apply suggestions from code review
tim-barry Jan 13, 2026
020aaed
Apply suggestions from code review
tim-barry Jan 13, 2026
1dc2936
fix import
tim-barry Jan 13, 2026
8aecaa1
minor fixes
tim-barry Jan 14, 2026
bc69f0f
Update docs+errors for Header storage
tim-barry Jan 14, 2026
2523aa3
Update docs+errors for blocks/clusterblocks storage
tim-barry Jan 14, 2026
b376fc9
update error returns for failure to retrieve indexed blocks
tim-barry Jan 14, 2026
100cfc9
minor fixups
tim-barry Jan 14, 2026
b2ac2b6
clarify caching of epoch starting height in collection builder
tim-barry Jan 14, 2026
6db6bf6
refactor error returns in Headers.ByParentID
tim-barry Jan 14, 2026
08e65e8
Test Proposal retrieval alongside Block/Header retrieval
tim-barry Jan 15, 2026
5aa5c4d
Merge branch 'master' into tim/4204-split-header-storage-by-chainid
tim-barry Jan 15, 2026
abd6b9f
fix typo
tim-barry Jan 15, 2026
2a14b8d
return IncompleteStateError from GetLatestFinalizedHeader
tim-barry Jan 15, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 7 additions & 4 deletions admin/commands/storage/read_range_cluster_blocks.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"github.com/onflow/flow-go/admin/commands"
"github.com/onflow/flow-go/cmd/util/cmd/read-light-block"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module/metrics"
"github.com/onflow/flow-go/storage"
"github.com/onflow/flow-go/storage/store"
)
Expand All @@ -22,14 +23,12 @@ const Max_Range_Cluster_Block_Limit = uint64(10001)

type ReadRangeClusterBlocksCommand struct {
db storage.DB
headers *store.Headers
payloads *store.ClusterPayloads
}

func NewReadRangeClusterBlocksCommand(db storage.DB, headers *store.Headers, payloads *store.ClusterPayloads) commands.AdminCommand {
func NewReadRangeClusterBlocksCommand(db storage.DB, payloads *store.ClusterPayloads) commands.AdminCommand {
return &ReadRangeClusterBlocksCommand{
db: db,
headers: headers,
payloads: payloads,
}
}
Expand All @@ -51,8 +50,12 @@ func (c *ReadRangeClusterBlocksCommand) Handler(ctx context.Context, req *admin.
return nil, admin.NewInvalidAdminReqErrorf("getting for more than %v blocks at a time might have an impact to node's performance and is not allowed", Max_Range_Cluster_Block_Limit)
}

clusterHeaders, err := store.NewClusterHeaders(metrics.NewNoopCollector(), c.db, flow.ChainID(chainID))
if err != nil {
return nil, err
}
clusterBlocks := store.NewClusterBlocks(
c.db, flow.ChainID(chainID), c.headers, c.payloads,
c.db, flow.ChainID(chainID), clusterHeaders, c.payloads,
)

lights, err := read.ReadClusterLightBlockByHeightRange(clusterBlocks, reqData.startHeight, reqData.endHeight)
Expand Down
4 changes: 2 additions & 2 deletions cmd/collection/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -235,8 +235,8 @@ func main() {
}).
AdminCommand("read-range-cluster-blocks", func(conf *cmd.NodeConfig) commands.AdminCommand {
clusterPayloads := store.NewClusterPayloads(&metrics.NoopCollector{}, conf.ProtocolDB)
headers := store.NewHeaders(&metrics.NoopCollector{}, conf.ProtocolDB)
return storageCommands.NewReadRangeClusterBlocksCommand(conf.ProtocolDB, headers, clusterPayloads)
// defer construction of Headers since the cluster's ChainID is provided by the command
return storageCommands.NewReadRangeClusterBlocksCommand(conf.ProtocolDB, clusterPayloads)
}).
Module("follower distributor", func(node *cmd.NodeConfig) error {
followerDistributor = pubsub.NewFollowerDistributor()
Expand Down
52 changes: 46 additions & 6 deletions cmd/scaffold.go
Original file line number Diff line number Diff line change
Expand Up @@ -1186,8 +1186,45 @@ func (fnb *FlowNodeBuilder) initStorageLockManager() error {
return nil
}

// determineChainID attempts to determine the chain this node is running on
// directly from the database or root snapshot, before storage interfaces have been initialized.
// If the protocol state is already bootstrapped, the ChainID is read from the database;
// otherwise it is taken from the configured (or on-disk) root protocol state snapshot.
// No errors expected during normal operation.
func (fnb *FlowNodeBuilder) determineChainID() error {
	bootstrapped, err := badgerState.IsBootstrapped(fnb.ProtocolDB)
	if err != nil {
		return fmt.Errorf("failed to determine whether database contains bootstrapped state: %w", err)
	}

	// happy path for a bootstrapped node: the ChainID is persisted in the database
	if bootstrapped {
		chainID, err := badgerState.GetChainID(fnb.ProtocolDB)
		if err != nil {
			return fmt.Errorf("failed to read ChainID from database: %w", err)
		}
		fnb.RootChainID = chainID
		return nil
	}

	// not yet bootstrapped: fall back to the root snapshot;
	// if no root snapshot is configured, attempt to load the file from disk
	rootSnapshot := fnb.RootSnapshot
	if rootSnapshot == nil {
		fnb.Logger.Info().Msgf("loading root protocol state snapshot from disk")
		rootSnapshot, err = loadRootProtocolSnapshot(fnb.BaseConfig.BootstrapDir)
		if err != nil {
			return fmt.Errorf("failed to read protocol snapshot from disk: %w", err)
		}
	}
	// retrieve ChainID from the snapshot
	sealingSegment, err := rootSnapshot.SealingSegment()
	if err != nil {
		return fmt.Errorf("failed to read ChainID from root snapshot: %w", err)
	}
	fnb.RootChainID = sealingSegment.Highest().ChainID
	return nil
}

func (fnb *FlowNodeBuilder) initStorage() error {
headers := store.NewHeaders(fnb.Metrics.Cache, fnb.ProtocolDB)
headers, err := store.NewHeaders(fnb.Metrics.Cache, fnb.ProtocolDB, fnb.RootChainID)
if err != nil {
return err
}
guarantees := store.NewGuarantees(fnb.Metrics.Cache, fnb.ProtocolDB, fnb.BaseConfig.guaranteesCacheSize,
store.DefaultCacheSize)
seals := store.NewSeals(fnb.Metrics.Cache, fnb.ProtocolDB)
Expand Down Expand Up @@ -1457,7 +1494,6 @@ func (fnb *FlowNodeBuilder) setRootSnapshot(rootSnapshot protocol.Snapshot) erro
return fmt.Errorf("failed to read root QC: %w", err)
}

fnb.RootChainID = fnb.FinalizedRootBlock.ChainID
fnb.SporkID = fnb.RootSnapshot.Params().SporkID()

return nil
Expand Down Expand Up @@ -2081,16 +2117,20 @@ func (fnb *FlowNodeBuilder) onStart() error {
return err
}

if err := fnb.initStorage(); err != nil {
return err
}

for _, f := range fnb.preInitFns {
if err := fnb.handlePreInit(f); err != nil {
return err
}
}

if err := fnb.determineChainID(); err != nil {
return err
}

if err := fnb.initStorage(); err != nil {
return err
}

if err := fnb.initState(); err != nil {
return err
}
Expand Down
10 changes: 8 additions & 2 deletions cmd/util/cmd/common/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (

"github.com/rs/zerolog/log"

"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module/metrics"
"github.com/onflow/flow-go/storage"
storagebadger "github.com/onflow/flow-go/storage/badger"
Expand Down Expand Up @@ -53,9 +54,14 @@ func IsPebbleFolder(dataDir string) (bool, error) {
return pebblestorage.IsPebbleFolder(dataDir)
}

func InitStorages(db storage.DB) *store.All {
// InitStorages initializes the common storage abstractions used by all node roles (with default cache sizes
// suitable for mainnet). However, no metrics are collected (if you need metrics, use [store.InitAll] directly).
// The chain ID indicates which Flow network the node is operating, referencing the ID of the blockchain
// built by the main consensus, i.e. consensus nodes (not the chains built by collector clusters).
// No errors are expected during normal operations.
func InitStorages(db storage.DB, chainID flow.ChainID) (*store.All, error) {
metrics := &metrics.NoopCollector{}
return store.InitAll(metrics, db, chainID)
}

// WithStorage runs the given function with the storage depending on the flags.
Expand Down
10 changes: 9 additions & 1 deletion cmd/util/cmd/exec-data-json-export/block_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import (
"github.com/onflow/flow-go/cmd/util/cmd/common"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module/metrics"
badgerstate "github.com/onflow/flow-go/state/protocol/badger"
"github.com/onflow/flow-go/storage"
"github.com/onflow/flow-go/storage/store"
)
Expand All @@ -37,9 +38,16 @@ func ExportBlocks(blockID flow.Identifier, dbPath string, outputPath string) (fl

// traverse backward from the given block (parent block) and fetch by blockHash
err := common.WithStorage(dbPath, func(db storage.DB) error {
chainID, err := badgerstate.GetChainID(db)
if err != nil {
return err
}

cacheMetrics := &metrics.NoopCollector{}
headers := store.NewHeaders(cacheMetrics, db)
headers, err := store.NewHeaders(cacheMetrics, db, chainID)
if err != nil {
return err
}
index := store.NewIndex(cacheMetrics, db)
guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize, store.DefaultCacheSize)
seals := store.NewSeals(cacheMetrics, db)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"github.com/onflow/flow-go/fvm/storage/snapshot"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module/metrics"
badgerstate "github.com/onflow/flow-go/state/protocol/badger"
"github.com/onflow/flow-go/storage"
"github.com/onflow/flow-go/storage/operation"
"github.com/onflow/flow-go/storage/store"
Expand All @@ -26,9 +27,16 @@ func ExportDeltaSnapshots(blockID flow.Identifier, dbPath string, outputPath str

// traverse backward from the given block (parent block) and fetch by blockHash
return common.WithStorage(dbPath, func(db storage.DB) error {
chainID, err := badgerstate.GetChainID(db)
if err != nil {
return err
}

cacheMetrics := &metrics.NoopCollector{}
headers := store.NewHeaders(cacheMetrics, db)
headers, err := store.NewHeaders(cacheMetrics, db, chainID)
if err != nil {
return err
}

activeBlockID := blockID
outputFile := filepath.Join(outputPath, "delta.jsonl")
Expand Down
10 changes: 9 additions & 1 deletion cmd/util/cmd/exec-data-json-export/event_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"github.com/onflow/flow-go/cmd/util/cmd/common"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module/metrics"
badgerstate "github.com/onflow/flow-go/state/protocol/badger"
"github.com/onflow/flow-go/storage"
"github.com/onflow/flow-go/storage/store"
)
Expand All @@ -30,9 +31,16 @@ func ExportEvents(blockID flow.Identifier, dbPath string, outputPath string) err

// traverse backward from the given block (parent block) and fetch by blockHash
return common.WithStorage(dbPath, func(db storage.DB) error {
chainID, err := badgerstate.GetChainID(db)
if err != nil {
return err
}

cacheMetrics := &metrics.NoopCollector{}
headers := store.NewHeaders(cacheMetrics, db)
headers, err := store.NewHeaders(cacheMetrics, db, chainID)
if err != nil {
return err
}
events := store.NewEvents(cacheMetrics, db)
activeBlockID := blockID

Expand Down
10 changes: 9 additions & 1 deletion cmd/util/cmd/exec-data-json-export/result_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"github.com/onflow/flow-go/cmd/util/cmd/common"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module/metrics"
badgerstate "github.com/onflow/flow-go/state/protocol/badger"
"github.com/onflow/flow-go/storage"
"github.com/onflow/flow-go/storage/store"
)
Expand All @@ -28,9 +29,16 @@ func ExportResults(blockID flow.Identifier, dbPath string, outputPath string) er

// traverse backward from the given block (parent block) and fetch by blockHash
return common.WithStorage(dbPath, func(db storage.DB) error {
chainID, err := badgerstate.GetChainID(db)
if err != nil {
return err
}

cacheMetrics := &metrics.NoopCollector{}
headers := store.NewHeaders(cacheMetrics, db)
headers, err := store.NewHeaders(cacheMetrics, db, chainID)
if err != nil {
return err
}
results := store.NewExecutionResults(cacheMetrics, db)
activeBlockID := blockID

Expand Down
11 changes: 10 additions & 1 deletion cmd/util/cmd/exec-data-json-export/transaction_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"github.com/onflow/flow-go/cmd/util/cmd/common"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module/metrics"
badgerstate "github.com/onflow/flow-go/state/protocol/badger"
"github.com/onflow/flow-go/storage/store"
)

Expand Down Expand Up @@ -48,14 +49,22 @@ func ExportExecutedTransactions(blockID flow.Identifier, dbPath string, outputPa
}
defer db.Close()

chainID, err := badgerstate.GetChainID(db)
if err != nil {
return err
}

cacheMetrics := &metrics.NoopCollector{}
index := store.NewIndex(cacheMetrics, db)
guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize, store.DefaultCacheSize)
seals := store.NewSeals(cacheMetrics, db)
results := store.NewExecutionResults(cacheMetrics, db)
receipts := store.NewExecutionReceipts(cacheMetrics, db, results, store.DefaultCacheSize)
transactions := store.NewTransactions(cacheMetrics, db)
headers := store.NewHeaders(cacheMetrics, db)
headers, err := store.NewHeaders(cacheMetrics, db, chainID)
if err != nil {
return err
}
payloads := store.NewPayloads(db, index, guarantees, seals, receipts, results)
blocks := store.NewBlocks(db, headers, payloads)
collections := store.NewCollections(db, transactions)
Expand Down
10 changes: 9 additions & 1 deletion cmd/util/cmd/export-json-transactions/cmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import (

"github.com/onflow/flow-go/cmd/util/cmd/common"
"github.com/onflow/flow-go/cmd/util/cmd/export-json-transactions/transactions"
badgerstate "github.com/onflow/flow-go/state/protocol/badger"
"github.com/onflow/flow-go/storage"
)

Expand Down Expand Up @@ -67,7 +68,14 @@ func ExportTransactions(lockManager lockctx.Manager, dataDir string, outputDir s

// init dependencies
return common.WithStorage(flagDatadir, func(db storage.DB) error {
storages := common.InitStorages(db)
chainID, err := badgerstate.GetChainID(db)
if err != nil {
return err
}
storages, err := common.InitStorages(db, chainID)
if err != nil {
return err
}

state, err := common.OpenProtocolState(lockManager, db, storages)
if err != nil {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,8 @@ func TestFindBlockTransactions(t *testing.T) {
)

// prepare dependencies
storages := common.InitStorages(db)
storages, err := common.InitStorages(db, flow.Emulator)
require.NoError(t, err)
payloads, collections := storages.Payloads, storages.Collections
snap4 := &mock.Snapshot{}
snap4.On("Head").Return(b1.ToHeader(), nil)
Expand All @@ -73,7 +74,7 @@ func TestFindBlockTransactions(t *testing.T) {
// store into database
p1 := unittest.ProposalFromBlock(b1)
p2 := unittest.ProposalFromBlock(b2)
err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
err := storages.Blocks.BatchStore(lctx, rw, p1)
if err != nil {
Expand Down
10 changes: 9 additions & 1 deletion cmd/util/cmd/find-inconsistent-result/cmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module/block_iterator/latest"
"github.com/onflow/flow-go/state/protocol"
badgerstate "github.com/onflow/flow-go/state/protocol/badger"
"github.com/onflow/flow-go/storage"
)

Expand Down Expand Up @@ -95,7 +96,14 @@ func findFirstMismatch(datadir string, startHeight, endHeight uint64, lockManage

func createStorages(db storage.DB, lockManager lockctx.Manager) (
storage.Headers, storage.ExecutionResults, storage.Seals, protocol.State, error) {
storages := common.InitStorages(db)
chainID, err := badgerstate.GetChainID(db)
if err != nil {
return nil, nil, nil, nil, err
}
storages, err := common.InitStorages(db, chainID)
if err != nil {
return nil, nil, nil, nil, err
}
state, err := common.OpenProtocolState(lockManager, db, storages)
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("could not open protocol state: %v", err)
Expand Down
13 changes: 10 additions & 3 deletions cmd/util/cmd/read-badger/cmd/blocks.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
"github.com/onflow/flow-go/cmd/util/cmd/common"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module/metrics"
badgerstate "github.com/onflow/flow-go/state/protocol/badger"
"github.com/onflow/flow-go/storage"
"github.com/onflow/flow-go/storage/store"
)
Expand All @@ -28,8 +29,15 @@ var blocksCmd = &cobra.Command{
Short: "get a block by block ID or height",
RunE: func(cmd *cobra.Command, args []string) error {
return common.WithStorage(flagDatadir, func(db storage.DB) error {
cacheMetrics := &metrics.NoopCollector{}
headers := store.NewHeaders(cacheMetrics, db)
chainID, err := badgerstate.GetChainID(db)
if err != nil {
return err
}
cacheMetrics := metrics.NewNoopCollector()
headers, err := store.NewHeaders(cacheMetrics, db, chainID)
if err != nil {
return err
}
index := store.NewIndex(cacheMetrics, db)
guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize, store.DefaultCacheSize)
seals := store.NewSeals(cacheMetrics, db)
Expand All @@ -39,7 +47,6 @@ var blocksCmd = &cobra.Command{
blocks := store.NewBlocks(db, headers, payloads)

var block *flow.Block
var err error

if flagBlockID != "" {
log.Info().Msgf("got flag block id: %s", flagBlockID)
Expand Down
Loading
Loading