From 1d70a8e659d6dc1dee5057ba47bd0d92f99d0e59 Mon Sep 17 00:00:00 2001
From: CHAMI Rachid
Date: Wed, 27 Mar 2024 02:32:47 +0100
Subject: [PATCH] chore: increase the data commitment blocks limit in the API (#1268)

## Description

This is done to support 4-hour batches of attestations in the API without having to make a breaking change.

#### PR checklist

- [ ] Tests written/updated
- [ ] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog)
- [ ] Updated relevant documentation (`docs/` or `spec/`) and code comments
---
 pkg/consts/consts.go | 2 ++
 rpc/core/blocks.go   | 9 ++++++---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go
index 7797aa044b..8f6cb9717b 100644
--- a/pkg/consts/consts.go
+++ b/pkg/consts/consts.go
@@ -41,5 +41,7 @@ var (
 	NewBaseHashFunc = sha256.New
 
 	// DataCommitmentBlocksLimit is the limit to the number of blocks we can generate a data commitment for.
+	// NOTE: this is no longer used as we're moving towards Blobstream X. However, we're leaving it
+	// here for backwards compatibility purposes until it's removed in the next breaking release.
 	DataCommitmentBlocksLimit = 1000
 )
diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go
index 14f4757bf5..c6ae1a2571 100644
--- a/rpc/core/blocks.go
+++ b/rpc/core/blocks.go
@@ -11,7 +11,6 @@ import (
 	"github.com/cometbft/cometbft/libs/bytes"
 	cmtmath "github.com/cometbft/cometbft/libs/math"
 	cmtquery "github.com/cometbft/cometbft/libs/pubsub/query"
-	"github.com/cometbft/cometbft/pkg/consts"
 	ctypes "github.com/cometbft/cometbft/rpc/core/types"
 	rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types"
 	blockidxnull "github.com/cometbft/cometbft/state/indexer/block/null"
@@ -333,6 +332,10 @@ func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) {
 	return append(paddedHeight, dataRoot[:]...), nil
 }
 
+// dataCommitmentBlocksLimit is the maximum number of blocks to be used to create a data commitment.
+// It's a local parameter to protect the API from creating unnecessarily large commitments.
+const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks.
+
 // validateDataCommitmentRange runs basic checks on the asc sorted list of
 // heights that will be used subsequently in generating data commitments over
 // the defined set of heights.
@@ -342,8 +345,8 @@ func validateDataCommitmentRange(start uint64, end uint64) error {
 	}
 	env := GetEnvironment()
 	heightsRange := end - start
-	if heightsRange > uint64(consts.DataCommitmentBlocksLimit) {
-		return fmt.Errorf("the query exceeds the limit of allowed blocks %d", consts.DataCommitmentBlocksLimit)
+	if heightsRange > uint64(dataCommitmentBlocksLimit) {
+		return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataCommitmentBlocksLimit)
 	}
 	if heightsRange == 0 {
 		return fmt.Errorf("cannot create the data commitments for an empty set of blocks")
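
For context on what the new limit buys, below is a minimal, self-contained Go sketch that mirrors the two range checks changed in `validateDataCommitmentRange`. The `checkRange` helper, the `main` driver, and the example heights are illustrative and not part of the patch; only the two checks and the 10,000-block limit come from the diff above.

```go
package main

import "fmt"

// Mirrors the local limit introduced in rpc/core/blocks.go:
// ~33 hours of blocks assuming 12-second block times.
const dataCommitmentBlocksLimit = 10_000

// checkRange is an illustrative stand-in for the checks shown in the diff:
// the requested range must be non-empty and must not exceed the block limit.
func checkRange(start, end uint64) error {
	heightsRange := end - start
	if heightsRange > uint64(dataCommitmentBlocksLimit) {
		return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataCommitmentBlocksLimit)
	}
	if heightsRange == 0 {
		return fmt.Errorf("cannot create the data commitments for an empty set of blocks")
	}
	return nil
}

func main() {
	// A 4-hour batch at 12-second blocks spans 4*3600/12 = 1,200 blocks:
	// over the old consts.DataCommitmentBlocksLimit of 1,000, but well
	// within the new local limit of 10,000.
	fmt.Println(checkRange(1, 1+1_200)) // <nil>

	// Anything larger than 10,000 blocks is still rejected.
	fmt.Println(checkRange(1, 1+10_001)) // the query exceeds the limit of allowed blocks 10000
}
```

At 12-second blocks, the previous 1,000-block ceiling capped a single commitment at roughly 3.3 hours, which is why 4-hour batches needed the larger local limit.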